xref: /haiku/src/system/kernel/sem.cpp (revision 23f179da55b1bd1ba84fbf3d3c56947e2c8d0aca)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*! Semaphore code */
12 
13 
14 #include <OS.h>
15 
16 #include <sem.h>
17 #include <kernel.h>
18 #include <kscheduler.h>
19 #include <ksignal.h>
20 #include <smp.h>
21 #include <int.h>
22 #include <arch/int.h>
23 #include <debug.h>
24 #include <listeners.h>
25 #include <scheduling_analysis.h>
26 #include <thread.h>
27 #include <team.h>
28 #include <util/AutoLock.h>
29 #include <util/DoublyLinkedList.h>
30 #include <vfs.h>
31 #include <vm_page.h>
32 #include <boot/kernel_args.h>
33 #include <syscall_restart.h>
34 #include <wait_for_objects.h>
35 
36 #include <string.h>
37 #include <stdlib.h>
38 
39 #include "kernel_debug_config.h"
40 
41 
42 //#define TRACE_SEM
43 #ifdef TRACE_SEM
44 #	define TRACE(x) dprintf_no_syslog x
45 #else
46 #	define TRACE(x) ;
47 #endif
48 
49 //#define KTRACE_SEM
50 #ifdef KTRACE_SEM
51 #	define KTRACE(x...) ktrace_printf(x)
52 #else
53 #	define KTRACE(x...) do {} while (false)
54 #endif
55 
56 
// Entry in a semaphore's wait queue. One per blocked thread; allocated on
// the waiting thread's stack in switch_sem_etc().
struct queued_thread : DoublyLinkedListLinkImpl<queued_thread> {
	queued_thread(struct thread *thread, int32 count)
		:
		thread(thread),
		count(count),
		queued(false)
	{
	}

	struct thread	*thread;	// the blocked thread
	int32			count;		// units the thread wants to acquire
	bool			queued;		// true while linked into a sem's queue
};
70 
// Queue of threads blocked on a semaphore.
typedef DoublyLinkedList<queued_thread> ThreadQueue;

struct sem_entry {
	union {
		// when slot in use
		struct {
			struct list_link	team_link;	// link in owner team's sem_list
			int32				count;		// current semaphore count
			int32				net_count;
									// count + acquisition count of all blocked
									// threads
			char*				name;		// malloc()ed semaphore name
			struct team*		owner;		// owning team
			select_info*		select_infos;	// pending select() requests
			thread_id			last_acquirer;
									// negated by release_sem_etc() to mark
									// that a release happened since
#if DEBUG_SEM_LAST_ACQUIRER
			int32				last_acquire_count;
			thread_id			last_releaser;
			int32				last_release_count;
#endif
		} used;

		// when slot unused
		struct {
			sem_id				next_id;	// ID the slot gets when reused
			struct sem_entry*	next;		// next entry in the free list
		} unused;
	} u;

	sem_id				id;
	spinlock			lock;	// protects only the id field when unused
	ThreadQueue			queue;	// should be in u.used, but has a constructor
};
104 
static const int32 kMaxSemaphores = 65536;
static int32 sMaxSems = 4096;
	// Final value is computed based on the amount of available memory
static int32 sUsedSems = 0;

// Semaphore table and the free-slot list threaded through its unused
// entries; the list (and table-wide state) is guarded by sSemsSpinlock,
// individual slots by their own sem_entry::lock.
static struct sem_entry *sSems = NULL;
static bool sSemsActive = false;
static struct sem_entry	*sFreeSemsHead = NULL;
static struct sem_entry	*sFreeSemsTail = NULL;

static spinlock sSemsSpinlock = B_SPINLOCK_INITIALIZER;
#define GRAB_SEM_LIST_LOCK()     acquire_spinlock(&sSemsSpinlock)
#define RELEASE_SEM_LIST_LOCK()  release_spinlock(&sSemsSpinlock)
#define GRAB_SEM_LOCK(s)         acquire_spinlock(&(s).lock)
#define RELEASE_SEM_LOCK(s)      release_spinlock(&(s).lock)
120 
121 
/*!	Debugger command "sems": lists all active semaphores, optionally
	filtered by owning team, (partial) name, or last acquirer.
*/
static int
dump_sem_list(int argc, char** argv)
{
	const char* name = NULL;
	team_id owner = -1;
	thread_id last = -1;
	int32 i;

	// parse the optional filter argument(s)
	if (argc > 2) {
		if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
			owner = strtoul(argv[2], NULL, 0);
		else if (!strcmp(argv[1], "name"))
			name = argv[2];
		else if (!strcmp(argv[1], "last"))
			last = strtoul(argv[2], NULL, 0);
	} else if (argc > 1)
		owner = strtoul(argv[1], NULL, 0);

	kprintf("sem            id count   team   last  name\n");

	for (i = 0; i < sMaxSems; i++) {
		struct sem_entry* sem = &sSems[i];
		// Skip unused slots and entries not matching the filter. The id < 0
		// test must come first: the u.used members are only valid for slots
		// in use.
		if (sem->id < 0
			|| (last != -1 && sem->u.used.last_acquirer != last)
			|| (name != NULL && strstr(sem->u.used.name, name) == NULL)
			|| (owner != -1
				&& (sem->u.used.owner == NULL
					|| sem->u.used.owner->id != owner)))
			continue;

		kprintf("%p %6ld %5ld %6ld "
			"%6ld "
			" %s\n", sem, sem->id, sem->u.used.count,
			sem->u.used.owner != NULL ? sem->u.used.owner->id : -1,
			sem->u.used.last_acquirer > 0 ? sem->u.used.last_acquirer : 0,
			sem->u.used.name);
	}

	return 0;
}
162 
163 
/*!	Prints all available information about the given semaphore entry and
	sets the debugger variables _sem/_semID/_owner/_acquirer accordingly.
	Handles both used and free slots (free slots have id < 0).
*/
static void
dump_sem(struct sem_entry* sem)
{
	kprintf("SEM: %p\n", sem);
	kprintf("id:      %ld (%#lx)\n", sem->id, sem->id);
	if (sem->id >= 0) {
		kprintf("name:    '%s'\n", sem->u.used.name);
		kprintf("owner:   %ld\n",
			sem->u.used.owner != NULL ? sem->u.used.owner->id : -1);
		kprintf("count:   %ld\n", sem->u.used.count);
		kprintf("queue:  ");
		if (!sem->queue.IsEmpty()) {
			// list the IDs of all threads waiting on this semaphore
			ThreadQueue::Iterator it = sem->queue.GetIterator();
			while (queued_thread* entry = it.Next())
				kprintf(" %ld", entry->thread->id);
			kprintf("\n");
		} else
			kprintf(" -\n");

		set_debug_variable("_sem", (addr_t)sem);
		set_debug_variable("_semID", sem->id);
		set_debug_variable("_owner",
			sem->u.used.owner != NULL ? sem->u.used.owner->id : -1);

#if DEBUG_SEM_LAST_ACQUIRER
		kprintf("last acquired by: %ld, count: %ld\n",
			sem->u.used.last_acquirer, sem->u.used.last_acquire_count);
		kprintf("last released by: %ld, count: %ld\n",
			sem->u.used.last_releaser, sem->u.used.last_release_count);

		if (sem->u.used.last_releaser != 0)
			set_debug_variable("_releaser", sem->u.used.last_releaser);
		else
			unset_debug_variable("_releaser");
#else
		kprintf("last acquired by: %ld\n", sem->u.used.last_acquirer);
#endif

		if (sem->u.used.last_acquirer != 0)
			set_debug_variable("_acquirer", sem->u.used.last_acquirer);
		else
			unset_debug_variable("_acquirer");
	} else {
		// unused slot: show the free-list linkage
		kprintf("next:    %p\n", sem->u.unused.next);
		kprintf("next_id: %ld\n", sem->u.unused.next_id);
	}
}
211 
212 
213 static int
214 dump_sem_info(int argc, char **argv)
215 {
216 	bool found = false;
217 	addr_t num;
218 	int32 i;
219 
220 	if (argc < 2) {
221 		print_debugger_command_usage(argv[0]);
222 		return 0;
223 	}
224 
225 	num = strtoul(argv[1], NULL, 0);
226 
227 	if (IS_KERNEL_ADDRESS(num)) {
228 		dump_sem((struct sem_entry *)num);
229 		return 0;
230 	} else if (num >= 0) {
231 		uint32 slot = num % sMaxSems;
232 		if (sSems[slot].id != (int)num) {
233 			kprintf("sem %ld (%#lx) doesn't exist!\n", num, num);
234 			return 0;
235 		}
236 
237 		dump_sem(&sSems[slot]);
238 		return 0;
239 	}
240 
241 	// walk through the sem list, trying to match name
242 	for (i = 0; i < sMaxSems; i++) {
243 		if (sSems[i].u.used.name != NULL
244 			&& strcmp(argv[1], sSems[i].u.used.name) == 0) {
245 			dump_sem(&sSems[i]);
246 			found = true;
247 		}
248 	}
249 
250 	if (!found)
251 		kprintf("sem \"%s\" doesn't exist!\n", argv[1]);
252 	return 0;
253 }
254 
255 
256 /*!	\brief Appends a semaphore slot to the free list.
257 
258 	The semaphore list must be locked.
259 	The slot's id field is not changed. It should already be set to -1.
260 
261 	\param slot The index of the semaphore slot.
262 	\param nextID The ID the slot will get when reused. If < 0 the \a slot
263 		   is used.
264 */
265 static void
266 free_sem_slot(int slot, sem_id nextID)
267 {
268 	struct sem_entry *sem = sSems + slot;
269 	// set next_id to the next possible value; for sanity check the current ID
270 	if (nextID < 0)
271 		sem->u.unused.next_id = slot;
272 	else
273 		sem->u.unused.next_id = nextID;
274 	// append the entry to the list
275 	if (sFreeSemsTail)
276 		sFreeSemsTail->u.unused.next = sem;
277 	else
278 		sFreeSemsHead = sem;
279 	sFreeSemsTail = sem;
280 	sem->u.unused.next = NULL;
281 }
282 
283 
284 static inline void
285 notify_sem_select_events(struct sem_entry* sem, uint16 events)
286 {
287 	if (sem->u.used.select_infos)
288 		notify_select_events_list(sem->u.used.select_infos, events);
289 }
290 
291 
/*!	Fills the sem_info structure with information from the specified
	semaphore.
	The semaphore's spinlock must be held when called.
*/
static void
fill_sem_info(struct sem_entry* sem, sem_info* info, size_t size)
{
	info->sem = sem->id;
	info->team = sem->u.used.owner != NULL ? sem->u.used.owner->id : -1;
	strlcpy(info->name, sem->u.used.name, sizeof(info->name));
	info->count = sem->u.used.count;
	info->latest_holder = sem->u.used.last_acquirer;
}
305 
306 
307 /*!	You must call this function with interrupts disabled, and the semaphore's
308 	spinlock held. Note that it will unlock the spinlock itself.
309 	Since it cannot free() the semaphore's name with interrupts turned off, it
310 	will return that one in \a name.
311 */
312 static void
313 uninit_sem_locked(struct sem_entry& sem, char** _name)
314 {
315 	KTRACE("delete_sem(sem: %ld)", sem.u.used.id);
316 
317 	notify_sem_select_events(&sem, B_EVENT_INVALID);
318 	sem.u.used.select_infos = NULL;
319 
320 	// free any threads waiting for this semaphore
321 	GRAB_THREAD_LOCK();
322 	while (queued_thread* entry = sem.queue.RemoveHead()) {
323 		entry->queued = false;
324 		thread_unblock_locked(entry->thread, B_BAD_SEM_ID);
325 	}
326 	RELEASE_THREAD_LOCK();
327 
328 	int32 id = sem.id;
329 	sem.id = -1;
330 	*_name = sem.u.used.name;
331 	sem.u.used.name = NULL;
332 
333 	RELEASE_SEM_LOCK(sem);
334 
335 	// append slot to the free list
336 	GRAB_SEM_LIST_LOCK();
337 	free_sem_slot(id % sMaxSems, id + sMaxSems);
338 	atomic_add(&sUsedSems, -1);
339 	RELEASE_SEM_LIST_LOCK();
340 }
341 
342 
/*!	Deletes the semaphore \a id. If \a checkPermission is true, deleting a
	kernel-owned semaphore is refused (used for the userland syscall path).
	Lock order: interrupts off -> team lock -> semaphore lock; the name is
	freed only after interrupts are restored.
*/
static status_t
delete_sem_internal(sem_id id, bool checkPermission)
{
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;

	int32 slot = id % sMaxSems;

	cpu_status state = disable_interrupts();
	GRAB_TEAM_LOCK();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_TEAM_LOCK();
		restore_interrupts(state);
		TRACE(("delete_sem: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	if (checkPermission
		&& sSems[slot].u.used.owner == team_get_kernel_team()) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_TEAM_LOCK();
		restore_interrupts(state);
		dprintf("thread %ld tried to delete kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		return B_NOT_ALLOWED;
	}

	// unlink the semaphore from its owner team's list
	if (sSems[slot].u.used.owner != NULL) {
		list_remove_link(&sSems[slot].u.used.team_link);
		sSems[slot].u.used.owner = NULL;
	} else
		panic("sem %ld has no owner", id);

	RELEASE_TEAM_LOCK();

	char* name;
	uninit_sem_locked(sSems[slot], &name);
		// releases the semaphore lock itself

	restore_interrupts(state);

	// free() must not be called with interrupts disabled
	free(name);
	return B_OK;
}
391 
392 
393 //	#pragma mark - Private Kernel API
394 
395 
// TODO: Name clash with POSIX sem_init()... (we could just use C++)
/*!	Initializes the semaphore subsystem: sizes and allocates the semaphore
	table, puts every slot on the free list, and registers the "sems"/"sem"
	debugger commands. Called once during kernel startup.
*/
status_t
haiku_sem_init(kernel_args *args)
{
	area_id area;
	int32 i;

	TRACE(("sem_init: entry\n"));

	// compute maximal number of semaphores depending on the available memory
	// 128 MB -> 16384 semaphores, 448 kB fixed array size
	// 256 MB -> 32768, 896 kB
	// 512 MB and more-> 65536, 1.75 MB
	i = vm_page_num_pages() / 2;
	while (sMaxSems < i && sMaxSems < kMaxSemaphores)
		sMaxSems <<= 1;

	// create and initialize semaphore table
	area = create_area("sem_table", (void **)&sSems, B_ANY_KERNEL_ADDRESS,
		sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		panic("unable to allocate semaphore table!\n");

	// mark every slot unused (id == -1) and chain it into the free list
	memset(sSems, 0, sizeof(struct sem_entry) * sMaxSems);
	for (i = 0; i < sMaxSems; i++) {
		sSems[i].id = -1;
		free_sem_slot(i, i);
	}

	// add debugger commands
	add_debugger_command_etc("sems", &dump_sem_list,
		"Dump a list of all active semaphores (for team, with name, etc.)",
		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]"
			" | (\"last\" <last acquirer>)\n"
		"Prints a list of all active semaphores meeting the given\n"
		"requirement. If no argument is given, all sems are listed.\n"
		"  <team>             - The team owning the semaphores.\n"
		"  <name>             - Part of the name of the semaphores.\n"
		"  <last acquirer>    - The thread that last acquired the semaphore.\n"
		, 0);
	add_debugger_command_etc("sem", &dump_sem_info,
		"Dump info about a particular semaphore",
		"<sem>\n"
		"Prints info about the specified semaphore.\n"
		"  <sem>  - pointer to the semaphore structure, semaphore ID, or name\n"
		"           of the semaphore to print info for.\n", 0);

	TRACE(("sem_init: exit\n"));

	sSemsActive = true;

	return 0;
}
450 
451 
/*!	Creates a semaphore with the given parameters.

	This function is only available from within the kernel, and
	should not be made public - if possible, we should remove it
	completely (and have only create_sem() exported).

	\param count Initial semaphore count.
	\param name Semaphore name (copied; NULL yields "unnamed semaphore").
	\param owner Team that will own the semaphore.
	\return The new semaphore's ID, or a negative error code
		(B_NO_MORE_SEMS, B_NO_MEMORY, B_BAD_TEAM_ID).
*/
sem_id
create_sem_etc(int32 count, const char* name, team_id owner)
{
	struct sem_entry* sem = NULL;
	cpu_status state;
	sem_id id = B_NO_MORE_SEMS;
	char* tempName;
	size_t nameLength;

	if (sSemsActive == false || sUsedSems == sMaxSems)
		return B_NO_MORE_SEMS;

	if (name == NULL)
		name = "unnamed semaphore";

	// copy the name up-front, while allocation is still allowed
	nameLength = strlen(name) + 1;
	nameLength = min_c(nameLength, B_OS_NAME_LENGTH);
	tempName = (char*)malloc(nameLength);
	if (tempName == NULL)
		return B_NO_MEMORY;

	strlcpy(tempName, name, nameLength);

	// resolve the owning team without locking for the common cases
	struct team* team = NULL;
	if (owner == team_get_kernel_team_id())
		team = team_get_kernel_team();
	else if (owner == team_get_current_team_id())
		team = thread_get_current_thread()->team;

	bool teamsLocked = false;
	state = disable_interrupts();

	if (team == NULL) {
		// We need to hold the team lock to make sure this one exists (and
		// won't go away).
		GRAB_TEAM_LOCK();

		team = team_get_team_struct_locked(owner);
		if (team == NULL) {
			RELEASE_TEAM_LOCK();
			restore_interrupts(state);
			return B_BAD_TEAM_ID;
		}

		teamsLocked = true;
	}
	GRAB_SEM_LIST_LOCK();

	// get the first slot from the free list
	sem = sFreeSemsHead;
	if (sem) {
		// remove it from the free list
		sFreeSemsHead = sem->u.unused.next;
		if (!sFreeSemsHead)
			sFreeSemsTail = NULL;

		// init the slot
		GRAB_SEM_LOCK(*sem);
		sem->id = sem->u.unused.next_id;
		sem->u.used.count = count;
		sem->u.used.net_count = count;
		new(&sem->queue) ThreadQueue;
		sem->u.used.name = tempName;
		sem->u.used.owner = team;
		sem->u.used.select_infos = NULL;
		id = sem->id;

		if (teamsLocked) {
			// insert now
			list_add_item(&team->sem_list, &sem->u.used.team_link);
		}

		RELEASE_SEM_LOCK(*sem);

		atomic_add(&sUsedSems, 1);

		KTRACE("create_sem_etc(count: %ld, name: %s, owner: %ld) -> %ld",
			count, name, owner, id);

		T_SCHEDULING_ANALYSIS(CreateSemaphore(id, name));
		NotifyWaitObjectListeners(&WaitObjectListener::SemaphoreCreated, id,
			name);
	}

	RELEASE_SEM_LIST_LOCK();

	// if the team lock wasn't held above, take it now to insert the
	// semaphore into the team's list
	int32 slot = id % sMaxSems;
	if (sem != NULL && !teamsLocked) {
		GRAB_TEAM_LOCK();
		GRAB_SEM_LOCK(sSems[slot]);

		list_add_item(&team->sem_list, &sem->u.used.team_link);

		RELEASE_SEM_LOCK(sSems[slot]);
		teamsLocked = true;
	}

	if (teamsLocked)
		RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	// no free slot -- the copied name is ours to release
	if (sem == NULL)
		free(tempName);

	return id;
}
564 
565 
566 status_t
567 select_sem(int32 id, struct select_info* info, bool kernel)
568 {
569 	cpu_status state;
570 	int32 slot;
571 	status_t error = B_OK;
572 
573 	if (id < 0)
574 		return B_BAD_SEM_ID;
575 
576 	slot = id % sMaxSems;
577 
578 	state = disable_interrupts();
579 	GRAB_SEM_LOCK(sSems[slot]);
580 
581 	if (sSems[slot].id != id) {
582 		// bad sem ID
583 		error = B_BAD_SEM_ID;
584 	} else if (!kernel
585 		&& sSems[slot].u.used.owner == team_get_kernel_team()) {
586 		// kernel semaphore, but call from userland
587 		error = B_NOT_ALLOWED;
588 	} else {
589 		info->selected_events &= B_EVENT_ACQUIRE_SEMAPHORE | B_EVENT_INVALID;
590 
591 		if (info->selected_events != 0) {
592 			info->next = sSems[slot].u.used.select_infos;
593 			sSems[slot].u.used.select_infos = info;
594 
595 			if (sSems[slot].u.used.count > 0)
596 				notify_select_events(info, B_EVENT_ACQUIRE_SEMAPHORE);
597 		}
598 	}
599 
600 	RELEASE_SEM_LOCK(sSems[slot]);
601 	restore_interrupts(state);
602 
603 	return error;
604 }
605 
606 
607 status_t
608 deselect_sem(int32 id, struct select_info* info, bool kernel)
609 {
610 	cpu_status state;
611 	int32 slot;
612 
613 	if (id < 0)
614 		return B_BAD_SEM_ID;
615 
616 	if (info->selected_events == 0)
617 		return B_OK;
618 
619 	slot = id % sMaxSems;
620 
621 	state = disable_interrupts();
622 	GRAB_SEM_LOCK(sSems[slot]);
623 
624 	if (sSems[slot].id == id) {
625 		select_info** infoLocation = &sSems[slot].u.used.select_infos;
626 		while (*infoLocation != NULL && *infoLocation != info)
627 			infoLocation = &(*infoLocation)->next;
628 
629 		if (*infoLocation == info)
630 			*infoLocation = info->next;
631 	}
632 
633 	RELEASE_SEM_LOCK(sSems[slot]);
634 	restore_interrupts(state);
635 
636 	return B_OK;
637 }
638 
639 
/*!	Forcibly removes a thread from a semaphores wait queue. May have to wake up
	other threads in the process.
	Must be called with semaphore lock held. The thread lock must not be held.
*/
static void
remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
{
	if (!entry->queued)
		return;

	// give the units this thread was waiting for back to the semaphore
	sem->queue.Remove(entry);
	entry->queued = false;
	sem->u.used.count += entry->count;

	// We're done with this entry. We only have to check, if other threads
	// need unblocking, too.

	// Now see if more threads need to be woken up. We get the thread lock for
	// that time, so the blocking state of threads won't change. We need that
	// lock anyway when unblocking a thread.
	GRAB_THREAD_LOCK();

	while ((entry = sem->queue.Head()) != NULL) {
		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied, unblock
			// it. Otherwise we can't unblock any other thread.
			if (entry->count > sem->u.used.net_count)
				break;

			thread_unblock_locked(entry->thread, B_OK);
			sem->u.used.net_count -= entry->count;
		} else {
			// The thread is no longer waiting, but still queued, which means
			// acquisition failed and we can just remove it.
			sem->u.used.count += entry->count;
		}

		sem->queue.Remove(entry);
		entry->queued = false;
	}

	RELEASE_THREAD_LOCK();

	// select notification, if the semaphore is now acquirable
	if (sem->u.used.count > 0)
		notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE);
}
687 
688 
/*!	This function deletes all semaphores belonging to a particular team.
	The team's semaphore list is detached atomically under the team
	spinlock; each semaphore is then deleted outside that lock, and its
	name freed with interrupts enabled.
*/
void
sem_delete_owned_sems(struct team* team)
{
	struct list queue;

	{
		// atomically take over the team's whole semaphore list
		InterruptsSpinLocker locker(gTeamSpinlock);
		list_move_to_list(&team->sem_list, &queue);
	}

	while (sem_entry* sem = (sem_entry*)list_remove_head_item(&queue)) {
		char* name;

		{
			InterruptsLocker locker;
			GRAB_SEM_LOCK(*sem);
			uninit_sem_locked(*sem, &name);
				// releases the semaphore lock itself
		}

		// free() must not run with interrupts disabled
		free(name);
	}
}
713 
714 
715 int32
716 sem_max_sems(void)
717 {
718 	return sMaxSems;
719 }
720 
721 
722 int32
723 sem_used_sems(void)
724 {
725 	return sUsedSems;
726 }
727 
728 
729 //	#pragma mark - Public Kernel API
730 
731 
732 sem_id
733 create_sem(int32 count, const char* name)
734 {
735 	return create_sem_etc(count, name, team_get_kernel_team_id());
736 }
737 
738 
739 status_t
740 delete_sem(sem_id id)
741 {
742 	return delete_sem_internal(id, false);
743 }
744 
745 
746 status_t
747 acquire_sem(sem_id id)
748 {
749 	return switch_sem_etc(-1, id, 1, 0, 0);
750 }
751 
752 
753 status_t
754 acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
755 {
756 	return switch_sem_etc(-1, id, count, flags, timeout);
757 }
758 
759 
760 status_t
761 switch_sem(sem_id toBeReleased, sem_id toBeAcquired)
762 {
763 	return switch_sem_etc(toBeReleased, toBeAcquired, 1, 0, 0);
764 }
765 
766 
/*!	Acquires \a count units of semaphore \a id, first releasing one unit of
	\a semToBeReleased (if >= 0) once this thread is queued -- so the pair
	behaves atomically with respect to the wakeup.
	\a flags may contain B_RELATIVE_TIMEOUT or B_ABSOLUTE_TIMEOUT (but not
	both), B_CHECK_PERMISSION, and interruption flags evaluated by
	thread_is_interrupted().
	Must be called with interrupts enabled.
*/
status_t
switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
	uint32 flags, bigtime_t timeout)
{
	int slot = id % sMaxSems;
	int state;
	status_t status = B_OK;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	if (!are_interrupts_enabled()) {
		panic("switch_sem_etc: called with interrupts disabled for sem %ld\n",
			id);
	}

	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0
		|| (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {
		return B_BAD_VALUE;
	}

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("switch_sem_etc: bad sem %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team()) {
		dprintf("thread %ld tried to acquire kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		status = B_NOT_ALLOWED;
		goto err;
	}

	// would we have to block? then handle zero/negative timeouts up-front
	if (sSems[slot].u.used.count - count < 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
			// immediate timeout
			status = B_WOULD_BLOCK;
			goto err;
		} else if ((flags & B_ABSOLUTE_TIMEOUT) != 0 && timeout < 0) {
			// absolute negative timeout
			status = B_TIMED_OUT;
			goto err;
		}
	}

	KTRACE("switch_sem_etc(semToBeReleased: %ld, sem: %ld, count: %ld, "
		"flags: 0x%lx, timeout: %lld)", semToBeReleased, id, count, flags,
		timeout);

	if ((sSems[slot].u.used.count -= count) < 0) {
		// we need to block
		struct thread *thread = thread_get_current_thread();

		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
			" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));

		// do a quick check to see if the thread has any pending signals
		// this should catch most of the cases where the thread had a signal
		if (thread_is_interrupted(thread, flags)) {
			sSems[slot].u.used.count += count;
			status = B_INTERRUPTED;
				// the other semaphore will be released later
			goto err;
		}

		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
			timeout = B_INFINITE_TIMEOUT;

		// enqueue in the semaphore queue and get ready to wait
		queued_thread queueEntry(thread, count);
		sSems[slot].queue.Add(&queueEntry);
		queueEntry.queued = true;

		thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE,
			(void*)(addr_t)id);

		RELEASE_SEM_LOCK(sSems[slot]);

		// release the other semaphore, if any
		if (semToBeReleased >= 0) {
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
			semToBeReleased = -1;
		}

		GRAB_THREAD_LOCK();

		status_t acquireStatus = timeout == B_INFINITE_TIMEOUT
			? thread_block_locked(thread)
			: thread_block_with_timeout_locked(flags, timeout);

		RELEASE_THREAD_LOCK();
		GRAB_SEM_LOCK(sSems[slot]);

		// If we're still queued, this means the acquisition failed, and we
		// need to remove our entry and (potentially) wake up other threads.
		if (queueEntry.queued)
			remove_thread_from_sem(&queueEntry, &sSems[slot]);

		if (acquireStatus >= B_OK) {
			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
			sSems[slot].u.used.last_acquire_count = count;
#endif
		}

		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);

		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
			thread->name));
		KTRACE("switch_sem_etc() done: 0x%lx", acquireStatus);
		return acquireStatus;
	} else {
		// acquired without blocking
		sSems[slot].u.used.net_count -= count;
		sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
		sSems[slot].u.used.last_acquire_count = count;
#endif
	}

err:
	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	if (status == B_INTERRUPTED && semToBeReleased >= B_OK) {
		// depending on when we were interrupted, we need to still
		// release the semaphore to always leave in a consistent
		// state
		release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
	}

#if 0
	if (status == B_NOT_ALLOWED)
	_user_debugger("Thread tried to acquire kernel semaphore.");
#endif

	KTRACE("switch_sem_etc() done: 0x%lx", status);

	return status;
}
919 
920 
921 status_t
922 release_sem(sem_id id)
923 {
924 	return release_sem_etc(id, 1, 0);
925 }
926 
927 
/*!	Releases \a count units of semaphore \a id, unblocking as many queued
	threads as the new count satisfies.
	\a flags may contain B_RELEASE_ALL (release exactly enough to wake all
	current waiters), B_RELEASE_IF_WAITING_ONLY (don't raise the count
	beyond waking waiters), B_DO_NOT_RESCHEDULE, and B_CHECK_PERMISSION.
*/
status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	int32 slot = id % sMaxSems;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;

	InterruptsLocker _;
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team()) {
		dprintf("thread %ld tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		return B_NOT_ALLOWED;
	}

	KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,
		flags);

	// negate last_acquirer to record that a release happened since
	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	if (flags & B_RELEASE_ALL) {
		// net_count - count is the total acquisition count of all waiters
		count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			return B_OK;

		// Don't release more than necessary -- there might be interrupted/
		// timed out threads in the queue.
		flags |= B_RELEASE_IF_WAITING_ONLY;
	}

	bool reschedule = false;

	SpinLocker threadLocker(gThreadSpinlock);

	while (count > 0) {
		queued_thread* entry = sSems[slot].queue.Head();
		if (entry == NULL) {
			// no (more) waiters -- keep the remaining units, unless we were
			// asked to only satisfy waiting threads
			if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
			}
			break;
		}

		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied,
			// unblock it. Otherwise we can't unblock any other thread.
			if (entry->count > sSems[slot].u.used.net_count + count) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
				break;
			}

			reschedule |= thread_unblock_locked(entry->thread, B_OK);

			int delta = min_c(count, entry->count);
			sSems[slot].u.used.count += delta;
			sSems[slot].u.used.net_count += delta - entry->count;
			count -= delta;
		} else {
			// The thread is no longer waiting, but still queued, which
			// means acquisition failed and we can just remove it.
			sSems[slot].u.used.count += entry->count;
		}

		sSems[slot].queue.Remove(entry);
		entry->queued = false;
	}

	threadLocker.Unlock();

	// select notification, if the semaphore is now acquirable
	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	// If we've unblocked another thread reschedule, if we've not explicitly
	// been told not to.
	if (reschedule && (flags & B_DO_NOT_RESCHEDULE) == 0) {
		semLocker.Unlock();
		threadLocker.Lock();
		scheduler_reschedule();
	}

	return B_OK;
}
1034 
1035 
1036 status_t
1037 get_sem_count(sem_id id, int32 *_count)
1038 {
1039 	int slot;
1040 	int state;
1041 
1042 	if (sSemsActive == false)
1043 		return B_NO_MORE_SEMS;
1044 	if (id < 0)
1045 		return B_BAD_SEM_ID;
1046 	if (_count == NULL)
1047 		return B_BAD_VALUE;
1048 
1049 	slot = id % sMaxSems;
1050 
1051 	state = disable_interrupts();
1052 	GRAB_SEM_LOCK(sSems[slot]);
1053 
1054 	if (sSems[slot].id != id) {
1055 		RELEASE_SEM_LOCK(sSems[slot]);
1056 		restore_interrupts(state);
1057 		TRACE(("sem_get_count: invalid sem_id %ld\n", id));
1058 		return B_BAD_SEM_ID;
1059 	}
1060 
1061 	*_count = sSems[slot].u.used.count;
1062 
1063 	RELEASE_SEM_LOCK(sSems[slot]);
1064 	restore_interrupts(state);
1065 
1066 	return B_OK;
1067 }
1068 
1069 
1070 /*!	Called by the get_sem_info() macro. */
1071 status_t
1072 _get_sem_info(sem_id id, struct sem_info *info, size_t size)
1073 {
1074 	status_t status = B_OK;
1075 	int state;
1076 	int slot;
1077 
1078 	if (!sSemsActive)
1079 		return B_NO_MORE_SEMS;
1080 	if (id < 0)
1081 		return B_BAD_SEM_ID;
1082 	if (info == NULL || size != sizeof(sem_info))
1083 		return B_BAD_VALUE;
1084 
1085 	slot = id % sMaxSems;
1086 
1087 	state = disable_interrupts();
1088 	GRAB_SEM_LOCK(sSems[slot]);
1089 
1090 	if (sSems[slot].id != id) {
1091 		status = B_BAD_SEM_ID;
1092 		TRACE(("get_sem_info: invalid sem_id %ld\n", id));
1093 	} else
1094 		fill_sem_info(&sSems[slot], info, size);
1095 
1096 	RELEASE_SEM_LOCK(sSems[slot]);
1097 	restore_interrupts(state);
1098 
1099 	return status;
1100 }
1101 
1102 
/*!	Called by the get_next_sem_info() macro. Iterates the semaphores owned
	by \a teamID, using \a _cookie as a positional index into the team's
	semaphore list.
*/
status_t
_get_next_sem_info(team_id teamID, int32 *_cookie, struct sem_info *info,
	size_t size)
{
	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (_cookie == NULL || info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;
	if (teamID < 0)
		return B_BAD_TEAM_ID;

	InterruptsSpinLocker locker(gTeamSpinlock);

	struct team* team;
	if (teamID == B_CURRENT_TEAM)
		team = thread_get_current_thread()->team;
	else
		team = team_get_team_struct_locked(teamID);

	if (team == NULL)
		return B_BAD_TEAM_ID;

	// TODO: find a way to iterate the list that is more reliable
	sem_entry* sem = (sem_entry*)list_get_first_item(&team->sem_list);
	int32 newIndex = *_cookie;
	int32 index = 0;
	bool found = false;

	while (!found) {
		// find the next entry to be returned
		while (sem != NULL && index < newIndex) {
			sem = (sem_entry*)list_get_next_item(&team->sem_list, sem);
			index++;
		}

		if (sem == NULL)
			return B_BAD_VALUE;

		GRAB_SEM_LOCK(*sem);

		if (sem->id != -1 && sem->u.used.owner == team) {
			// found one!
			fill_sem_info(sem, info, size);
			newIndex = index + 1;
			found = true;
		} else
			newIndex++;

		RELEASE_SEM_LOCK(*sem);
	}

	// NOTE(review): unreachable -- the loop above only terminates with
	// found == true (otherwise it returns B_BAD_VALUE directly)
	if (!found)
		return B_BAD_VALUE;

	*_cookie = newIndex;
	return B_OK;
}
1161 
1162 
1163 status_t
1164 set_sem_owner(sem_id id, team_id newTeamID)
1165 {
1166 	if (sSemsActive == false)
1167 		return B_NO_MORE_SEMS;
1168 	if (id < 0)
1169 		return B_BAD_SEM_ID;
1170 	if (newTeamID < 0)
1171 		return B_BAD_TEAM_ID;
1172 
1173 	int32 slot = id % sMaxSems;
1174 
1175 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
1176 
1177 	struct team* newTeam = team_get_team_struct_locked(newTeamID);
1178 	if (newTeam == NULL)
1179 		return B_BAD_TEAM_ID;
1180 
1181 	SpinLocker semLocker(sSems[slot].lock);
1182 
1183 	if (sSems[slot].id != id) {
1184 		TRACE(("set_sem_owner: invalid sem_id %ld\n", id));
1185 		return B_BAD_SEM_ID;
1186 	}
1187 
1188 	list_remove_link(&sSems[slot].u.used.team_link);
1189 	list_add_item(&newTeam->sem_list, &sSems[slot].u.used.team_link);
1190 
1191 	sSems[slot].u.used.owner = newTeam;
1192 	return B_OK;
1193 }
1194 
1195 
1196 /*!	Returns the name of the semaphore. The name is not copied, so the caller
1197 	must make sure that the semaphore remains alive as long as the name is used.
1198 */
1199 const char*
1200 sem_get_name_unsafe(sem_id id)
1201 {
1202 	int slot = id % sMaxSems;
1203 
1204 	if (sSemsActive == false || id < 0 || sSems[slot].id != id)
1205 		return NULL;
1206 
1207 	return sSems[slot].u.used.name;
1208 }
1209 
1210 
1211 //	#pragma mark - Syscalls
1212 
1213 
1214 sem_id
1215 _user_create_sem(int32 count, const char *userName)
1216 {
1217 	char name[B_OS_NAME_LENGTH];
1218 
1219 	if (userName == NULL)
1220 		return create_sem_etc(count, NULL, team_get_current_team_id());
1221 
1222 	if (!IS_USER_ADDRESS(userName)
1223 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
1224 		return B_BAD_ADDRESS;
1225 
1226 	return create_sem_etc(count, name, team_get_current_team_id());
1227 }
1228 
1229 
1230 status_t
1231 _user_delete_sem(sem_id id)
1232 {
1233 	return delete_sem_internal(id, true);
1234 }
1235 
1236 
1237 status_t
1238 _user_acquire_sem(sem_id id)
1239 {
1240 	status_t error = switch_sem_etc(-1, id, 1,
1241 		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);
1242 
1243 	return syscall_restart_handle_post(error);
1244 }
1245 
1246 
1247 status_t
1248 _user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
1249 {
1250 	syscall_restart_handle_timeout_pre(flags, timeout);
1251 
1252 	status_t error = switch_sem_etc(-1, id, count,
1253 		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);
1254 
1255 	return syscall_restart_handle_timeout_post(error, timeout);
1256 }
1257 
1258 
1259 status_t
1260 _user_switch_sem(sem_id releaseSem, sem_id id)
1261 {
1262 	status_t error = switch_sem_etc(releaseSem, id, 1,
1263 		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);
1264 
1265 	if (releaseSem < 0)
1266 		return syscall_restart_handle_post(error);
1267 
1268 	return error;
1269 }
1270 
1271 
1272 status_t
1273 _user_switch_sem_etc(sem_id releaseSem, sem_id id, int32 count, uint32 flags,
1274 	bigtime_t timeout)
1275 {
1276 	if (releaseSem < 0)
1277 		syscall_restart_handle_timeout_pre(flags, timeout);
1278 
1279 	status_t error = switch_sem_etc(releaseSem, id, count,
1280 		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);
1281 
1282 	if (releaseSem < 0)
1283 		return syscall_restart_handle_timeout_post(error, timeout);
1284 
1285 	return error;
1286 }
1287 
1288 
1289 status_t
1290 _user_release_sem(sem_id id)
1291 {
1292 	return release_sem_etc(id, 1, B_CHECK_PERMISSION);
1293 }
1294 
1295 
1296 status_t
1297 _user_release_sem_etc(sem_id id, int32 count, uint32 flags)
1298 {
1299 	return release_sem_etc(id, count, flags | B_CHECK_PERMISSION);
1300 }
1301 
1302 
1303 status_t
1304 _user_get_sem_count(sem_id id, int32 *userCount)
1305 {
1306 	status_t status;
1307 	int32 count;
1308 
1309 	if (userCount == NULL || !IS_USER_ADDRESS(userCount))
1310 		return B_BAD_ADDRESS;
1311 
1312 	status = get_sem_count(id, &count);
1313 	if (status == B_OK && user_memcpy(userCount, &count, sizeof(int32)) < B_OK)
1314 		return B_BAD_ADDRESS;
1315 
1316 	return status;
1317 }
1318 
1319 
1320 status_t
1321 _user_get_sem_info(sem_id id, struct sem_info *userInfo, size_t size)
1322 {
1323 	struct sem_info info;
1324 	status_t status;
1325 
1326 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo))
1327 		return B_BAD_ADDRESS;
1328 
1329 	status = _get_sem_info(id, &info, size);
1330 	if (status == B_OK && user_memcpy(userInfo, &info, size) < B_OK)
1331 		return B_BAD_ADDRESS;
1332 
1333 	return status;
1334 }
1335 
1336 
1337 status_t
1338 _user_get_next_sem_info(team_id team, int32 *userCookie, struct sem_info *userInfo,
1339 	size_t size)
1340 {
1341 	struct sem_info info;
1342 	int32 cookie;
1343 	status_t status;
1344 
1345 	if (userCookie == NULL || userInfo == NULL
1346 		|| !IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
1347 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
1348 		return B_BAD_ADDRESS;
1349 
1350 	status = _get_next_sem_info(team, &cookie, &info, size);
1351 
1352 	if (status == B_OK) {
1353 		if (user_memcpy(userInfo, &info, size) < B_OK
1354 			|| user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK)
1355 			return B_BAD_ADDRESS;
1356 	}
1357 
1358 	return status;
1359 }
1360 
1361 
1362 status_t
1363 _user_set_sem_owner(sem_id id, team_id team)
1364 {
1365 	return set_sem_owner(id, team);
1366 }
1367