xref: /haiku/src/system/kernel/sem.cpp (revision 7a74a5df454197933bc6e80a542102362ee98703)
/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Semaphore code */


#include <sem.h>

#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <arch/int.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <ksignal.h>
#include <kscheduler.h>
#include <listeners.h>
#include <scheduling_analysis.h>
#include <smp.h>
#include <syscall_restart.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vfs.h>
#include <vm/vm_page.h>
#include <wait_for_objects.h>

#include "kernel_debug_config.h"


//#define TRACE_SEM
#ifdef TRACE_SEM
#	define TRACE(x) dprintf_no_syslog x
#else
#	define TRACE(x) ;
#endif

//#define KTRACE_SEM
#ifdef KTRACE_SEM
#	define KTRACE(x...) ktrace_printf(x)
#else
#	define KTRACE(x...) do {} while (false)
#endif


// Locking:
// * sSemsSpinlock: Protects the semaphore free list (sFreeSemsHead,
//   sFreeSemsTail), Team::sem_list, and, together with sem_entry::lock,
//   write access to sem_entry::owner/team_link.
// * sem_entry::lock: Protects all sem_entry members. owner and team_link
//   additionally need sSemsSpinlock for write access.
//   lock itself doesn't need protection -- sem_entry objects are never
//   deleted.
//
// The locking order is sSemsSpinlock -> sem_entry::lock -> scheduler lock.
// All semaphores live in the sSems array (sem_entry[]). Access by sem_id
// requires computing the object index (id % sMaxSems), acquiring the
// respective sem_entry::lock, and verifying afterwards that sem_entry::id
// matches.


struct queued_thread : DoublyLinkedListLinkImpl<queued_thread> {
	queued_thread(Thread *thread, int32 count)
		:
		thread(thread),
		count(count),
		queued(false)
	{
	}

	Thread	*thread;
	int32	count;
	bool	queued;
};

typedef DoublyLinkedList<queued_thread> ThreadQueue;

struct sem_entry {
	union {
		// when slot in use
		struct {
			struct list_link	team_link;
			int32				count;
			int32				net_count;
									// count + acquisition count of all blocked
									// threads
			char*				name;
			team_id				owner;
			select_info*		select_infos;
			thread_id			last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
			int32				last_acquire_count;
			thread_id			last_releaser;
			int32				last_release_count;
#endif
		} used;

		// when slot unused
		struct {
			sem_id				next_id;
			struct sem_entry*	next;
		} unused;
	} u;

	sem_id				id;
	spinlock			lock;	// protects only the id field when unused
	ThreadQueue			queue;	// should be in u.used, but has a constructor
};

static const int32 kMaxSemaphores = 65536;
static int32 sMaxSems = 4096;
	// Final value is computed based on the amount of available memory
static int32 sUsedSems = 0;

static struct sem_entry *sSems = NULL;
static bool sSemsActive = false;
static struct sem_entry	*sFreeSemsHead = NULL;
static struct sem_entry	*sFreeSemsTail = NULL;

static spinlock sSemsSpinlock = B_SPINLOCK_INITIALIZER;
#define GRAB_SEM_LIST_LOCK()     acquire_spinlock(&sSemsSpinlock)
#define RELEASE_SEM_LIST_LOCK()  release_spinlock(&sSemsSpinlock)
#define GRAB_SEM_LOCK(s)         acquire_spinlock(&(s).lock)
#define RELEASE_SEM_LOCK(s)      release_spinlock(&(s).lock)
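
// A minimal illustrative sketch (not part of the kernel proper) of the
// lookup pattern required by the locking notes above: map the ID to its
// slot, take the entry's spinlock with interrupts disabled, then verify
// that sem_entry::id still matches before touching u.used. Every accessor
// in this file follows this scheme; "with_sem_locked" is a hypothetical
// name.
//
//	static status_t
//	with_sem_locked(sem_id id)
//	{
//		int32 slot = id % sMaxSems;
//		cpu_status state = disable_interrupts();
//		GRAB_SEM_LOCK(sSems[slot]);
//		if (sSems[slot].id != id) {
//			// slot is free or was recycled for another ID
//			RELEASE_SEM_LOCK(sSems[slot]);
//			restore_interrupts(state);
//			return B_BAD_SEM_ID;
//		}
//		// ... operate on sSems[slot].u.used ...
//		RELEASE_SEM_LOCK(sSems[slot]);
//		restore_interrupts(state);
//		return B_OK;
//	}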


static int
dump_sem_list(int argc, char** argv)
{
	const char* name = NULL;
	team_id owner = -1;
	thread_id last = -1;
	int32 i;

	if (argc > 2) {
		if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
			owner = strtoul(argv[2], NULL, 0);
		else if (!strcmp(argv[1], "name"))
			name = argv[2];
		else if (!strcmp(argv[1], "last"))
			last = strtoul(argv[2], NULL, 0);
	} else if (argc > 1)
		owner = strtoul(argv[1], NULL, 0);

	kprintf("sem            id count   team   last  name\n");

	for (i = 0; i < sMaxSems; i++) {
		struct sem_entry* sem = &sSems[i];
		if (sem->id < 0
			|| (last != -1 && sem->u.used.last_acquirer != last)
			|| (name != NULL && strstr(sem->u.used.name, name) == NULL)
			|| (owner != -1 && sem->u.used.owner != owner))
			continue;

		kprintf("%p %6ld %5ld %6ld %6ld  %s\n", sem, sem->id,
			sem->u.used.count, sem->u.used.owner,
			sem->u.used.last_acquirer > 0 ? sem->u.used.last_acquirer : 0,
			sem->u.used.name);
	}

	return 0;
}


static void
dump_sem(struct sem_entry* sem)
{
	kprintf("SEM: %p\n", sem);
	kprintf("id:      %ld (%#lx)\n", sem->id, sem->id);
	if (sem->id >= 0) {
		kprintf("name:    '%s'\n", sem->u.used.name);
		kprintf("owner:   %ld\n", sem->u.used.owner);
		kprintf("count:   %ld\n", sem->u.used.count);
		kprintf("queue:  ");
		if (!sem->queue.IsEmpty()) {
			ThreadQueue::Iterator it = sem->queue.GetIterator();
			while (queued_thread* entry = it.Next())
				kprintf(" %ld", entry->thread->id);
			kprintf("\n");
		} else
			kprintf(" -\n");

		set_debug_variable("_sem", (addr_t)sem);
		set_debug_variable("_semID", sem->id);
		set_debug_variable("_owner", sem->u.used.owner);

#if DEBUG_SEM_LAST_ACQUIRER
		kprintf("last acquired by: %ld, count: %ld\n",
			sem->u.used.last_acquirer, sem->u.used.last_acquire_count);
		kprintf("last released by: %ld, count: %ld\n",
			sem->u.used.last_releaser, sem->u.used.last_release_count);

		if (sem->u.used.last_releaser != 0)
			set_debug_variable("_releaser", sem->u.used.last_releaser);
		else
			unset_debug_variable("_releaser");
#else
		kprintf("last acquired by: %ld\n", sem->u.used.last_acquirer);
#endif

		if (sem->u.used.last_acquirer != 0)
			set_debug_variable("_acquirer", sem->u.used.last_acquirer);
		else
			unset_debug_variable("_acquirer");
	} else {
		kprintf("next:    %p\n", sem->u.unused.next);
		kprintf("next_id: %ld\n", sem->u.unused.next_id);
	}
}


static int
dump_sem_info(int argc, char **argv)
{
	bool found = false;
	addr_t num;
	int32 i;

	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	num = strtoul(argv[1], NULL, 0);

	if (IS_KERNEL_ADDRESS(num)) {
		dump_sem((struct sem_entry *)num);
		return 0;
	} else if (num >= 0) {
		uint32 slot = num % sMaxSems;
		if (sSems[slot].id != (int)num) {
			kprintf("sem %ld (%#lx) doesn't exist!\n", num, num);
			return 0;
		}

		dump_sem(&sSems[slot]);
		return 0;
	}

	// walk through the sem list, trying to match name
	for (i = 0; i < sMaxSems; i++) {
		if (sSems[i].u.used.name != NULL
			&& strcmp(argv[1], sSems[i].u.used.name) == 0) {
			dump_sem(&sSems[i]);
			found = true;
		}
	}

	if (!found)
		kprintf("sem \"%s\" doesn't exist!\n", argv[1]);
	return 0;
}


/*!	\brief Appends a semaphore slot to the free list.

	The semaphore list must be locked.
	The slot's id field is not changed. It should already be set to -1.

	\param slot The index of the semaphore slot.
	\param nextID The ID the slot will get when reused. If < 0 the \a slot
		   index is used as the ID.
*/
static void
free_sem_slot(int slot, sem_id nextID)
{
	struct sem_entry *sem = sSems + slot;
	// set next_id to the next possible value; for sanity check the current ID
	if (nextID < 0)
		sem->u.unused.next_id = slot;
	else
		sem->u.unused.next_id = nextID;
	// append the entry to the list
	if (sFreeSemsTail)
		sFreeSemsTail->u.unused.next = sem;
	else
		sFreeSemsHead = sem;
	sFreeSemsTail = sem;
	sem->u.unused.next = NULL;
}
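
// Worked example of the resulting ID recycling (illustrative): slot 5 is
// initialized with next_id = 5, so the first semaphore created in it gets
// ID 5. When that semaphore is deleted, uninit_sem_locked() calls
// free_sem_slot(5, 5 + sMaxSems), so the next occupant gets ID
// 5 + sMaxSems, then 5 + 2 * sMaxSems, and so on. Every generation still
// maps to slot 5 via id % sMaxSems, while stale IDs fail the
// sem_entry::id check during lookup.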


static inline void
notify_sem_select_events(struct sem_entry* sem, uint16 events)
{
	if (sem->u.used.select_infos)
		notify_select_events_list(sem->u.used.select_infos, events);
}


/*!	Fills the sem_info structure with information from the given semaphore.
	The semaphore's lock must be held when called.
*/
static void
fill_sem_info(struct sem_entry* sem, sem_info* info, size_t size)
{
	info->sem = sem->id;
	info->team = sem->u.used.owner;
	strlcpy(info->name, sem->u.used.name, sizeof(info->name));
	info->count = sem->u.used.count;
	info->latest_holder = sem->u.used.last_acquirer;
}


/*!	You must call this function with interrupts disabled, and the semaphore's
	spinlock held. Note that it will unlock the spinlock itself.
	Since it cannot free() the semaphore's name with interrupts turned off, it
	will return that one in \a _name.
*/
static void
uninit_sem_locked(struct sem_entry& sem, char** _name)
{
	KTRACE("delete_sem(sem: %ld)", sem.id);

	notify_sem_select_events(&sem, B_EVENT_INVALID);
	sem.u.used.select_infos = NULL;

	// free any threads waiting for this semaphore
	SpinLocker schedulerLocker(gSchedulerLock);
	while (queued_thread* entry = sem.queue.RemoveHead()) {
		entry->queued = false;
		thread_unblock_locked(entry->thread, B_BAD_SEM_ID);
	}
	schedulerLocker.Unlock();

	int32 id = sem.id;
	sem.id = -1;
	*_name = sem.u.used.name;
	sem.u.used.name = NULL;

	RELEASE_SEM_LOCK(sem);

	// append slot to the free list
	GRAB_SEM_LIST_LOCK();
	free_sem_slot(id % sMaxSems, id + sMaxSems);
	atomic_add(&sUsedSems, -1);
	RELEASE_SEM_LIST_LOCK();
}


static status_t
delete_sem_internal(sem_id id, bool checkPermission)
{
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;

	int32 slot = id % sMaxSems;

	cpu_status state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_SEM_LIST_LOCK();
		restore_interrupts(state);
		TRACE(("delete_sem: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	if (checkPermission
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_SEM_LIST_LOCK();
		restore_interrupts(state);
		dprintf("thread %ld tried to delete kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		return B_NOT_ALLOWED;
	}

	if (sSems[slot].u.used.owner >= 0) {
		list_remove_link(&sSems[slot].u.used.team_link);
		sSems[slot].u.used.owner = -1;
	} else
		panic("sem %ld has no owner", id);

	RELEASE_SEM_LIST_LOCK();

	char* name;
	uninit_sem_locked(sSems[slot], &name);

	SpinLocker schedulerLocker(gSchedulerLock);
	scheduler_reschedule_if_necessary_locked();
	schedulerLocker.Unlock();

	restore_interrupts(state);

	free(name);
	return B_OK;
}


//	#pragma mark - Private Kernel API


// TODO: Name clash with POSIX sem_init()... (we could just use C++)
status_t
haiku_sem_init(kernel_args *args)
{
	area_id area;
	int32 i;

	TRACE(("sem_init: entry\n"));

	// compute maximal number of semaphores depending on the available memory
	// 128 MB -> 16384 semaphores, 448 kB fixed array size
	// 256 MB -> 32768, 896 kB
	// 512 MB and more -> 65536, 1.75 MB
	i = vm_page_num_pages() / 2;
	while (sMaxSems < i && sMaxSems < kMaxSemaphores)
		sMaxSems <<= 1;
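
	// Worked example of the sizing above (illustrative, assuming 4 kB
	// pages): with 128 MB of RAM vm_page_num_pages() yields 32768 pages,
	// so i = 16384 and sMaxSems doubles 4096 -> 8192 -> 16384. From 512 MB
	// upwards i >= 65536, and kMaxSemaphores caps the doubling at 65536.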

	// create and initialize semaphore table
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area = create_area_etc(B_SYSTEM_TEAM, "sem_table",
		sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
		&virtualRestrictions, &physicalRestrictions, (void**)&sSems);
	if (area < 0)
		panic("unable to allocate semaphore table!\n");

	memset(sSems, 0, sizeof(struct sem_entry) * sMaxSems);
	for (i = 0; i < sMaxSems; i++) {
		sSems[i].id = -1;
		free_sem_slot(i, i);
	}

	// add debugger commands
	add_debugger_command_etc("sems", &dump_sem_list,
		"Dump a list of all active semaphores (for team, with name, etc.)",
		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]"
			" | (\"last\" <last acquirer>)\n"
		"Prints a list of all active semaphores meeting the given\n"
		"requirement. If no argument is given, all sems are listed.\n"
		"  <team>             - The team owning the semaphores.\n"
		"  <name>             - Part of the name of the semaphores.\n"
		"  <last acquirer>    - The thread that last acquired the semaphore.\n"
		, 0);
	add_debugger_command_etc("sem", &dump_sem_info,
		"Dump info about a particular semaphore",
		"<sem>\n"
		"Prints info about the specified semaphore.\n"
		"  <sem>  - pointer to the semaphore structure, semaphore ID, or name\n"
		"           of the semaphore to print info for.\n", 0);

	TRACE(("sem_init: exit\n"));

	sSemsActive = true;

	return 0;
}


/*!	Creates a semaphore with the given parameters.

	This function is only available from within the kernel, and
	should not be made public - if possible, we should remove it
	completely (and have only create_sem() exported).
*/
sem_id
create_sem_etc(int32 count, const char* name, team_id owner)
{
	struct sem_entry* sem = NULL;
	cpu_status state;
	sem_id id = B_NO_MORE_SEMS;
	char* tempName;
	size_t nameLength;

	if (sSemsActive == false || sUsedSems == sMaxSems)
		return B_NO_MORE_SEMS;

	if (name == NULL)
		name = "unnamed semaphore";

	// get the owning team
	Team* team = Team::Get(owner);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	// clone the name
	nameLength = strlen(name) + 1;
	nameLength = min_c(nameLength, B_OS_NAME_LENGTH);
	tempName = (char*)malloc(nameLength);
	if (tempName == NULL)
		return B_NO_MEMORY;

	strlcpy(tempName, name, nameLength);

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	// get the first slot from the free list
	sem = sFreeSemsHead;
	if (sem) {
		// remove it from the free list
		sFreeSemsHead = sem->u.unused.next;
		if (!sFreeSemsHead)
			sFreeSemsTail = NULL;

		// init the slot
		GRAB_SEM_LOCK(*sem);
		sem->id = sem->u.unused.next_id;
		sem->u.used.count = count;
		sem->u.used.net_count = count;
		new(&sem->queue) ThreadQueue;
		sem->u.used.name = tempName;
		sem->u.used.owner = team->id;
		sem->u.used.select_infos = NULL;
		id = sem->id;

		list_add_item(&team->sem_list, &sem->u.used.team_link);

		RELEASE_SEM_LOCK(*sem);

		atomic_add(&sUsedSems, 1);

		KTRACE("create_sem_etc(count: %ld, name: %s, owner: %ld) -> %ld",
			count, name, owner, id);

		T_SCHEDULING_ANALYSIS(CreateSemaphore(id, name));
		NotifyWaitObjectListeners(&WaitObjectListener::SemaphoreCreated, id,
			name);
	}

	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	if (sem == NULL)
		free(tempName);

	return id;
}
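
// A hedged usage sketch for kernel callers (error handling abbreviated;
// "example queue" is an arbitrary name):
//
//	sem_id sem = create_sem_etc(0, "example queue",
//		team_get_kernel_team_id());
//	if (sem < 0)
//		return sem;	// B_NO_MORE_SEMS, B_NO_MEMORY, or B_BAD_TEAM_ID
//	// ... release_sem()/acquire_sem() pairs ...
//	delete_sem(sem);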


status_t
select_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;
	status_t error = B_OK;

	if (id < 0)
		return B_BAD_SEM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		// bad sem ID
		error = B_BAD_SEM_ID;
	} else if (!kernel
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		// kernel semaphore, but call from userland
		error = B_NOT_ALLOWED;
	} else {
		info->selected_events &= B_EVENT_ACQUIRE_SEMAPHORE | B_EVENT_INVALID;

		if (info->selected_events != 0) {
			info->next = sSems[slot].u.used.select_infos;
			sSems[slot].u.used.select_infos = info;

			if (sSems[slot].u.used.count > 0)
				notify_select_events(info, B_EVENT_ACQUIRE_SEMAPHORE);
		}
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return error;
}


status_t
deselect_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;

	if (id < 0)
		return B_BAD_SEM_ID;

	if (info->selected_events == 0)
		return B_OK;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id == id) {
		select_info** infoLocation = &sSems[slot].u.used.select_infos;
		while (*infoLocation != NULL && *infoLocation != info)
			infoLocation = &(*infoLocation)->next;

		if (*infoLocation == info)
			*infoLocation = info->next;
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}


/*!	Forcibly removes a thread from a semaphore's wait queue. May have to wake
	up other threads in the process.
	Must be called with semaphore lock held. The thread lock must not be held.
*/
static void
remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
{
	if (!entry->queued)
		return;

	sem->queue.Remove(entry);
	entry->queued = false;
	sem->u.used.count += entry->count;

	// We're done with this entry. We only have to check whether other threads
	// need unblocking, too.

	// Now see if more threads need to be woken up. We get the scheduler lock
	// for that time, so the blocking state of threads won't change (due to
	// interruption or timeout). We need that lock anyway when unblocking a
	// thread.
	SpinLocker schedulerLocker(gSchedulerLock);

	while ((entry = sem->queue.Head()) != NULL) {
		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied, unblock
			// it. Otherwise we can't unblock any other thread.
			if (entry->count > sem->u.used.net_count)
				break;

			thread_unblock_locked(entry->thread, B_OK);
			sem->u.used.net_count -= entry->count;
		} else {
			// The thread is no longer waiting, but still queued, which means
			// acquisition failed and we can just remove it.
			sem->u.used.count += entry->count;
		}

		sem->queue.Remove(entry);
		entry->queued = false;
	}

	schedulerLocker.Unlock();

	// select notification, if the semaphore is now acquirable
	if (sem->u.used.count > 0)
		notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE);
}


/*!	Deletes all semaphores belonging to a particular team.
*/
void
sem_delete_owned_sems(Team* team)
{
	while (true) {
		char* name;

		{
			// get the next semaphore from the team's sem list
			InterruptsLocker locker;
			SpinLocker semListLocker(sSemsSpinlock);
			sem_entry* sem = (sem_entry*)list_remove_head_item(&team->sem_list);
			if (sem == NULL)
				break;

			// delete the semaphore
			GRAB_SEM_LOCK(*sem);
			semListLocker.Unlock();
			uninit_sem_locked(*sem, &name);
		}

		free(name);
	}

	scheduler_reschedule_if_necessary();
}


int32
sem_max_sems(void)
{
	return sMaxSems;
}


int32
sem_used_sems(void)
{
	return sUsedSems;
}


//	#pragma mark - Public Kernel API


sem_id
create_sem(int32 count, const char* name)
{
	return create_sem_etc(count, name, team_get_kernel_team_id());
}


status_t
delete_sem(sem_id id)
{
	return delete_sem_internal(id, false);
}


status_t
acquire_sem(sem_id id)
{
	return switch_sem_etc(-1, id, 1, 0, 0);
}


status_t
acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	return switch_sem_etc(-1, id, count, flags, timeout);
}
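
// Illustrative timeout usage: with B_RELATIVE_TIMEOUT a timeout of 0
// turns an exhausted count into an immediate B_WOULD_BLOCK, while a
// positive timeout yields B_TIMED_OUT on expiry (see switch_sem_etc()
// below). A sketch waiting at most one second:
//
//	status_t status = acquire_sem_etc(sem, 1, B_RELATIVE_TIMEOUT, 1000000);
//	if (status == B_TIMED_OUT) {
//		// nothing was released within the second
//	}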


status_t
switch_sem(sem_id toBeReleased, sem_id toBeAcquired)
{
	return switch_sem_etc(toBeReleased, toBeAcquired, 1, 0, 0);
}
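
// switch_sem() releases one semaphore and acquires another in a single
// call; switch_sem_etc() only performs the release after the caller has
// been queued on the semaphore to be acquired, so no wakeup can be lost
// in between. A hypothetical condition-variable-style wait ("mutex" and
// "event" are assumed to be valid semaphores):
//
//	switch_sem(mutex, event);	// atomically drop mutex, wait for event
//	acquire_sem(mutex);			// re-acquire the mutex once woken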


status_t
switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
	uint32 flags, bigtime_t timeout)
{
	int slot = id % sMaxSems;
	int state;
	status_t status = B_OK;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	if (!are_interrupts_enabled()) {
		panic("switch_sem_etc: called with interrupts disabled for sem %ld\n",
			id);
	}

	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0
		|| (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
			== (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {
		return B_BAD_VALUE;
	}

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("switch_sem_etc: bad sem %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to acquire kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		status = B_NOT_ALLOWED;
		goto err;
	}

	if (sSems[slot].u.used.count - count < 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
			// immediate timeout
			status = B_WOULD_BLOCK;
			goto err;
		} else if ((flags & B_ABSOLUTE_TIMEOUT) != 0 && timeout < 0) {
			// absolute negative timeout
			status = B_TIMED_OUT;
			goto err;
		}
	}

	KTRACE("switch_sem_etc(semToBeReleased: %ld, sem: %ld, count: %ld, "
		"flags: 0x%lx, timeout: %lld)", semToBeReleased, id, count, flags,
		timeout);

	if ((sSems[slot].u.used.count -= count) < 0) {
		// we need to block
		Thread *thread = thread_get_current_thread();

		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
			" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));

		// do a quick check to see if the thread has any pending signals
		// this should catch most of the cases where the thread had a signal
		SpinLocker schedulerLocker(gSchedulerLock);
		if (thread_is_interrupted(thread, flags)) {
			schedulerLocker.Unlock();
			sSems[slot].u.used.count += count;
			status = B_INTERRUPTED;
				// the other semaphore will be released later
			goto err;
		}

		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
			timeout = B_INFINITE_TIMEOUT;

		// enqueue in the semaphore queue and get ready to wait
		queued_thread queueEntry(thread, count);
		sSems[slot].queue.Add(&queueEntry);
		queueEntry.queued = true;

		thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE,
			(void*)(addr_t)id);

		schedulerLocker.Unlock();
		RELEASE_SEM_LOCK(sSems[slot]);

		// release the other semaphore, if any
		if (semToBeReleased >= 0) {
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
			semToBeReleased = -1;
		}

		schedulerLocker.Lock();

		status_t acquireStatus = timeout == B_INFINITE_TIMEOUT
			? thread_block_locked(thread)
			: thread_block_with_timeout_locked(flags, timeout);

		schedulerLocker.Unlock();
		GRAB_SEM_LOCK(sSems[slot]);

		// If we're still queued, this means the acquisition failed, and we
		// need to remove our entry and (potentially) wake up other threads.
		if (queueEntry.queued)
			remove_thread_from_sem(&queueEntry, &sSems[slot]);

		if (acquireStatus >= B_OK) {
			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
			sSems[slot].u.used.last_acquire_count = count;
#endif
		}

		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);

		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
			thread->name));
		KTRACE("switch_sem_etc() done: 0x%lx", acquireStatus);
		return acquireStatus;
	} else {
		sSems[slot].u.used.net_count -= count;
		sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
		sSems[slot].u.used.last_acquire_count = count;
#endif
	}

err:
	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	if (status == B_INTERRUPTED && semToBeReleased >= B_OK) {
		// depending on when we were interrupted, we may still need to
		// release the other semaphore to always leave things in a
		// consistent state
		release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
	}

#if 0
	if (status == B_NOT_ALLOWED)
		_user_debugger("Thread tried to acquire kernel semaphore.");
#endif

	KTRACE("switch_sem_etc() done: 0x%lx", status);

	return status;
}
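
// Worked example of the count/net_count bookkeeping (illustrative): for a
// semaphore created with count 1, a thread acquiring with count 3 drives
// u.used.count to -2 and blocks, while net_count stays at 1. A subsequent
// release_sem_etc() of 2 satisfies the waiter (3 <= net_count 1 +
// released 2), unblocks it, and leaves both count and net_count at 0.
// count thus reflects immediate availability, whereas net_count also
// credits the acquisition counts of blocked threads.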


status_t
release_sem(sem_id id)
{
	return release_sem_etc(id, 1, 0);
}


status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	int32 slot = id % sMaxSems;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;

	InterruptsLocker _;
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		return B_NOT_ALLOWED;
	}

	KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,
		flags);

	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	if (flags & B_RELEASE_ALL) {
		count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			return B_OK;

		// Don't release more than necessary -- there might be interrupted/
		// timed out threads in the queue.
		flags |= B_RELEASE_IF_WAITING_ONLY;
	}

	// Grab the scheduler lock, so thread_is_blocked() is reliable (due to
	// possible interruptions or timeouts, it wouldn't be otherwise).
	SpinLocker schedulerLocker(gSchedulerLock);

	while (count > 0) {
		queued_thread* entry = sSems[slot].queue.Head();
		if (entry == NULL) {
			if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
			}
			break;
		}

		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied,
			// unblock it. Otherwise we can't unblock any other thread.
			if (entry->count > sSems[slot].u.used.net_count + count) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
				break;
			}

			thread_unblock_locked(entry->thread, B_OK);

			int delta = min_c(count, entry->count);
			sSems[slot].u.used.count += delta;
			sSems[slot].u.used.net_count += delta - entry->count;
			count -= delta;
		} else {
			// The thread is no longer waiting, but still queued, which
			// means acquisition failed and we can just remove it.
			sSems[slot].u.used.count += entry->count;
		}

		sSems[slot].queue.Remove(entry);
		entry->queued = false;
	}

	schedulerLocker.Unlock();

	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	// If we've unblocked another thread, reschedule, unless we've been
	// explicitly told not to.
	if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
		semLocker.Unlock();
		schedulerLocker.Lock();
		scheduler_reschedule_if_necessary_locked();
	}

	return B_OK;
}
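
// Usage sketches (illustrative): wake all threads currently waiting
// without crediting future acquirers, and batch several releases without
// an immediate reschedule:
//
//	release_sem_etc(sem, 0, B_RELEASE_ALL);
//	release_sem_etc(sem, 5, B_DO_NOT_RESCHEDULE);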


status_t
get_sem_count(sem_id id, int32 *_count)
{
	int slot;
	int state;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (_count == NULL)
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("sem_get_count: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	*_count = sSems[slot].u.used.count;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}
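
// Note that the returned count is only a snapshot: other threads may
// acquire or release the semaphore before the caller inspects it. A
// negative value indicates blocked acquirers:
//
//	int32 count;
//	if (get_sem_count(sem, &count) == B_OK && count < 0) {
//		// roughly -count acquisition requests are currently blocked
//	}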


/*!	Called by the get_sem_info() macro. */
status_t
_get_sem_info(sem_id id, struct sem_info *info, size_t size)
{
	status_t status = B_OK;
	int state;
	int slot;

	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		status = B_BAD_SEM_ID;
		TRACE(("get_sem_info: invalid sem_id %ld\n", id));
	} else
		fill_sem_info(&sSems[slot], info, size);

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return status;
}


/*!	Called by the get_next_sem_info() macro. */
status_t
_get_next_sem_info(team_id teamID, int32 *_cookie, struct sem_info *info,
	size_t size)
{
	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (_cookie == NULL || info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;
	if (teamID < 0)
		return B_BAD_TEAM_ID;

	Team* team = Team::Get(teamID);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	InterruptsSpinLocker semListLocker(sSemsSpinlock);

	// TODO: find a way to iterate the list that is more reliable
	sem_entry* sem = (sem_entry*)list_get_first_item(&team->sem_list);
	int32 newIndex = *_cookie;
	int32 index = 0;
	bool found = false;

	while (!found) {
		// find the next entry to be returned
		while (sem != NULL && index < newIndex) {
			sem = (sem_entry*)list_get_next_item(&team->sem_list, sem);
			index++;
		}

		if (sem == NULL)
			return B_BAD_VALUE;

		GRAB_SEM_LOCK(*sem);

		if (sem->id != -1 && sem->u.used.owner == team->id) {
			// found one!
			fill_sem_info(sem, info, size);
			newIndex = index + 1;
			found = true;
		} else
			newIndex++;

		RELEASE_SEM_LOCK(*sem);
	}

	if (!found)
		return B_BAD_VALUE;

	*_cookie = newIndex;
	return B_OK;
}
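
// A hedged kernel-side iteration sketch mirroring the public
// get_next_sem_info() macro ("teamID" assumed to be a valid team):
//
//	int32 cookie = 0;
//	sem_info info;
//	while (_get_next_sem_info(teamID, &cookie, &info,
//			sizeof(info)) == B_OK) {
//		dprintf("sem %ld: %s\n", info.sem, info.name);
//	}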


status_t
set_sem_owner(sem_id id, team_id newTeamID)
{
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (newTeamID < 0)
		return B_BAD_TEAM_ID;

	int32 slot = id % sMaxSems;

	// get the new team
	Team* newTeam = Team::Get(newTeamID);
	if (newTeam == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> newTeamReference(newTeam, true);

	InterruptsSpinLocker semListLocker(sSemsSpinlock);
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("set_sem_owner: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	list_remove_link(&sSems[slot].u.used.team_link);
	list_add_item(&newTeam->sem_list, &sSems[slot].u.used.team_link);

	sSems[slot].u.used.owner = newTeam->id;
	return B_OK;
}


/*!	Returns the name of the semaphore. The name is not copied, so the caller
	must make sure that the semaphore remains alive as long as the name is
	used.
*/
const char*
sem_get_name_unsafe(sem_id id)
{
	int slot = id % sMaxSems;

	if (sSemsActive == false || id < 0 || sSems[slot].id != id)
		return NULL;

	return sSems[slot].u.used.name;
}


//	#pragma mark - Syscalls


sem_id
_user_create_sem(int32 count, const char *userName)
{
	char name[B_OS_NAME_LENGTH];

	if (userName == NULL)
		return create_sem_etc(count, NULL, team_get_current_team_id());

	if (!IS_USER_ADDRESS(userName)
		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
		return B_BAD_ADDRESS;

	return create_sem_etc(count, name, team_get_current_team_id());
}


status_t
_user_delete_sem(sem_id id)
{
	return delete_sem_internal(id, true);
}


status_t
_user_acquire_sem(sem_id id)
{
	status_t error = switch_sem_etc(-1, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	return syscall_restart_handle_post(error);
}


status_t
_user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(-1, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	return syscall_restart_handle_timeout_post(error, timeout);
}


status_t
_user_switch_sem(sem_id releaseSem, sem_id id)
{
	status_t error = switch_sem_etc(releaseSem, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	if (releaseSem < 0)
		return syscall_restart_handle_post(error);

	return error;
}


status_t
_user_switch_sem_etc(sem_id releaseSem, sem_id id, int32 count, uint32 flags,
	bigtime_t timeout)
{
	if (releaseSem < 0)
		syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(releaseSem, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	if (releaseSem < 0)
		return syscall_restart_handle_timeout_post(error, timeout);

	return error;
}


status_t
_user_release_sem(sem_id id)
{
	return release_sem_etc(id, 1, B_CHECK_PERMISSION);
}


status_t
_user_release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	return release_sem_etc(id, count, flags | B_CHECK_PERMISSION);
}


status_t
_user_get_sem_count(sem_id id, int32 *userCount)
{
	status_t status;
	int32 count;

	if (userCount == NULL || !IS_USER_ADDRESS(userCount))
		return B_BAD_ADDRESS;

	status = get_sem_count(id, &count);
	if (status == B_OK && user_memcpy(userCount, &count, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_sem_info(sem_id id, struct sem_info *userInfo, size_t size)
{
	struct sem_info info;
	status_t status;

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_sem_info(id, &info, size);
	if (status == B_OK && user_memcpy(userInfo, &info, size) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_next_sem_info(team_id team, int32 *userCookie,
	struct sem_info *userInfo, size_t size)
{
	struct sem_info info;
	int32 cookie;
	status_t status;

	if (userCookie == NULL || userInfo == NULL
		|| !IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_sem_info(team, &cookie, &info, size);

	if (status == B_OK) {
		if (user_memcpy(userInfo, &info, size) < B_OK
			|| user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK)
			return B_BAD_ADDRESS;
	}

	return status;
}


status_t
_user_set_sem_owner(sem_id id, team_id team)
{
	return set_sem_owner(id, team);
}
1358