xref: /haiku/src/system/kernel/sem.cpp (revision 17889a8c70dbb3d59c1412f6431968753c767bab)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*! Semaphore code */
12 
13 
14 #include <sem.h>
15 
16 #include <stdlib.h>
17 #include <string.h>
18 
19 #include <OS.h>
20 
21 #include <arch/int.h>
22 #include <boot/kernel_args.h>
23 #include <cpu.h>
24 #include <debug.h>
25 #include <int.h>
26 #include <kernel.h>
27 #include <ksignal.h>
28 #include <kscheduler.h>
29 #include <listeners.h>
30 #include <scheduling_analysis.h>
31 #include <smp.h>
32 #include <syscall_restart.h>
33 #include <team.h>
34 #include <thread.h>
35 #include <util/AutoLock.h>
36 #include <util/DoublyLinkedList.h>
37 #include <vfs.h>
38 #include <vm/vm_page.h>
39 #include <wait_for_objects.h>
40 
41 #include "kernel_debug_config.h"
42 
43 
44 //#define TRACE_SEM
45 #ifdef TRACE_SEM
46 #	define TRACE(x) dprintf_no_syslog x
47 #else
48 #	define TRACE(x) ;
49 #endif
50 
51 //#define KTRACE_SEM
52 #ifdef KTRACE_SEM
53 #	define KTRACE(x...) ktrace_printf(x)
54 #else
55 #	define KTRACE(x...) do {} while (false)
56 #endif
57 
58 
59 // Locking:
60 // * sSemsSpinlock: Protects the semaphore free list (sFreeSemsHead,
61 //   sFreeSemsTail), Team::sem_list, and together with sem_entry::lock
62 //   write access to sem_entry::owner/team_link.
// * sem_entry::lock: Protects all sem_entry members. owner, team_link
//   additionally need sSemsSpinlock for write access.
65 //   lock itself doesn't need protection -- sem_entry objects are never deleted.
66 //
67 // The locking order is sSemsSpinlock -> sem_entry::lock -> scheduler lock. All
68 // semaphores are in the sSems array (sem_entry[]). Access by sem_id requires
69 // computing the object index (id % sMaxSems), locking the respective
70 // sem_entry::lock and verifying that sem_entry::id matches afterwards.
71 
72 
// An entry in a semaphore's wait queue. The entry lives on the waiting
// thread's stack (see switch_sem_etc()), so it is only valid while that
// thread waits.
struct queued_thread : DoublyLinkedListLinkImpl<queued_thread> {
	queued_thread(Thread *thread, int32 count)
		:
		thread(thread),
		count(count),
		queued(false)
	{
	}

	Thread	*thread;	// the waiting thread
	int32	count;		// acquisition count the thread asked for
	bool	queued;		// whether the entry is currently linked into the queue
};
86 
87 typedef DoublyLinkedList<queued_thread> ThreadQueue;
88 
// One slot of the global semaphore table (sSems). A slot is either in use
// (id >= 0, u.used valid) or on the free list (id == -1, u.unused valid).
struct sem_entry {
	union {
		// when slot in use
		struct {
			struct list_link	team_link;	// link in owner team's sem_list
			int32				count;		// current semaphore count
			int32				net_count;
									// count + acquisition count of all blocked
									// threads
			char*				name;		// malloc()ed name, freed on delete
			team_id				owner;		// owning team's ID
			select_info*		select_infos;	// select()/poll() waiters
			thread_id			last_acquirer;
									// negated by release_sem_etc() to mark
									// that the last operation was a release
#if DEBUG_SEM_LAST_ACQUIRER
			int32				last_acquire_count;
			thread_id			last_releaser;
			int32				last_release_count;
#endif
		} used;

		// when slot unused
		struct {
			sem_id				next_id;	// the ID the slot gets when reused
			struct sem_entry*	next;		// next slot in the free list
		} unused;
	} u;

	sem_id				id;
	spinlock			lock;	// protects only the id field when unused
	ThreadQueue			queue;	// should be in u.used, but has a constructor
};
120 
static const int32 kMaxSemaphores = 65536;	// hard upper limit for sMaxSems
static int32 sMaxSems = 4096;
	// Final value is computed based on the amount of available memory
static int32 sUsedSems = 0;
	// number of currently allocated semaphores (updated atomically)

// semaphore table and free slot list (both protected by sSemsSpinlock)
static struct sem_entry *sSems = NULL;
static bool sSemsActive = false;
static struct sem_entry	*sFreeSemsHead = NULL;
static struct sem_entry	*sFreeSemsTail = NULL;

static spinlock sSemsSpinlock = B_SPINLOCK_INITIALIZER;
132 
133 
/*!	KDL command "sems": lists all active semaphores, optionally filtered by
	owning team, a name substring, or the last acquiring thread.
*/
static int
dump_sem_list(int argc, char** argv)
{
	const char* name = NULL;
	team_id owner = -1;
	thread_id last = -1;
	int32 i;

	// parse the optional filter arguments
	if (argc > 2) {
		if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
			owner = strtoul(argv[2], NULL, 0);
		else if (!strcmp(argv[1], "name"))
			name = argv[2];
		else if (!strcmp(argv[1], "last"))
			last = strtoul(argv[2], NULL, 0);
	} else if (argc > 1)
		owner = strtoul(argv[1], NULL, 0);

	kprintf("%-*s       id count   team   last  name\n", B_PRINTF_POINTER_WIDTH,
		"sem");

	for (i = 0; i < sMaxSems; i++) {
		struct sem_entry* sem = &sSems[i];
		// skip unused slots and entries not matching the filter
		if (sem->id < 0
			|| (last != -1 && sem->u.used.last_acquirer != last)
			|| (name != NULL && strstr(sem->u.used.name, name) == NULL)
			|| (owner != -1 && sem->u.used.owner != owner))
			continue;

		kprintf("%p %6" B_PRId32 " %5" B_PRId32 " %6" B_PRId32 " "
			"%6" B_PRId32 " "
			" %s\n", sem, sem->id, sem->u.used.count,
			sem->u.used.owner,
			sem->u.used.last_acquirer > 0 ? sem->u.used.last_acquirer : 0,
			sem->u.used.name);
	}

	return 0;
}
173 
174 
/*!	KDL helper: prints the state of a single semaphore entry and sets the
	debug variables _sem, _semID, _owner, and _acquirer (plus _releaser with
	DEBUG_SEM_LAST_ACQUIRER) for subsequent debugger commands.
*/
static void
dump_sem(struct sem_entry* sem)
{
	kprintf("SEM: %p\n", sem);
	kprintf("id:      %" B_PRId32 " (%#" B_PRIx32 ")\n", sem->id, sem->id);
	if (sem->id >= 0) {
		// slot in use -- the u.used members are valid
		kprintf("name:    '%s'\n", sem->u.used.name);
		kprintf("owner:   %" B_PRId32 "\n", sem->u.used.owner);
		kprintf("count:   %" B_PRId32 "\n", sem->u.used.count);
		kprintf("queue:  ");
		if (!sem->queue.IsEmpty()) {
			ThreadQueue::Iterator it = sem->queue.GetIterator();
			while (queued_thread* entry = it.Next())
				kprintf(" %" B_PRId32, entry->thread->id);
			kprintf("\n");
		} else
			kprintf(" -\n");

		set_debug_variable("_sem", (addr_t)sem);
		set_debug_variable("_semID", sem->id);
		set_debug_variable("_owner", sem->u.used.owner);

#if DEBUG_SEM_LAST_ACQUIRER
		kprintf("last acquired by: %" B_PRId32 ", count: %" B_PRId32 "\n",
			sem->u.used.last_acquirer, sem->u.used.last_acquire_count);
		kprintf("last released by: %" B_PRId32 ", count: %" B_PRId32 "\n",
			sem->u.used.last_releaser, sem->u.used.last_release_count);

		if (sem->u.used.last_releaser != 0)
			set_debug_variable("_releaser", sem->u.used.last_releaser);
		else
			unset_debug_variable("_releaser");
#else
		kprintf("last acquired by: %" B_PRId32 "\n", sem->u.used.last_acquirer);
#endif

		if (sem->u.used.last_acquirer != 0)
			set_debug_variable("_acquirer", sem->u.used.last_acquirer);
		else
			unset_debug_variable("_acquirer");
	} else {
		// slot is on the free list -- show its linkage
		kprintf("next:    %p\n", sem->u.unused.next);
		kprintf("next_id: %" B_PRId32 "\n", sem->u.unused.next_id);
	}
}
220 
221 
/*!	KDL command "sem": dumps info about a single semaphore, looked up by
	kernel address, by semaphore ID, or by exact name.
*/
static int
dump_sem_info(int argc, char **argv)
{
	bool found = false;
	addr_t num;
	int32 i;

	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	char* endptr;
	num = strtoul(argv[1], &endptr, 0);

	if (endptr != argv[1]) {
		// numeric argument: either a sem_entry address or a sem ID
		if (IS_KERNEL_ADDRESS(num)) {
			dump_sem((struct sem_entry *)num);
			return 0;
		} else {
			uint32 slot = num % sMaxSems;
			if (sSems[slot].id != (int)num) {
				kprintf("sem %ld (%#lx) doesn't exist!\n", num, num);
				return 0;
			}

			dump_sem(&sSems[slot]);
			return 0;
		}
	}

	// walk through the sem list, trying to match name
	for (i = 0; i < sMaxSems; i++) {
		if (sSems[i].u.used.name != NULL
			&& strcmp(argv[1], sSems[i].u.used.name) == 0) {
			dump_sem(&sSems[i]);
			found = true;
		}
	}

	if (!found)
		kprintf("sem \"%s\" doesn't exist!\n", argv[1]);
	return 0;
}
266 
267 
268 /*!	\brief Appends a semaphore slot to the free list.
269 
270 	The semaphore list must be locked.
271 	The slot's id field is not changed. It should already be set to -1.
272 
273 	\param slot The index of the semaphore slot.
274 	\param nextID The ID the slot will get when reused. If < 0 the \a slot
275 		   is used.
276 */
277 static void
278 free_sem_slot(int slot, sem_id nextID)
279 {
280 	struct sem_entry *sem = sSems + slot;
281 	// set next_id to the next possible value; for sanity check the current ID
282 	if (nextID < 0)
283 		sem->u.unused.next_id = slot;
284 	else
285 		sem->u.unused.next_id = nextID;
286 	// append the entry to the list
287 	if (sFreeSemsTail)
288 		sFreeSemsTail->u.unused.next = sem;
289 	else
290 		sFreeSemsHead = sem;
291 	sFreeSemsTail = sem;
292 	sem->u.unused.next = NULL;
293 }
294 
295 
296 static inline void
297 notify_sem_select_events(struct sem_entry* sem, uint16 events)
298 {
299 	if (sem->u.used.select_infos)
300 		notify_select_events_list(sem->u.used.select_infos, events);
301 }
302 
303 
304 /*!	Fills the sem_info structure with information from the given semaphore.
305 	The semaphore's lock must be held when called.
306 */
307 static void
308 fill_sem_info(struct sem_entry* sem, sem_info* info, size_t size)
309 {
310 	info->sem = sem->id;
311 	info->team = sem->u.used.owner;
312 	strlcpy(info->name, sem->u.used.name, sizeof(info->name));
313 	info->count = sem->u.used.count;
314 	info->latest_holder = sem->u.used.last_acquirer;
315 }
316 
317 
318 /*!	You must call this function with interrupts disabled, and the semaphore's
319 	spinlock held. Note that it will unlock the spinlock itself.
320 	Since it cannot free() the semaphore's name with interrupts turned off, it
321 	will return that one in \a name.
322 */
323 static void
324 uninit_sem_locked(struct sem_entry& sem, char** _name, SpinLocker& locker)
325 {
326 	KTRACE("delete_sem(sem: %ld)", sem.u.used.id);
327 
328 	notify_sem_select_events(&sem, B_EVENT_INVALID);
329 	sem.u.used.select_infos = NULL;
330 
331 	// free any threads waiting for this semaphore
332 	while (queued_thread* entry = sem.queue.RemoveHead()) {
333 		entry->queued = false;
334 		thread_unblock(entry->thread, B_BAD_SEM_ID);
335 	}
336 
337 	int32 id = sem.id;
338 	sem.id = -1;
339 	*_name = sem.u.used.name;
340 	sem.u.used.name = NULL;
341 
342 	locker.Unlock();
343 
344 	// append slot to the free list
345 	SpinLocker _(&sSemsSpinlock);
346 	free_sem_slot(id % sMaxSems, id + sMaxSems);
347 	atomic_add(&sUsedSems, -1);
348 }
349 
350 
/*!	Deletes the semaphore \a id, unblocking all waiters with \c B_BAD_SEM_ID.
	\param checkPermission If \c true (userland path), deletion of kernel
		team semaphores is refused with \c B_NOT_ALLOWED.
*/
static status_t
delete_sem_internal(sem_id id, bool checkPermission)
{
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;

	int32 slot = id % sMaxSems;

	// lock order: sSemsSpinlock -> sem_entry::lock (see top of file)
	InterruptsLocker interruptsLocker;
	SpinLocker listLocker(sSemsSpinlock);
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("delete_sem: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	if (checkPermission
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %" B_PRId32 " tried to delete kernel semaphore "
			"%" B_PRId32 ".\n", thread_get_current_thread_id(), id);
		return B_NOT_ALLOWED;
	}

	// unlink the semaphore from its owner team's list
	if (sSems[slot].u.used.owner >= 0) {
		list_remove_link(&sSems[slot].u.used.team_link);
		sSems[slot].u.used.owner = -1;
	} else
		panic("sem %" B_PRId32 " has no owner", id);

	listLocker.Unlock();

	char* name;
	uninit_sem_locked(sSems[slot], &name, semLocker);
		// unlocks semLocker and hands back the name to free

	// unblocked waiters may have higher priority -- reschedule if needed
	SpinLocker schedulerLocker(thread_get_current_thread()->scheduler_lock);
	scheduler_reschedule_if_necessary_locked();
	schedulerLocker.Unlock();

	interruptsLocker.Unlock();

	// free the name only after interrupts are enabled again
	free(name);
	return B_OK;
}
397 
398 
399 //	#pragma mark - Private Kernel API
400 
401 
// TODO: Name clash with POSIX sem_init()... (we could just use C++)
/*!	Initializes the semaphore subsystem at boot: sizes and allocates the
	semaphore table, puts every slot on the free list, and registers the
	KDL commands "sems" and "sem".
*/
status_t
haiku_sem_init(kernel_args *args)
{
	area_id area;
	int32 i;

	TRACE(("sem_init: entry\n"));

	// compute maximal number of semaphores depending on the available memory
	// 128 MB -> 16384 semaphores, 448 kB fixed array size
	// 256 MB -> 32768, 896 kB
	// 512 MB and more-> 65536, 1.75 MB
	i = vm_page_num_pages() / 2;
	while (sMaxSems < i && sMaxSems < kMaxSemaphores)
		sMaxSems <<= 1;

	// create and initialize semaphore table
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area = create_area_etc(B_SYSTEM_TEAM, "sem_table",
		sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions, (void**)&sSems);
	if (area < 0)
		panic("unable to allocate semaphore table!\n");

	// mark all slots unused and chain them into the free list
	memset(sSems, 0, sizeof(struct sem_entry) * sMaxSems);
	for (i = 0; i < sMaxSems; i++) {
		sSems[i].id = -1;
		free_sem_slot(i, i);
	}

	// add debugger commands
	add_debugger_command_etc("sems", &dump_sem_list,
		"Dump a list of all active semaphores (for team, with name, etc.)",
		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]"
			" | (\"last\" <last acquirer>)\n"
		"Prints a list of all active semaphores meeting the given\n"
		"requirement. If no argument is given, all sems are listed.\n"
		"  <team>             - The team owning the semaphores.\n"
		"  <name>             - Part of the name of the semaphores.\n"
		"  <last acquirer>    - The thread that last acquired the semaphore.\n"
		, 0);
	add_debugger_command_etc("sem", &dump_sem_info,
		"Dump info about a particular semaphore",
		"<sem>\n"
		"Prints info about the specified semaphore.\n"
		"  <sem>  - pointer to the semaphore structure, semaphore ID, or name\n"
		"           of the semaphore to print info for.\n", 0);

	TRACE(("sem_init: exit\n"));

	sSemsActive = true;

	return 0;
}
460 
461 
/*!	Creates a semaphore with the given parameters.

	This function is only available from within the kernel, and
	should not be made public - if possible, we should remove it
	completely (and have only create_sem() exported).

	\param count The initial semaphore count.
	\param name The semaphore name (copied); \c NULL yields
		"unnamed semaphore".
	\param owner The team that shall own the new semaphore.
	\return The new semaphore's ID, or a negative error code.
*/
sem_id
create_sem_etc(int32 count, const char* name, team_id owner)
{
	struct sem_entry* sem = NULL;
	sem_id id = B_NO_MORE_SEMS;
	char* tempName;
	size_t nameLength;

	if (sSemsActive == false || sUsedSems == sMaxSems)
		return B_NO_MORE_SEMS;

	if (name == NULL)
		name = "unnamed semaphore";

	// get the owning team
	Team* team = Team::Get(owner);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	// clone the name now -- we cannot malloc() while holding the spinlock
	nameLength = strlen(name) + 1;
	nameLength = min_c(nameLength, B_OS_NAME_LENGTH);
	tempName = (char*)malloc(nameLength);
	if (tempName == NULL)
		return B_NO_MEMORY;

	strlcpy(tempName, name, nameLength);

	InterruptsSpinLocker _(&sSemsSpinlock);

	// get the first slot from the free list
	sem = sFreeSemsHead;
	if (sem) {
		// remove it from the free list
		sFreeSemsHead = sem->u.unused.next;
		if (!sFreeSemsHead)
			sFreeSemsTail = NULL;

		// init the slot
		SpinLocker semLocker(sem->lock);
		sem->id = sem->u.unused.next_id;
		sem->u.used.count = count;
		sem->u.used.net_count = count;
		new(&sem->queue) ThreadQueue;
		sem->u.used.name = tempName;
		sem->u.used.owner = team->id;
		sem->u.used.select_infos = NULL;
		id = sem->id;

		list_add_item(&team->sem_list, &sem->u.used.team_link);

		semLocker.Unlock();

		atomic_add(&sUsedSems, 1);

		KTRACE("create_sem_etc(count: %ld, name: %s, owner: %ld) -> %ld",
			count, name, owner, id);

		T_SCHEDULING_ANALYSIS(CreateSemaphore(id, name));
		NotifyWaitObjectListeners(&WaitObjectListener::SemaphoreCreated, id,
			name);
	}

	// no free slot available -- give the name clone back
	if (sem == NULL)
		free(tempName);

	return id;
}
537 
538 
539 status_t
540 select_sem(int32 id, struct select_info* info, bool kernel)
541 {
542 	if (id < 0)
543 		return B_BAD_SEM_ID;
544 
545 	int32 slot = id % sMaxSems;
546 
547 	InterruptsSpinLocker _(&sSems[slot].lock);
548 
549 	if (sSems[slot].id != id) {
550 		// bad sem ID
551 		return B_BAD_SEM_ID;
552 	} else if (!kernel
553 		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
554 		// kernel semaphore, but call from userland
555 		return B_NOT_ALLOWED;
556 	} else {
557 		info->selected_events &= B_EVENT_ACQUIRE_SEMAPHORE | B_EVENT_INVALID;
558 
559 		if (info->selected_events != 0) {
560 			info->next = sSems[slot].u.used.select_infos;
561 			sSems[slot].u.used.select_infos = info;
562 
563 			if (sSems[slot].u.used.count > 0)
564 				notify_select_events(info, B_EVENT_ACQUIRE_SEMAPHORE);
565 		}
566 	}
567 
568 	return B_OK;
569 }
570 
571 
572 status_t
573 deselect_sem(int32 id, struct select_info* info, bool kernel)
574 {
575 	if (id < 0)
576 		return B_BAD_SEM_ID;
577 
578 	if (info->selected_events == 0)
579 		return B_OK;
580 
581 	int32 slot = id % sMaxSems;
582 
583 	InterruptsSpinLocker _(&sSems[slot].lock);
584 
585 	if (sSems[slot].id == id) {
586 		select_info** infoLocation = &sSems[slot].u.used.select_infos;
587 		while (*infoLocation != NULL && *infoLocation != info)
588 			infoLocation = &(*infoLocation)->next;
589 
590 		if (*infoLocation == info)
591 			*infoLocation = info->next;
592 	}
593 
594 	return B_OK;
595 }
596 
597 
/*!	Forcibly removes a thread from a semaphores wait queue. May have to wake up
	other threads in the process.
	Must be called with semaphore lock held. The thread lock must not be held.
*/
static void
remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
{
	if (!entry->queued)
		return;

	sem->queue.Remove(entry);
	entry->queued = false;
	// return the units this thread would have consumed to the semaphore
	sem->u.used.count += entry->count;

	// We're done with this entry. We only have to check, if other threads
	// need unblocking, too.

	// Now see if more threads need to be woken up. We get the scheduler lock
	// for that time, so the blocking state of threads won't change (due to
	// interruption or timeout). We need that lock anyway when unblocking a
	// thread.
	while ((entry = sem->queue.Head()) != NULL) {
		SpinLocker schedulerLocker(entry->thread->scheduler_lock);
		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied, unblock
			// it. Otherwise we can't unblock any other thread.
			if (entry->count > sem->u.used.net_count)
				break;

			thread_unblock_locked(entry->thread, B_OK);
			sem->u.used.net_count -= entry->count;
		} else {
			// The thread is no longer waiting, but still queued, which means
			// acquiration failed and we can just remove it.
			sem->u.used.count += entry->count;
		}

		sem->queue.Remove(entry);
		entry->queued = false;
	}

	// select notification, if the semaphore is now acquirable
	if (sem->u.used.count > 0)
		notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE);
}
643 
644 
/*!	This function deletes all semaphores belonging to a particular team.
	Called during team teardown; all waiters on the deleted semaphores are
	unblocked with \c B_BAD_SEM_ID (see uninit_sem_locked()).
*/
void
sem_delete_owned_sems(Team* team)
{
	while (true) {
		char* name;

		{
			// get the next semaphore from the team's sem list
			InterruptsLocker _;
			SpinLocker semListLocker(sSemsSpinlock);
			sem_entry* sem = (sem_entry*)list_remove_head_item(&team->sem_list);
			if (sem == NULL)
				break;

			// delete the semaphore
			SpinLocker semLocker(sem->lock);
			semListLocker.Unlock();
			uninit_sem_locked(*sem, &name, semLocker);
				// drops semLocker; the name must be freed with
				// interrupts enabled
		}

		free(name);
	}

	scheduler_reschedule_if_necessary();
}
672 
673 
/*!	Returns the total capacity of the semaphore table. */
int32
sem_max_sems(void)
{
	return sMaxSems;
}
679 
680 
/*!	Returns the number of semaphores currently in use. */
int32
sem_used_sems(void)
{
	return sUsedSems;
}
686 
687 
688 //	#pragma mark - Public Kernel API
689 
690 
/*!	Creates a semaphore owned by the kernel team with the given initial
	count and name. See create_sem_etc().
*/
sem_id
create_sem(int32 count, const char* name)
{
	return create_sem_etc(count, name, team_get_kernel_team_id());
}
696 
697 
/*!	Deletes the semaphore \a id. Kernel-internal variant: no permission
	check, so kernel team semaphores may be deleted too.
*/
status_t
delete_sem(sem_id id)
{
	return delete_sem_internal(id, false);
}
703 
704 
/*!	Acquires one unit of the semaphore, blocking without timeout.
	Implemented via switch_sem_etc() with no semaphore to release.
*/
status_t
acquire_sem(sem_id id)
{
	return switch_sem_etc(-1, id, 1, 0, 0);
}
710 
711 
/*!	Acquires \a count units of the semaphore, honoring \a flags and
	\a timeout. See switch_sem_etc() for the details.
*/
status_t
acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	return switch_sem_etc(-1, id, count, flags, timeout);
}
717 
718 
/*!	Atomically releases \a toBeReleased and acquires one unit of
	\a toBeAcquired, blocking without timeout. See switch_sem_etc().
*/
status_t
switch_sem(sem_id toBeReleased, sem_id toBeAcquired)
{
	return switch_sem_etc(toBeReleased, toBeAcquired, 1, 0, 0);
}
724 
725 
726 status_t
727 switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
728 	uint32 flags, bigtime_t timeout)
729 {
730 	int slot = id % sMaxSems;
731 
732 	if (gKernelStartup)
733 		return B_OK;
734 	if (sSemsActive == false)
735 		return B_NO_MORE_SEMS;
736 #if KDEBUG
737 	if (!are_interrupts_enabled()) {
738 		panic("switch_sem_etc: called with interrupts disabled for sem "
739 			"%" B_PRId32 "\n", id);
740 	}
741 #endif
742 
743 	if (id < 0)
744 		return B_BAD_SEM_ID;
745 	if (count <= 0
746 		|| (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {
747 		return B_BAD_VALUE;
748 	}
749 
750 	InterruptsLocker _;
751 	SpinLocker semLocker(sSems[slot].lock);
752 
753 	if (sSems[slot].id != id) {
754 		TRACE(("switch_sem_etc: bad sem %ld\n", id));
755 		return B_BAD_SEM_ID;
756 	}
757 
758 	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
759 	//	doesn't have any use outside the kernel
760 	if ((flags & B_CHECK_PERMISSION) != 0
761 		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
762 		dprintf("thread %" B_PRId32 " tried to acquire kernel semaphore "
763 			"%" B_PRId32 ".\n", thread_get_current_thread_id(), id);
764 #if 0
765 		_user_debugger("Thread tried to acquire kernel semaphore.");
766 #endif
767 		return B_NOT_ALLOWED;
768 	}
769 
770 	if (sSems[slot].u.used.count - count < 0) {
771 		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
772 			// immediate timeout
773 			return B_WOULD_BLOCK;
774 		} else if ((flags & B_ABSOLUTE_TIMEOUT) != 0 && timeout < 0) {
775 			// absolute negative timeout
776 			return B_TIMED_OUT;
777 		}
778 	}
779 
780 	KTRACE("switch_sem_etc(semToBeReleased: %ld, sem: %ld, count: %ld, "
781 		"flags: 0x%lx, timeout: %lld)", semToBeReleased, id, count, flags,
782 		timeout);
783 
784 	if ((sSems[slot].u.used.count -= count) < 0) {
785 		// we need to block
786 		Thread *thread = thread_get_current_thread();
787 
788 		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
789 			" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));
790 
791 		// do a quick check to see if the thread has any pending signals
792 		// this should catch most of the cases where the thread had a signal
793 		SpinLocker schedulerLocker(thread->scheduler_lock);
794 		if (thread_is_interrupted(thread, flags)) {
795 			schedulerLocker.Unlock();
796 			sSems[slot].u.used.count += count;
797 			if (semToBeReleased >= B_OK) {
798 				// we need to still release the semaphore to always
799 				// leave in a consistent state
800 				release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
801 			}
802 			return B_INTERRUPTED;
803 		}
804 
805 		schedulerLocker.Unlock();
806 
807 		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
808 			timeout = B_INFINITE_TIMEOUT;
809 
810 		// enqueue in the semaphore queue and get ready to wait
811 		queued_thread queueEntry(thread, count);
812 		sSems[slot].queue.Add(&queueEntry);
813 		queueEntry.queued = true;
814 
815 		thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE,
816 			(void*)(addr_t)id);
817 
818 		semLocker.Unlock();
819 
820 		// release the other semaphore, if any
821 		if (semToBeReleased >= 0) {
822 			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
823 			semToBeReleased = -1;
824 		}
825 
826 		status_t acquireStatus = timeout == B_INFINITE_TIMEOUT
827 			? thread_block() : thread_block_with_timeout(flags, timeout);
828 
829 		semLocker.Lock();
830 
831 		// If we're still queued, this means the acquiration failed, and we
832 		// need to remove our entry and (potentially) wake up other threads.
833 		if (queueEntry.queued)
834 			remove_thread_from_sem(&queueEntry, &sSems[slot]);
835 
836 		if (acquireStatus >= B_OK) {
837 			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
838 #if DEBUG_SEM_LAST_ACQUIRER
839 			sSems[slot].u.used.last_acquire_count = count;
840 #endif
841 		}
842 
843 		semLocker.Unlock();
844 
845 		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
846 			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
847 			thread->name));
848 		KTRACE("switch_sem_etc() done: 0x%lx", acquireStatus);
849 		return acquireStatus;
850 	} else {
851 		sSems[slot].u.used.net_count -= count;
852 		sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
853 #if DEBUG_SEM_LAST_ACQUIRER
854 		sSems[slot].u.used.last_acquire_count = count;
855 #endif
856 	}
857 
858 	KTRACE("switch_sem_etc() done: 0x%lx", status);
859 
860 	return B_OK;
861 }
862 
863 
/*!	Releases one unit of the semaphore. See release_sem_etc(). */
status_t
release_sem(sem_id id)
{
	return release_sem_etc(id, 1, 0);
}
869 
870 
/*!	Releases \a count units of the semaphore \a id, unblocking as many
	waiting threads as the new count satisfies.

	\param flags B_RELEASE_ALL releases exactly enough units to satisfy all
		currently blocked threads; B_RELEASE_IF_WAITING_ONLY only hands out
		units that directly unblock a waiter; B_DO_NOT_RESCHEDULE suppresses
		the final reschedule; B_CHECK_PERMISSION rejects kernel team
		semaphores (userland path).
*/
status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	int32 slot = id % sMaxSems;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;
#if KDEBUG
	if ((flags & B_DO_NOT_RESCHEDULE) == 0 && !are_interrupts_enabled()) {
		panic("release_sem_etc(): called with interrupts disabled and "
			"rescheduling allowed for sem_id %" B_PRId32, id);
	}
#endif

	InterruptsLocker _;
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %" B_PRId32 " tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		return B_NOT_ALLOWED;
	}

	KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,
		flags);

	// negating last_acquirer marks that the last operation was a release
	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	if (flags & B_RELEASE_ALL) {
		// release exactly what the currently blocked threads still need
		count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			return B_OK;

		// Don't release more than necessary -- there might be interrupted/
		// timed out threads in the queue.
		flags |= B_RELEASE_IF_WAITING_ONLY;
	}

	// Unblock waiters head to tail. For each candidate we hold its scheduler
	// lock, so thread_is_blocked() is reliable (due to possible interruptions
	// or timeouts, it wouldn't be otherwise).
	while (count > 0) {
		queued_thread* entry = sSems[slot].queue.Head();
		if (entry == NULL) {
			if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
			}
			break;
		}

		SpinLocker schedulerLock(entry->thread->scheduler_lock);
		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied,
			// unblock it. Otherwise we can't unblock any other thread.
			if (entry->count > sSems[slot].u.used.net_count + count) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
				break;
			}

			thread_unblock_locked(entry->thread, B_OK);

			int delta = min_c(count, entry->count);
			sSems[slot].u.used.count += delta;
			sSems[slot].u.used.net_count += delta - entry->count;
			count -= delta;
		} else {
			// The thread is no longer waiting, but still queued, which
			// means acquiration failed and we can just remove it.
			sSems[slot].u.used.count += entry->count;
		}

		sSems[slot].queue.Remove(entry);
		entry->queued = false;
	}

	// the semaphore is acquirable again -- notify select()ing threads
	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	// If we've unblocked another thread reschedule, if we've not explicitly
	// been told not to.
	if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
		semLocker.Unlock();

		SpinLocker _(thread_get_current_thread()->scheduler_lock);
		scheduler_reschedule_if_necessary_locked();
	}

	return B_OK;
}
981 
982 
983 status_t
984 get_sem_count(sem_id id, int32 *_count)
985 {
986 	if (sSemsActive == false)
987 		return B_NO_MORE_SEMS;
988 	if (id < 0)
989 		return B_BAD_SEM_ID;
990 	if (_count == NULL)
991 		return B_BAD_VALUE;
992 
993 	int slot = id % sMaxSems;
994 
995 	InterruptsSpinLocker _(sSems[slot].lock);
996 
997 	if (sSems[slot].id != id) {
998 		TRACE(("sem_get_count: invalid sem_id %ld\n", id));
999 		return B_BAD_SEM_ID;
1000 	}
1001 
1002 	*_count = sSems[slot].u.used.count;
1003 
1004 	return B_OK;
1005 }
1006 
1007 
1008 /*!	Called by the get_sem_info() macro. */
1009 status_t
1010 _get_sem_info(sem_id id, struct sem_info *info, size_t size)
1011 {
1012 	if (!sSemsActive)
1013 		return B_NO_MORE_SEMS;
1014 	if (id < 0)
1015 		return B_BAD_SEM_ID;
1016 	if (info == NULL || size != sizeof(sem_info))
1017 		return B_BAD_VALUE;
1018 
1019 	int slot = id % sMaxSems;
1020 
1021 	InterruptsSpinLocker _(sSems[slot].lock);
1022 
1023 	if (sSems[slot].id != id) {
1024 		TRACE(("get_sem_info: invalid sem_id %ld\n", id));
1025 		return B_BAD_SEM_ID;
1026 	} else
1027 		fill_sem_info(&sSems[slot], info, size);
1028 
1029 	return B_OK;
1030 }
1031 
1032 
1033 /*!	Called by the get_next_sem_info() macro. */
1034 status_t
1035 _get_next_sem_info(team_id teamID, int32 *_cookie, struct sem_info *info,
1036 	size_t size)
1037 {
1038 	if (!sSemsActive)
1039 		return B_NO_MORE_SEMS;
1040 	if (_cookie == NULL || info == NULL || size != sizeof(sem_info))
1041 		return B_BAD_VALUE;
1042 	if (teamID < 0)
1043 		return B_BAD_TEAM_ID;
1044 
1045 	Team* team = Team::Get(teamID);
1046 	if (team == NULL)
1047 		return B_BAD_TEAM_ID;
1048 	BReference<Team> teamReference(team, true);
1049 
1050 	InterruptsSpinLocker semListLocker(sSemsSpinlock);
1051 
1052 	// TODO: find a way to iterate the list that is more reliable
1053 	sem_entry* sem = (sem_entry*)list_get_first_item(&team->sem_list);
1054 	int32 newIndex = *_cookie;
1055 	int32 index = 0;
1056 	bool found = false;
1057 
1058 	while (!found) {
1059 		// find the next entry to be returned
1060 		while (sem != NULL && index < newIndex) {
1061 			sem = (sem_entry*)list_get_next_item(&team->sem_list, sem);
1062 			index++;
1063 		}
1064 
1065 		if (sem == NULL)
1066 			return B_BAD_VALUE;
1067 
1068 		SpinLocker _(sem->lock);
1069 
1070 		if (sem->id != -1 && sem->u.used.owner == team->id) {
1071 			// found one!
1072 			fill_sem_info(sem, info, size);
1073 			newIndex = index + 1;
1074 			found = true;
1075 		} else
1076 			newIndex++;
1077 	}
1078 
1079 	if (!found)
1080 		return B_BAD_VALUE;
1081 
1082 	*_cookie = newIndex;
1083 	return B_OK;
1084 }
1085 
1086 
/*!	Transfers ownership of the semaphore \a id to the team \a newTeamID,
	moving it between the teams' semaphore lists.
*/
status_t
set_sem_owner(sem_id id, team_id newTeamID)
{
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (newTeamID < 0)
		return B_BAD_TEAM_ID;

	int32 slot = id % sMaxSems;

	// get the new team
	Team* newTeam = Team::Get(newTeamID);
	if (newTeam == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> newTeamReference(newTeam, true);

	// lock order: sSemsSpinlock -> sem_entry::lock (see top of file)
	InterruptsSpinLocker semListLocker(sSemsSpinlock);
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("set_sem_owner: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// move the semaphore between the teams' lists
	list_remove_link(&sSems[slot].u.used.team_link);
	list_add_item(&newTeam->sem_list, &sSems[slot].u.used.team_link);

	sSems[slot].u.used.owner = newTeam->id;
	return B_OK;
}
1119 
1120 
1121 /*!	Returns the name of the semaphore. The name is not copied, so the caller
1122 	must make sure that the semaphore remains alive as long as the name is used.
1123 */
1124 const char*
1125 sem_get_name_unsafe(sem_id id)
1126 {
1127 	int slot = id % sMaxSems;
1128 
1129 	if (sSemsActive == false || id < 0 || sSems[slot].id != id)
1130 		return NULL;
1131 
1132 	return sSems[slot].u.used.name;
1133 }
1134 
1135 
1136 //	#pragma mark - Syscalls
1137 
1138 
1139 sem_id
1140 _user_create_sem(int32 count, const char *userName)
1141 {
1142 	char name[B_OS_NAME_LENGTH];
1143 
1144 	if (userName == NULL)
1145 		return create_sem_etc(count, NULL, team_get_current_team_id());
1146 
1147 	if (!IS_USER_ADDRESS(userName)
1148 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
1149 		return B_BAD_ADDRESS;
1150 
1151 	return create_sem_etc(count, name, team_get_current_team_id());
1152 }
1153 
1154 
1155 status_t
1156 _user_delete_sem(sem_id id)
1157 {
1158 	return delete_sem_internal(id, true);
1159 }
1160 
1161 
1162 status_t
1163 _user_acquire_sem(sem_id id)
1164 {
1165 	status_t error = switch_sem_etc(-1, id, 1,
1166 		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);
1167 
1168 	return syscall_restart_handle_post(error);
1169 }
1170 
1171 
1172 status_t
1173 _user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
1174 {
1175 	syscall_restart_handle_timeout_pre(flags, timeout);
1176 
1177 	status_t error = switch_sem_etc(-1, id, count,
1178 		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);
1179 
1180 	return syscall_restart_handle_timeout_post(error, timeout);
1181 }
1182 
1183 
1184 status_t
1185 _user_switch_sem(sem_id releaseSem, sem_id id)
1186 {
1187 	status_t error = switch_sem_etc(releaseSem, id, 1,
1188 		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);
1189 
1190 	if (releaseSem < 0)
1191 		return syscall_restart_handle_post(error);
1192 
1193 	return error;
1194 }
1195 
1196 
1197 status_t
1198 _user_switch_sem_etc(sem_id releaseSem, sem_id id, int32 count, uint32 flags,
1199 	bigtime_t timeout)
1200 {
1201 	if (releaseSem < 0)
1202 		syscall_restart_handle_timeout_pre(flags, timeout);
1203 
1204 	status_t error = switch_sem_etc(releaseSem, id, count,
1205 		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);
1206 
1207 	if (releaseSem < 0)
1208 		return syscall_restart_handle_timeout_post(error, timeout);
1209 
1210 	return error;
1211 }
1212 
1213 
1214 status_t
1215 _user_release_sem(sem_id id)
1216 {
1217 	return release_sem_etc(id, 1, B_CHECK_PERMISSION);
1218 }
1219 
1220 
1221 status_t
1222 _user_release_sem_etc(sem_id id, int32 count, uint32 flags)
1223 {
1224 	return release_sem_etc(id, count, flags | B_CHECK_PERMISSION);
1225 }
1226 
1227 
1228 status_t
1229 _user_get_sem_count(sem_id id, int32 *userCount)
1230 {
1231 	status_t status;
1232 	int32 count;
1233 
1234 	if (userCount == NULL || !IS_USER_ADDRESS(userCount))
1235 		return B_BAD_ADDRESS;
1236 
1237 	status = get_sem_count(id, &count);
1238 	if (status == B_OK && user_memcpy(userCount, &count, sizeof(int32)) < B_OK)
1239 		return B_BAD_ADDRESS;
1240 
1241 	return status;
1242 }
1243 
1244 
1245 status_t
1246 _user_get_sem_info(sem_id id, struct sem_info *userInfo, size_t size)
1247 {
1248 	struct sem_info info;
1249 	status_t status;
1250 
1251 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo))
1252 		return B_BAD_ADDRESS;
1253 
1254 	status = _get_sem_info(id, &info, size);
1255 	if (status == B_OK && user_memcpy(userInfo, &info, size) < B_OK)
1256 		return B_BAD_ADDRESS;
1257 
1258 	return status;
1259 }
1260 
1261 
1262 status_t
1263 _user_get_next_sem_info(team_id team, int32 *userCookie, struct sem_info *userInfo,
1264 	size_t size)
1265 {
1266 	struct sem_info info;
1267 	int32 cookie;
1268 	status_t status;
1269 
1270 	if (userCookie == NULL || userInfo == NULL
1271 		|| !IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
1272 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
1273 		return B_BAD_ADDRESS;
1274 
1275 	status = _get_next_sem_info(team, &cookie, &info, size);
1276 
1277 	if (status == B_OK) {
1278 		if (user_memcpy(userInfo, &info, size) < B_OK
1279 			|| user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK)
1280 			return B_BAD_ADDRESS;
1281 	}
1282 
1283 	return status;
1284 }
1285 
1286 
1287 status_t
1288 _user_set_sem_owner(sem_id id, team_id team)
1289 {
1290 	return set_sem_owner(id, team);
1291 }
1292