xref: /haiku/src/system/kernel/sem.cpp (revision 89755088d790ff4fe36f8aa77dacb2bd15507108)
/*
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*! Semaphore code */


#include <OS.h>

#include <sem.h>
#include <kernel.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <smp.h>
#include <int.h>
#include <arch/int.h>
#include <debug.h>
#include <thread.h>
#include <team.h>
#include <vfs.h>
#include <vm_low_memory.h>
#include <vm_page.h>
#include <boot/kernel_args.h>
#include <syscall_restart.h>
#include <wait_for_objects.h>

#include <string.h>
#include <stdlib.h>


//#define TRACE_SEM
#ifdef TRACE_SEM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#define DEBUG_LAST_ACQUIRER

struct sem_entry {
	sem_id		id;
	spinlock	lock;	// protects only the id field when unused
	union {
		// when slot in use
		struct {
			int32				count;
			struct thread_queue queue;
			char				*name;
			team_id				owner;	// if set to -1, means owned by a port
			select_info			*select_infos;
#ifdef DEBUG_LAST_ACQUIRER
			thread_id			last_acquirer;
			int32				last_acquire_count;
			thread_id			last_releaser;
			int32				last_release_count;
#endif
		} used;

		// when slot unused
		struct {
			sem_id				next_id;
			struct sem_entry	*next;
		} unused;
	} u;
};

static const int32 kMaxSemaphores = 131072;
static int32 sMaxSems = 4096;
	// Final value is computed based on the amount of available memory
static int32 sUsedSems = 0;

static struct sem_entry *sSems = NULL;
static bool sSemsActive = false;
static struct sem_entry	*sFreeSemsHead = NULL;
static struct sem_entry	*sFreeSemsTail = NULL;

static spinlock sem_spinlock = 0;
#define GRAB_SEM_LIST_LOCK()     acquire_spinlock(&sem_spinlock)
#define RELEASE_SEM_LIST_LOCK()  release_spinlock(&sem_spinlock)
#define GRAB_SEM_LOCK(s)         acquire_spinlock(&(s).lock)
#define RELEASE_SEM_LOCK(s)      release_spinlock(&(s).lock)

static int remove_thread_from_sem(struct thread *thread, struct sem_entry *sem,
	struct thread_queue *queue, status_t acquireStatus, bool hasThreadLock);

struct sem_timeout_args {
	thread_id	blocked_thread;
	sem_id		blocked_sem_id;
	int32		sem_count;
};


static int
dump_sem_list(int argc, char **argv)
{
	const char *name = NULL;
	team_id owner = -1;
#ifdef DEBUG_LAST_ACQUIRER
	thread_id last = -1;
#endif
	int32 i;

	if (argc > 2) {
		if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
			owner = strtoul(argv[2], NULL, 0);
		else if (!strcmp(argv[1], "name"))
			name = argv[2];
#ifdef DEBUG_LAST_ACQUIRER
		else if (!strcmp(argv[1], "last"))
			last = strtoul(argv[2], NULL, 0);
#endif
	} else if (argc > 1)
		owner = strtoul(argv[1], NULL, 0);

	kprintf("sem            id count   team"
#ifdef DEBUG_LAST_ACQUIRER
		"   last"
#endif
		"  name\n");

	for (i = 0; i < sMaxSems; i++) {
		struct sem_entry *sem = &sSems[i];
		if (sem->id < 0
#ifdef DEBUG_LAST_ACQUIRER
			|| (last != -1 && sem->u.used.last_acquirer != last)
#endif
			|| (name != NULL && strstr(sem->u.used.name, name) == NULL)
			|| (owner != -1 && sem->u.used.owner != owner))
			continue;

		kprintf("%p %6ld %5ld %6ld "
#ifdef DEBUG_LAST_ACQUIRER
			"%6ld "
#endif
			" %s\n", sem, sem->id, sem->u.used.count,
			sem->u.used.owner,
#ifdef DEBUG_LAST_ACQUIRER
			sem->u.used.last_acquirer > 0 ? sem->u.used.last_acquirer : 0,
#endif
			sem->u.used.name);
	}

	return 0;
}


static void
dump_sem(struct sem_entry *sem)
{
	kprintf("SEM: %p\n", sem);
	kprintf("id:      %ld (%#lx)\n", sem->id, sem->id);
	if (sem->id >= 0) {
		kprintf("name:    '%s'\n", sem->u.used.name);
		kprintf("owner:   %ld\n", sem->u.used.owner);
		kprintf("count:   %ld\n", sem->u.used.count);
		kprintf("queue:  ");
		if (sem->u.used.queue.head != NULL) {
			struct thread *thread = sem->u.used.queue.head;
			while (thread != NULL) {
				kprintf(" %ld", thread->id);
				thread = thread->queue_next;
			}
			kprintf("\n");
		} else
			kprintf(" -\n");

		set_debug_variable("_sem", (addr_t)sem);
		set_debug_variable("_semID", sem->id);
		set_debug_variable("_owner", sem->u.used.owner);

#ifdef DEBUG_LAST_ACQUIRER
		kprintf("last acquired by: %ld, count: %ld\n", sem->u.used.last_acquirer,
			sem->u.used.last_acquire_count);
		kprintf("last released by: %ld, count: %ld\n", sem->u.used.last_releaser,
			sem->u.used.last_release_count);

		if (sem->u.used.last_acquirer != 0)
			set_debug_variable("_acquirer", sem->u.used.last_acquirer);
		else
			unset_debug_variable("_acquirer");

		if (sem->u.used.last_releaser != 0)
			set_debug_variable("_releaser", sem->u.used.last_releaser);
		else
			unset_debug_variable("_releaser");
#endif
	} else {
		kprintf("next:    %p\n", sem->u.unused.next);
		kprintf("next_id: %ld\n", sem->u.unused.next_id);
	}
}


static int
dump_sem_info(int argc, char **argv)
{
	bool found = false;
	addr_t num;
	int32 i;

	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	num = strtoul(argv[1], NULL, 0);

	if (IS_KERNEL_ADDRESS(num)) {
		dump_sem((struct sem_entry *)num);
		return 0;
	} else if (num > 0) {
		uint32 slot = num % sMaxSems;
		if (sSems[slot].id != (int)num) {
			kprintf("sem %ld (%#lx) doesn't exist!\n", num, num);
			return 0;
		}

		dump_sem(&sSems[slot]);
		return 0;
	}

	// walk through the sem list, trying to match name
	for (i = 0; i < sMaxSems; i++) {
		if (sSems[i].u.used.name != NULL
			&& strcmp(argv[1], sSems[i].u.used.name) == 0) {
			dump_sem(&sSems[i]);
			found = true;
		}
	}

	if (!found)
		kprintf("sem \"%s\" doesn't exist!\n", argv[1]);
	return 0;
}


static inline void
clear_thread_queue(struct thread_queue *queue)
{
	queue->head = queue->tail = NULL;
}


/*!	\brief Appends a semaphore slot to the free list.

	The semaphore list must be locked.
	The slot's id field is not changed. It should already be set to -1.

	\param slot The index of the semaphore slot.
	\param nextID The ID the slot will get when reused. If < 0, the slot
		   index is used as the ID.
*/
static void
free_sem_slot(int slot, sem_id nextID)
{
	struct sem_entry *sem = sSems + slot;
	// set next_id to the next possible value; fall back to the slot index
	// if nextID is invalid
	if (nextID < 0)
		sem->u.unused.next_id = slot;
	else
		sem->u.unused.next_id = nextID;
	// append the entry to the list
	if (sFreeSemsTail)
		sFreeSemsTail->u.unused.next = sem;
	else
		sFreeSemsHead = sem;
	sFreeSemsTail = sem;
	sem->u.unused.next = NULL;
}
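

/*	How slot reuse keeps IDs unique (illustrative arithmetic, not code from
	this file): on deletion a slot's next ID is advanced by sMaxSems (see
	delete_sem()), so with the default sMaxSems of 4096, slot 5 hands out
	the IDs 5, 4101, 8197, ... Any ID therefore maps back to its slot via
	"id % sMaxSems", while stale IDs still fail the "sSems[slot].id == id"
	check.
*/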


static inline void
notify_sem_select_events(struct sem_entry* sem, uint16 events)
{
	if (sem->u.used.select_infos)
		notify_select_events_list(sem->u.used.select_infos, events);
}


/*!	Called from a timer handler. Wakes up a thread that timed out on a
	semaphore.
*/
static int32
sem_timeout(timer *data)
{
	struct sem_timeout_args *args = (struct sem_timeout_args *)data->entry.prev;
	struct thread *thread;
	int slot;
	int state;
	struct thread_queue wakeupQueue;

	thread = thread_get_thread_struct(args->blocked_thread);
	if (thread == NULL)
		return B_HANDLED_INTERRUPT;
	slot = args->blocked_sem_id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	TRACE(("sem_timeout: called on %p sem %ld, thread %ld\n",
		data, args->blocked_sem_id, args->blocked_thread));

	if (sSems[slot].id != args->blocked_sem_id) {
		// this thread was not waiting on this semaphore
		panic("sem_timeout: thread %ld was trying to wait on sem %ld which "
			"doesn't exist!\n", args->blocked_thread, args->blocked_sem_id);

		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		return B_HANDLED_INTERRUPT;
	}

	clear_thread_queue(&wakeupQueue);
	remove_thread_from_sem(thread, &sSems[slot], &wakeupQueue, B_TIMED_OUT,
		false);

	RELEASE_SEM_LOCK(sSems[slot]);

	GRAB_THREAD_LOCK();
	// put the threads into the run queue here, to make sure we don't
	// deadlock in sem_interrupt_thread()
	while ((thread = thread_dequeue(&wakeupQueue)) != NULL)
		scheduler_enqueue_in_run_queue(thread);

	RELEASE_THREAD_LOCK();

	restore_interrupts(state);

	return B_INVOKE_SCHEDULER;
}


/*!	Fills the sem_info structure with information from the specified
	semaphore.
	The semaphore's lock must be held when called.
*/
static void
fill_sem_info(struct sem_entry *sem, sem_info *info, size_t size)
{
	info->sem = sem->id;
	info->team = sem->u.used.owner;
	strlcpy(info->name, sem->u.used.name, sizeof(info->name));
	info->count = sem->u.used.count;

	// ToDo: not sure if this is the latest holder, or the next
	// holder...
	if (sem->u.used.queue.head != NULL)
		info->latest_holder = sem->u.used.queue.head->id;
	else
		info->latest_holder = -1;
}


//	#pragma mark - Private Kernel API


status_t
sem_init(kernel_args *args)
{
	area_id area;
	int32 i;

	TRACE(("sem_init: entry\n"));

	// compute maximal number of semaphores depending on the available memory
	// 128 MB -> 16384 semaphores, 448 kB fixed array size
	// 256 MB -> 32768, 896 kB
	// 512 MB -> 65536, 1.75 MB
	// 1024 MB and more -> 131072, 3.5 MB
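	// (For example: with 4 kB pages, 128 MB is 32768 pages, half of which is
	// 16384, so sMaxSems doubles from 4096 to 16384. The sizes above assume
	// 28 bytes per sem_entry; the DEBUG_LAST_ACQUIRER fields make the
	// entries somewhat larger.)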
	i = vm_page_num_pages() / 2;
	while (sMaxSems < i && sMaxSems < kMaxSemaphores)
		sMaxSems <<= 1;

	// create and initialize semaphore table
	area = create_area("sem_table", (void **)&sSems, B_ANY_KERNEL_ADDRESS,
		sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		panic("unable to allocate semaphore table!\n");

	memset(sSems, 0, sizeof(struct sem_entry) * sMaxSems);
	for (i = 0; i < sMaxSems; i++) {
		sSems[i].id = -1;
		free_sem_slot(i, i);
	}

	// add debugger commands
	add_debugger_command_etc("sems", &dump_sem_list,
		"Dump a list of all active semaphores (for team, with name, etc.)",
		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]"
#ifdef DEBUG_LAST_ACQUIRER
			" | (\"last\" <last acquirer>)"
#endif
		"\n"
		"Prints a list of all active semaphores meeting the given\n"
		"requirement. If no argument is given, all sems are listed.\n"
		"  <team>             - The team owning the semaphores.\n"
		"  <name>             - Part of the name of the semaphores.\n"
#ifdef DEBUG_LAST_ACQUIRER
		"  <last acquirer>    - The thread that last acquired the semaphore.\n"
#endif
		, 0);
	add_debugger_command_etc("sem", &dump_sem_info,
		"Dump info about a particular semaphore",
		"<sem>\n"
		"Prints info about the specified semaphore.\n"
		"  <sem>  - pointer to the semaphore structure, semaphore ID, or name\n"
		"           of the semaphore to print info for.\n", 0);

	TRACE(("sem_init: exit\n"));

	sSemsActive = true;

	return 0;
}


/*!	Creates a semaphore with the given parameters.
	Note that the team_id is not checked; it must be correct, or else
	that semaphore might not be deleted.
	This function is only available from within the kernel, and
	should not be made public - if possible, we should remove it
	completely (and have only create_sem() exported).
*/
sem_id
create_sem_etc(int32 count, const char *name, team_id owner)
{
	struct sem_entry *sem = NULL;
	cpu_status state;
	sem_id id = B_NO_MORE_SEMS;
	char *tempName;
	size_t nameLength;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

#if 0
	// TODO: the code below might cause unwanted deadlocks,
	// we need an asynchronously running low resource handler.
	if (sUsedSems == sMaxSems) {
		// The vnode cache may have collected lots of semaphores.
		// Freeing some unused vnodes should improve our situation.
		// TODO: maybe create a generic "low resources" handler, instead
		//	of only the specialised low memory thing?
		vfs_free_unused_vnodes(B_LOW_MEMORY_WARNING);
	}
	if (sUsedSems == sMaxSems) {
		// try again with more enthusiasm
		vfs_free_unused_vnodes(B_LOW_MEMORY_CRITICAL);
	}
#endif
	if (sUsedSems == sMaxSems)
		return B_NO_MORE_SEMS;

	if (name == NULL)
		name = "unnamed semaphore";

	nameLength = strlen(name) + 1;
	nameLength = min_c(nameLength, B_OS_NAME_LENGTH);
	tempName = (char *)malloc(nameLength);
	if (tempName == NULL)
		return B_NO_MEMORY;
	strlcpy(tempName, name, nameLength);

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	// get the first slot from the free list
	sem = sFreeSemsHead;
	if (sem) {
		// remove it from the free list
		sFreeSemsHead = sem->u.unused.next;
		if (!sFreeSemsHead)
			sFreeSemsTail = NULL;

		// init the slot
		GRAB_SEM_LOCK(*sem);
		sem->id = sem->u.unused.next_id;
		sem->u.used.count = count;
		clear_thread_queue(&sem->u.used.queue);
		sem->u.used.name = tempName;
		sem->u.used.owner = owner;
		sem->u.used.select_infos = NULL;
		id = sem->id;
		RELEASE_SEM_LOCK(*sem);

		atomic_add(&sUsedSems, 1);
	}

	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	if (!sem)
		free(tempName);

	return id;
}
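

/*	A minimal usage sketch (hypothetical driver code, not part of this file):
	the usual lifecycle is create, acquire/release, delete. With an initial
	count of 1, acquiring and releasing single units yields a mutex-like lock.

		sem_id lock = create_sem(1, "example lock");
		if (lock >= B_OK) {
			if (acquire_sem(lock) == B_OK) {
				// ... critical section ...
				release_sem(lock);
			}
			delete_sem(lock);
		}
*/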


status_t
select_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;
	status_t error = B_OK;

	if (id < 0)
		return B_BAD_SEM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		// bad sem ID
		error = B_BAD_SEM_ID;
	} else if (!kernel
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		// kernel semaphore, but call from userland
		error = B_NOT_ALLOWED;
	} else {
		info->selected_events &= B_EVENT_ACQUIRE_SEMAPHORE | B_EVENT_INVALID;

		if (info->selected_events != 0) {
			info->next = sSems[slot].u.used.select_infos;
			sSems[slot].u.used.select_infos = info;

			if (sSems[slot].u.used.count > 0)
				notify_select_events(info, B_EVENT_ACQUIRE_SEMAPHORE);
		}
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return error;
}


status_t
deselect_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;

	if (id < 0)
		return B_BAD_SEM_ID;

	if (info->selected_events == 0)
		return B_OK;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id == id) {
		select_info** infoLocation = &sSems[slot].u.used.select_infos;
		while (*infoLocation != NULL && *infoLocation != info)
			infoLocation = &(*infoLocation)->next;

		if (*infoLocation == info)
			*infoLocation = info->next;
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}


/*!	Wakes up a thread that is blocked on a semaphore.
	This function must be entered with interrupts disabled and the thread
	lock held.
*/
status_t
sem_interrupt_thread(struct thread *thread)
{
	struct thread_queue wakeupQueue;
	int32 slot;

	TRACE(("sem_interrupt_thread: called on thread %p (%ld), blocked on sem %ld\n",
		thread, thread->id, thread->sem.blocking));

	if (thread->state != B_THREAD_WAITING || thread->sem.blocking < 0)
		return B_BAD_VALUE;
	if ((thread->sem.flags & B_CAN_INTERRUPT) == 0
		&& ((thread->sem.flags & B_KILL_CAN_INTERRUPT) == 0
			|| (thread->sig_pending & KILL_SIGNALS) == 0)) {
		return B_NOT_ALLOWED;
	}

	slot = thread->sem.blocking % sMaxSems;

	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != thread->sem.blocking) {
		panic("sem_interrupt_thread: thread %ld blocks on sem %ld, but that "
			"sem doesn't exist!\n", thread->id, thread->sem.blocking);
	}

	clear_thread_queue(&wakeupQueue);
	status_t result = remove_thread_from_sem(thread, &sSems[slot],
		&wakeupQueue, B_INTERRUPTED, true);

	RELEASE_SEM_LOCK(sSems[slot]);

	if (result != B_OK) {
		// The thread is not in the wait queue anymore. Probably it just timed
		// out before we locked the sem.
		return result;
	}

	while ((thread = thread_dequeue(&wakeupQueue)) != NULL)
		scheduler_enqueue_in_run_queue(thread);

	return B_OK;
}


/*!	Forcibly removes a thread from a semaphore's wait queue. May have to wake
	up other threads in the process. All threads that need to be woken up are
	added to the passed-in thread_queue.
	Must be called with the semaphore's lock held.
*/
static int
remove_thread_from_sem(struct thread *thread, struct sem_entry *sem,
	struct thread_queue *queue, status_t acquireStatus, bool hasThreadLock)
{
	// remove the thread from the queue and place it in the supplied queue
	if (thread_dequeue_id(&sem->u.used.queue, thread->id) != thread)
		return B_ENTRY_NOT_FOUND;

	sem->u.used.count += thread->sem.acquire_count;
	thread->sem.acquire_status = acquireStatus;
	thread_enqueue(thread, queue);

	// now see if more threads need to be woken up
	while (sem->u.used.count > 0
		&& (thread = thread_lookat_queue(&sem->u.used.queue)) != NULL) {
		int32 delta = min_c(thread->sem.count, sem->u.used.count);

		thread->sem.count -= delta;
		if (thread->sem.count <= 0) {
			thread = thread_dequeue(&sem->u.used.queue);
			thread_enqueue(thread, queue);
		}
		sem->u.used.count -= delta;
	}

	if (sem->u.used.count > 0 && sem->u.used.select_infos != NULL) {
		if (hasThreadLock)
			RELEASE_THREAD_LOCK();

		notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE);

		if (hasThreadLock)
			GRAB_THREAD_LOCK();
	}

	return B_OK;
}


/*!	This function cycles through the sem table, deleting all the sems
	that are owned by the specified team.
*/
int
sem_delete_owned_sems(team_id owner)
{
	int state;
	int i;
	int count = 0;

	// ToDo: that looks horribly inefficient - maybe it would be better
	//	to have them in a list in the team

	if (owner < 0)
		return B_BAD_TEAM_ID;

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	for (i = 0; i < sMaxSems; i++) {
		if (sSems[i].id != -1 && sSems[i].u.used.owner == owner) {
			sem_id id = sSems[i].id;

			RELEASE_SEM_LIST_LOCK();
			restore_interrupts(state);

			delete_sem(id);
			count++;

			state = disable_interrupts();
			GRAB_SEM_LIST_LOCK();
		}
	}

	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	return count;
}


int32
sem_max_sems(void)
{
	return sMaxSems;
}


int32
sem_used_sems(void)
{
	return sUsedSems;
}


//	#pragma mark - Public Kernel API


sem_id
create_sem(int32 count, const char *name)
{
	return create_sem_etc(count, name, team_get_kernel_team_id());
}


status_t
delete_sem(sem_id id)
{
	struct thread_queue releaseQueue;
	int32 releasedThreads;
	struct thread *thread;
	cpu_status state;
	int32 slot;
	char *name;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("delete_sem: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	notify_sem_select_events(&sSems[slot], B_EVENT_INVALID);
	sSems[slot].u.used.select_infos = NULL;

	releasedThreads = 0;
	clear_thread_queue(&releaseQueue);

	// free any threads waiting for this semaphore
	while ((thread = thread_dequeue(&sSems[slot].u.used.queue)) != NULL) {
		thread->sem.acquire_status = B_BAD_SEM_ID;
		thread->sem.count = 0;
		thread_enqueue(thread, &releaseQueue);
		releasedThreads++;
	}

	sSems[slot].id = -1;
	name = sSems[slot].u.used.name;
	sSems[slot].u.used.name = NULL;

	RELEASE_SEM_LOCK(sSems[slot]);

	// append slot to the free list
	GRAB_SEM_LIST_LOCK();
	free_sem_slot(slot, id + sMaxSems);
	atomic_add(&sUsedSems, -1);
	RELEASE_SEM_LIST_LOCK();

	if (releasedThreads > 0) {
		GRAB_THREAD_LOCK();
		while ((thread = thread_dequeue(&releaseQueue)) != NULL)
			scheduler_enqueue_in_run_queue(thread);

		scheduler_reschedule();
		RELEASE_THREAD_LOCK();
	}

	restore_interrupts(state);

	free(name);

	return B_OK;
}


status_t
acquire_sem(sem_id id)
{
	return switch_sem_etc(-1, id, 1, 0, 0);
}


status_t
acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	return switch_sem_etc(-1, id, count, flags, timeout);
}


status_t
switch_sem(sem_id toBeReleased, sem_id toBeAcquired)
{
	return switch_sem_etc(toBeReleased, toBeAcquired, 1, 0, 0);
}
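

/*	What switch_sem() is for, as a sketch (hypothetical caller code): it
	releases one semaphore and blocks on another in one step. Since
	switch_sem_etc() enqueues the caller on the semaphore to be acquired
	before releasing the other one, no wakeup can be lost in between:

		// give up the lock and wait for a signal in one step
		status_t status = switch_sem(lockSem, signalSem);
		if (status == B_OK)
			acquire_sem(lockSem);	// re-acquire the lock afterwards
*/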


status_t
switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
	uint32 flags, bigtime_t timeout)
{
	int slot = id % sMaxSems;
	int state;
	status_t status = B_OK;

	if (kernel_startup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	if (!are_interrupts_enabled()) {
		panic("switch_sem_etc: called with interrupts disabled for sem %ld\n",
			id);
	}

	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0
		|| (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
			== (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {
		return B_BAD_VALUE;
	}

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("switch_sem_etc: bad sem %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to acquire kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		status = B_NOT_ALLOWED;
		goto err;
	}

	if (sSems[slot].u.used.count - count < 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
			// immediate timeout
			status = B_WOULD_BLOCK;
			goto err;
		} else if ((flags & B_ABSOLUTE_TIMEOUT) != 0 && timeout < 0) {
			// absolute negative timeout
			status = B_TIMED_OUT;
			goto err;
		}
	}

	if ((sSems[slot].u.used.count -= count) < 0) {
		// we need to block
		struct thread *thread = thread_get_current_thread();
		timer timeout_timer;
			// stick it on the stack, since we may be blocking here
		struct sem_timeout_args args;

		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
			" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));

		// Do a quick check to see if the thread has any pending signals.
		// This should catch most of the cases where the thread had a signal.
		if (((flags & B_CAN_INTERRUPT)
				&& (thread->sig_pending & ~thread->sig_block_mask) != 0)
			|| ((flags & B_KILL_CAN_INTERRUPT)
				&& (thread->sig_pending & KILL_SIGNALS))) {
			sSems[slot].u.used.count += count;
			status = B_INTERRUPTED;
				// the other semaphore will be released later
			goto err;
		}

		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
			timeout = B_INFINITE_TIMEOUT;

		thread->next_state = B_THREAD_WAITING;
		thread->sem.flags = flags;
		thread->sem.blocking = id;
		thread->sem.acquire_count = count;
		thread->sem.count = min_c(-sSems[slot].u.used.count, count);
			// store the count we need to restore upon release
		thread->sem.acquire_status = B_NO_ERROR;
		thread_enqueue(thread, &sSems[slot].u.used.queue);

		if (timeout != B_INFINITE_TIMEOUT) {
			TRACE(("switch_sem_etc: setting timeout sem for %Ld usecs, sem %ld, thread %ld\n",
				timeout, id, thread->id));

			// set up an event to go off with the thread struct as the data
			args.blocked_sem_id = id;
			args.blocked_thread = thread->id;
			args.sem_count = count;

			// ToDo: another evil hack: pass the args into timer->entry.prev
			timeout_timer.entry.prev = (qent *)&args;
			add_timer(&timeout_timer, &sem_timeout, timeout,
				flags & B_RELATIVE_TIMEOUT
					? B_ONE_SHOT_RELATIVE_TIMER : B_ONE_SHOT_ABSOLUTE_TIMER);
		}

		RELEASE_SEM_LOCK(sSems[slot]);

		if (semToBeReleased >= B_OK) {
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
			semToBeReleased = -1;
		}

		GRAB_THREAD_LOCK();
		// Check again to see if a signal is pending. It may have been
		// delivered while setting up the sem, though that's pretty unlikely.
		if (((flags & B_CAN_INTERRUPT)
				&& (thread->sig_pending & ~thread->sig_block_mask) != 0)
			|| ((flags & B_KILL_CAN_INTERRUPT)
				&& (thread->sig_pending & KILL_SIGNALS))) {
			struct thread_queue wakeupQueue;
			// A tiny race happened: a signal was delivered to this thread
			// while it was setting up the sem. Since the thread lock is held
			// now, no further signal can be delivered here. The previous
			// check catches most instances, but because of the race we have
			// to handle this case, too - and it's messier...
			clear_thread_queue(&wakeupQueue);
			GRAB_SEM_LOCK(sSems[slot]);
			if (sSems[slot].id == id) {
				remove_thread_from_sem(thread, &sSems[slot], &wakeupQueue,
					B_INTERRUPTED, true);
			}
			RELEASE_SEM_LOCK(sSems[slot]);

			struct thread *wakeupThread;
			while ((wakeupThread = thread_dequeue(&wakeupQueue)) != NULL)
				scheduler_enqueue_in_run_queue(wakeupThread);

			// fall through and reschedule, since another thread with a
			// higher priority may have been woken up
		}
		scheduler_reschedule();
		RELEASE_THREAD_LOCK();

		if (timeout != B_INFINITE_TIMEOUT) {
			if (thread->sem.acquire_status != B_TIMED_OUT) {
				// cancel the timer event; the sem may have been deleted or
				// interrupted with the timer still active
				cancel_timer(&timeout_timer);
			}
		}

#ifdef DEBUG_LAST_ACQUIRER
		if (thread->sem.acquire_status >= B_OK) {
			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
			sSems[slot].u.used.last_acquire_count = count;
		}
#endif

		restore_interrupts(state);

		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
			thread->name));
		return thread->sem.acquire_status;
	} else {
#ifdef DEBUG_LAST_ACQUIRER
		sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
		sSems[slot].u.used.last_acquire_count = count;
#endif
	}

err:
	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	if (status == B_INTERRUPTED && semToBeReleased >= B_OK) {
		// Depending on when we were interrupted, we still need to release
		// the other semaphore to always leave things in a consistent state.
		release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
	}

#if 0
	if (status == B_NOT_ALLOWED)
		_user_debugger("Thread tried to acquire kernel semaphore.");
#endif

	return status;
}


status_t
release_sem(sem_id id)
{
	return release_sem_etc(id, 1, 0);
}


status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	struct thread_queue releaseQueue;
	int32 slot = id % sMaxSems;
	cpu_status state;
	status_t status = B_OK;

	if (kernel_startup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		status = B_NOT_ALLOWED;
		goto err;
	}

#ifdef DEBUG_LAST_ACQUIRER
	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	// Clear out a queue we will use to hold all of the threads that we will
	// have to put back into the run list. This is done so the thread lock
	// won't be held while this sem's lock is held, since the two locks are
	// grabbed in the other order in sem_interrupt_thread().
	clear_thread_queue(&releaseQueue);

	if (flags & B_RELEASE_ALL) {
		count = -sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			goto err;
	}

	while (count > 0) {
		int delta = count;
		if (sSems[slot].u.used.count < 0) {
			struct thread *thread = thread_lookat_queue(&sSems[slot].u.used.queue);

			delta = min_c(count, thread->sem.count);
			thread->sem.count -= delta;
			if (thread->sem.count <= 0) {
				// release this thread
				thread = thread_dequeue(&sSems[slot].u.used.queue);
				thread_enqueue(thread, &releaseQueue);
				thread->sem.count = 0;
			}
		} else if (flags & B_RELEASE_IF_WAITING_ONLY)
			break;

		sSems[slot].u.used.count += delta;
		count -= delta;
	}

	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	RELEASE_SEM_LOCK(sSems[slot]);

	// pull off any items in the release queue and put them in the run queue
	if (releaseQueue.head != NULL) {
		struct thread *thread;

		GRAB_THREAD_LOCK();
		while ((thread = thread_dequeue(&releaseQueue)) != NULL) {
#if 0
			// temporarily place thread in a run queue with a higher priority
			// to boost it up
			thread->next_priority = thread->priority >= B_FIRST_REAL_TIME_PRIORITY
				? thread->priority : thread->priority + 1;
#endif
			scheduler_enqueue_in_run_queue(thread);
		}
		if ((flags & B_DO_NOT_RESCHEDULE) == 0)
			scheduler_reschedule();

		RELEASE_THREAD_LOCK();
	}
	goto outnolock;

err:
	RELEASE_SEM_LOCK(sSems[slot]);
outnolock:
	restore_interrupts(state);

	return status;
}
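

/*	Illustration of the release flags (hypothetical numbers, not code from
	this file): with a count of -2 (two waiters, each needing one unit),
	release_sem_etc(sem, 5, B_RELEASE_IF_WAITING_ONLY) wakes both waiters and
	then stops, leaving the count at 0 instead of 3. B_RELEASE_ALL computes
	the count to release by itself and thus wakes exactly the current
	waiters.
*/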


status_t
get_sem_count(sem_id id, int32 *_count)
{
	int slot;
	int state;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (_count == NULL)
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("sem_get_count: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	*_count = sSems[slot].u.used.count;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}


/*!	Called by the get_sem_info() macro. */
status_t
_get_sem_info(sem_id id, struct sem_info *info, size_t size)
{
	status_t status = B_OK;
	int state;
	int slot;

	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		status = B_BAD_SEM_ID;
		TRACE(("get_sem_info: invalid sem_id %ld\n", id));
	} else
		fill_sem_info(&sSems[slot], info, size);

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return status;
}


/*!	Called by the get_next_sem_info() macro. */
status_t
_get_next_sem_info(team_id team, int32 *_cookie, struct sem_info *info,
	size_t size)
{
	int state;
	int slot;
	bool found = false;

	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (_cookie == NULL || info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;

	if (team == B_CURRENT_TEAM)
		team = team_get_current_team_id();
	/* rejecting team < 0 also prevents matching sSems[].u.used.owner == -1,
	   which means the semaphore is owned by a port */
	if (team < 0 || !team_is_valid(team))
		return B_BAD_TEAM_ID;

	slot = *_cookie;
	if (slot >= sMaxSems)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	while (slot < sMaxSems) {
		if (sSems[slot].id != -1 && sSems[slot].u.used.owner == team) {
			GRAB_SEM_LOCK(sSems[slot]);
			if (sSems[slot].id != -1 && sSems[slot].u.used.owner == team) {
				// found one!
				fill_sem_info(&sSems[slot], info, size);

				RELEASE_SEM_LOCK(sSems[slot]);
				slot++;
				found = true;
				break;
			}
			RELEASE_SEM_LOCK(sSems[slot]);
		}
		slot++;
	}
	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	if (!found)
		return B_BAD_VALUE;

	*_cookie = slot;
	return B_OK;
}
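

/*	Typical cookie-based iteration (hypothetical caller code): the cookie is
	simply the next slot to inspect, starting at 0.

		int32 cookie = 0;
		sem_info info;
		while (_get_next_sem_info(team, &cookie, &info, sizeof(info)) == B_OK)
			dprintf("sem %ld: %s\n", info.sem, info.name);
*/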


status_t
set_sem_owner(sem_id id, team_id team)
{
	int state;
	int slot;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (team < 0 || !team_is_valid(team))
		return B_BAD_TEAM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("set_sem_owner: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// ToDo: this is a small race condition: the team ID could already
	// be invalid at this point - we would lose one semaphore slot in
	// this case!
	// The only safe way to do this is to prevent either team (the new
	// or the old owner) from dying until we leave the spinlock.
	sSems[slot].u.used.owner = team;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_NO_ERROR;
}


//	#pragma mark - Syscalls


sem_id
_user_create_sem(int32 count, const char *userName)
{
	char name[B_OS_NAME_LENGTH];

	if (userName == NULL)
		return create_sem_etc(count, NULL, team_get_current_team_id());

	if (!IS_USER_ADDRESS(userName)
		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
		return B_BAD_ADDRESS;

	return create_sem_etc(count, name, team_get_current_team_id());
}


status_t
_user_delete_sem(sem_id id)
{
	return delete_sem(id);
}


status_t
_user_acquire_sem(sem_id id)
{
	status_t error = switch_sem_etc(-1, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	return syscall_restart_handle_post(error);
}


status_t
_user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(-1, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	return syscall_restart_handle_timeout_post(error, timeout);
}


status_t
_user_switch_sem(sem_id releaseSem, sem_id id)
{
	status_t error = switch_sem_etc(releaseSem, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	if (releaseSem < 0)
		return syscall_restart_handle_post(error);

	return error;
}


status_t
_user_switch_sem_etc(sem_id releaseSem, sem_id id, int32 count, uint32 flags,
	bigtime_t timeout)
{
	if (releaseSem < 0)
		syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(releaseSem, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	if (releaseSem < 0)
		return syscall_restart_handle_timeout_post(error, timeout);

	return error;
}


status_t
_user_release_sem(sem_id id)
{
	return release_sem_etc(id, 1, B_CHECK_PERMISSION);
}


status_t
_user_release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	return release_sem_etc(id, count, flags | B_CHECK_PERMISSION);
}


status_t
_user_get_sem_count(sem_id id, int32 *userCount)
{
	status_t status;
	int32 count;

	if (userCount == NULL || !IS_USER_ADDRESS(userCount))
		return B_BAD_ADDRESS;

	status = get_sem_count(id, &count);
	if (status == B_OK && user_memcpy(userCount, &count, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_sem_info(sem_id id, struct sem_info *userInfo, size_t size)
{
	struct sem_info info;
	status_t status;

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_sem_info(id, &info, size);
	if (status == B_OK && user_memcpy(userInfo, &info, size) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_next_sem_info(team_id team, int32 *userCookie, struct sem_info *userInfo,
	size_t size)
{
	struct sem_info info;
	int32 cookie;
	status_t status;

	if (userCookie == NULL || userInfo == NULL
		|| !IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_sem_info(team, &cookie, &info, size);

	if (status == B_OK) {
		if (user_memcpy(userInfo, &info, size) < B_OK
			|| user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK)
			return B_BAD_ADDRESS;
	}

	return status;
}


status_t
_user_set_sem_owner(sem_id id, team_id team)
{
	return set_sem_owner(id, team);
}
1444