xref: /haiku/src/system/kernel/sem.cpp (revision ed6250c95736c0b55da79d6e9dd01369532260c0)
/*
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*! Semaphore code */


#include <OS.h>

#include <sem.h>
#include <kernel.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <smp.h>
#include <int.h>
#include <arch/int.h>
#include <debug.h>
#include <thread.h>
#include <team.h>
#include <vfs.h>
#include <vm_low_memory.h>
#include <vm_page.h>
#include <boot/kernel_args.h>
#include <syscall_restart.h>
#include <wait_for_objects.h>

#include <string.h>
#include <stdlib.h>


//#define TRACE_SEM
#ifdef TRACE_SEM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#define DEBUG_LAST_ACQUIRER

struct sem_entry {
	sem_id		id;
	spinlock	lock;	// protects only the id field when unused
	union {
		// when slot in use
		struct {
			int32				count;
			struct thread_queue queue;
			char				*name;
			team_id				owner;	// if set to -1, means owned by a port
			select_info			*select_infos;
#ifdef DEBUG_LAST_ACQUIRER
			thread_id			last_acquirer;
			int32				last_acquire_count;
			thread_id			last_releaser;
			int32				last_release_count;
#endif
		} used;

		// when slot unused
		struct {
			sem_id				next_id;
			struct sem_entry	*next;
		} unused;
	} u;
};

static const int32 kMaxSemaphores = 131072;
static int32 sMaxSems = 4096;
	// Final value is computed based on the amount of available memory
static int32 sUsedSems = 0;

static struct sem_entry *sSems = NULL;
static bool sSemsActive = false;
static struct sem_entry	*sFreeSemsHead = NULL;
static struct sem_entry	*sFreeSemsTail = NULL;

static spinlock sem_spinlock = 0;
#define GRAB_SEM_LIST_LOCK()     acquire_spinlock(&sem_spinlock)
#define RELEASE_SEM_LIST_LOCK()  release_spinlock(&sem_spinlock)
#define GRAB_SEM_LOCK(s)         acquire_spinlock(&(s).lock)
#define RELEASE_SEM_LOCK(s)      release_spinlock(&(s).lock)

static int remove_thread_from_sem(struct thread *thread, struct sem_entry *sem,
	struct thread_queue *queue, status_t acquireStatus, bool hasThreadLock);

struct sem_timeout_args {
	thread_id	blocked_thread;
	sem_id		blocked_sem_id;
	int32		sem_count;
};


static int
dump_sem_list(int argc, char **argv)
{
	const char *name = NULL;
	team_id owner = -1;
#ifdef DEBUG_LAST_ACQUIRER
	thread_id last = -1;
#endif
	int32 i;

	if (argc > 2) {
		if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
			owner = strtoul(argv[2], NULL, 0);
		else if (!strcmp(argv[1], "name"))
			name = argv[2];
#ifdef DEBUG_LAST_ACQUIRER
		else if (!strcmp(argv[1], "last"))
			last = strtoul(argv[2], NULL, 0);
#endif
	} else if (argc > 1)
		owner = strtoul(argv[1], NULL, 0);

	kprintf("sem            id count   team"
#ifdef DEBUG_LAST_ACQUIRER
		"   last"
#endif
		"  name\n");

	for (i = 0; i < sMaxSems; i++) {
		struct sem_entry *sem = &sSems[i];
		if (sem->id < 0
#ifdef DEBUG_LAST_ACQUIRER
			|| (last != -1 && sem->u.used.last_acquirer != last)
#endif
			|| (name != NULL && strstr(sem->u.used.name, name) == NULL)
			|| (owner != -1 && sem->u.used.owner != owner))
			continue;

		kprintf("%p %6ld %5ld %6ld "
#ifdef DEBUG_LAST_ACQUIRER
			"%6ld "
#endif
			" %s\n", sem, sem->id, sem->u.used.count,
			sem->u.used.owner,
#ifdef DEBUG_LAST_ACQUIRER
			sem->u.used.last_acquirer > 0 ? sem->u.used.last_acquirer : 0,
#endif
			sem->u.used.name);
	}

	return 0;
}


static void
dump_sem(struct sem_entry *sem)
{
	kprintf("SEM: %p\n", sem);
	kprintf("id:      %ld (%#lx)\n", sem->id, sem->id);
	if (sem->id >= 0) {
		kprintf("name:    '%s'\n", sem->u.used.name);
		kprintf("owner:   %ld\n", sem->u.used.owner);
		kprintf("count:   %ld\n", sem->u.used.count);
		kprintf("queue:  ");
		if (sem->u.used.queue.head != NULL) {
			struct thread *thread = sem->u.used.queue.head;
			while (thread != NULL) {
				kprintf(" %ld", thread->id);
				thread = thread->queue_next;
			}
			kprintf("\n");
		} else
			kprintf(" -\n");

		set_debug_variable("_sem", (addr_t)sem);
		set_debug_variable("_semID", sem->id);
		set_debug_variable("_owner", sem->u.used.owner);

#ifdef DEBUG_LAST_ACQUIRER
		kprintf("last acquired by: %ld, count: %ld\n", sem->u.used.last_acquirer,
			sem->u.used.last_acquire_count);
		kprintf("last released by: %ld, count: %ld\n", sem->u.used.last_releaser,
			sem->u.used.last_release_count);

		if (sem->u.used.last_acquirer != 0)
			set_debug_variable("_acquirer", sem->u.used.last_acquirer);
		else
			unset_debug_variable("_acquirer");

		if (sem->u.used.last_releaser != 0)
			set_debug_variable("_releaser", sem->u.used.last_releaser);
		else
			unset_debug_variable("_releaser");
#endif
	} else {
		kprintf("next:    %p\n", sem->u.unused.next);
		kprintf("next_id: %ld\n", sem->u.unused.next_id);
	}
}


static int
dump_sem_info(int argc, char **argv)
{
	bool found = false;
	addr_t num;
	int32 i;

	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	num = strtoul(argv[1], NULL, 0);

	if (IS_KERNEL_ADDRESS(num)) {
		dump_sem((struct sem_entry *)num);
		return 0;
	} else if (num > 0) {
		uint32 slot = num % sMaxSems;
		if (sSems[slot].id != (int)num) {
			kprintf("sem %ld (%#lx) doesn't exist!\n", num, num);
			return 0;
		}

		dump_sem(&sSems[slot]);
		return 0;
	}

	// walk through the sem list, trying to match name
	for (i = 0; i < sMaxSems; i++) {
		if (sSems[i].u.used.name != NULL
			&& strcmp(argv[1], sSems[i].u.used.name) == 0) {
			dump_sem(&sSems[i]);
			found = true;
		}
	}

	if (!found)
		kprintf("sem \"%s\" doesn't exist!\n", argv[1]);
	return 0;
}


static inline void
clear_thread_queue(struct thread_queue *queue)
{
	queue->head = queue->tail = NULL;
}


/*!	\brief Appends a semaphore slot to the free list.

	The semaphore list must be locked.
	The slot's id field is not changed. It should already be set to -1.

	\param slot The index of the semaphore slot.
	\param nextID The ID the slot will get when reused. If < 0, the \a slot
		   index is used instead.
*/
static void
free_sem_slot(int slot, sem_id nextID)
{
	struct sem_entry *sem = sSems + slot;
	// set next_id to the ID the slot shall get when reused; fall back to
	// the slot index if the given nextID is invalid
	if (nextID < 0)
		sem->u.unused.next_id = slot;
	else
		sem->u.unused.next_id = nextID;
	// append the entry to the list
	if (sFreeSemsTail)
		sFreeSemsTail->u.unused.next = sem;
	else
		sFreeSemsHead = sem;
	sFreeSemsTail = sem;
	sem->u.unused.next = NULL;
}
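

/*!	Illustrative sketch (not compiled in): how semaphore IDs relate to table
	slots. A sem_id always hashes back to its slot via "id % sMaxSems";
	delete_sem() frees the slot with free_sem_slot(slot, id + sMaxSems), so
	the reused slot hands out an ID from a new "generation", and stale IDs
	are caught by the "sSems[slot].id != id" checks throughout this file.
*/
#if 0
static void
id_recycling_example(sem_id id)
{
	int32 slot = id % sMaxSems;			// the slot this ID lives in
	sem_id nextID = id + sMaxSems;		// the ID handed out on reuse

	ASSERT(nextID % sMaxSems == slot);	// same slot...
	ASSERT(nextID != id);				// ...but old handles become stale
}
#endif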


static inline void
notify_sem_select_events(struct sem_entry* sem, uint16 events)
{
	if (sem->u.used.select_infos)
		notify_select_events_list(sem->u.used.select_infos, events);
}


/*!	Called from a timer handler. Wakes up a thread that timed out waiting
	on a semaphore.
*/
static int32
sem_timeout(timer *data)
{
	struct sem_timeout_args *args = (struct sem_timeout_args *)data->entry.prev;
	struct thread *thread;
	int slot;
	int state;
	struct thread_queue wakeupQueue;

	thread = thread_get_thread_struct(args->blocked_thread);
	if (thread == NULL)
		return B_HANDLED_INTERRUPT;
	slot = args->blocked_sem_id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	TRACE(("sem_timeout: called on %p sem %ld, thread %ld\n",
		data, args->blocked_sem_id, args->blocked_thread));

	if (sSems[slot].id != args->blocked_sem_id) {
		// this thread was not waiting on this semaphore
		panic("sem_timeout: thread %ld was trying to wait on sem %ld which "
			"doesn't exist!\n", args->blocked_thread, args->blocked_sem_id);

		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		return B_HANDLED_INTERRUPT;
	}

	clear_thread_queue(&wakeupQueue);
	remove_thread_from_sem(thread, &sSems[slot], &wakeupQueue, B_TIMED_OUT,
		false);

	RELEASE_SEM_LOCK(sSems[slot]);

	GRAB_THREAD_LOCK();
	// put the threads into the run queue here to make sure we don't
	// deadlock in sem_interrupt_thread()
	while ((thread = thread_dequeue(&wakeupQueue)) != NULL)
		scheduler_enqueue_in_run_queue(thread);

	RELEASE_THREAD_LOCK();

	restore_interrupts(state);

	return B_INVOKE_SCHEDULER;
}


/*!	Fills the sem_info structure with information from the specified
	semaphore.
	The semaphore's lock must be held when called.
*/
static void
fill_sem_info(struct sem_entry *sem, sem_info *info, size_t size)
{
	info->sem = sem->id;
	info->team = sem->u.used.owner;
	strlcpy(info->name, sem->u.used.name, sizeof(info->name));
	info->count = sem->u.used.count;

	// ToDo: not sure if this is the latest holder, or the next
	// holder...
	if (sem->u.used.queue.head != NULL)
		info->latest_holder = sem->u.used.queue.head->id;
	else
		info->latest_holder = -1;
}


//	#pragma mark - Private Kernel API


status_t
sem_init(kernel_args *args)
{
	area_id area;
	int32 i;

	TRACE(("sem_init: entry\n"));

	// compute maximal number of semaphores depending on the available memory
	// 128 MB -> 16384 semaphores, 448 kB fixed array size
	// 256 MB -> 32768, 896 kB
	// 512 MB -> 65536, 1.75 MB
	// 1024 MB and more -> 131072, 3.5 MB
	i = vm_page_num_pages() / 2;
	while (sMaxSems < i && sMaxSems < kMaxSemaphores)
		sMaxSems <<= 1;

	// create and initialize semaphore table
	area = create_area("sem_table", (void **)&sSems, B_ANY_KERNEL_ADDRESS,
		sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		panic("unable to allocate semaphore table!\n");

	memset(sSems, 0, sizeof(struct sem_entry) * sMaxSems);
	for (i = 0; i < sMaxSems; i++) {
		sSems[i].id = -1;
		free_sem_slot(i, i);
	}

	// add debugger commands
	add_debugger_command_etc("sems", &dump_sem_list,
		"Dump a list of all active semaphores (for team, with name, etc.)",
		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]"
#ifdef DEBUG_LAST_ACQUIRER
			" | (\"last\" <last acquirer>)"
#endif
		"\n"
		"Prints a list of all active semaphores meeting the given\n"
		"requirement. If no argument is given, all sems are listed.\n"
		"  <team>             - The team owning the semaphores.\n"
		"  <name>             - Part of the name of the semaphores.\n"
#ifdef DEBUG_LAST_ACQUIRER
		"  <last acquirer>    - The thread that last acquired the semaphore.\n"
#endif
		, 0);
	add_debugger_command_etc("sem", &dump_sem_info,
		"Dump info about a particular semaphore",
		"<sem>\n"
		"Prints info about the specified semaphore.\n"
		"  <sem>  - pointer to the semaphore structure, semaphore ID, or name\n"
		"           of the semaphore to print info for.\n", 0);

	TRACE(("sem_init: exit\n"));

	sSemsActive = true;

	return 0;
}


/*!	Creates a semaphore with the given parameters.
	Note that the team_id is not checked; it must be correct, or else
	the semaphore might never be deleted.
	This function is only available from within the kernel, and
	should not be made public - if possible, we should remove it
	completely (and have only create_sem() exported).
*/
sem_id
create_sem_etc(int32 count, const char *name, team_id owner)
{
	struct sem_entry *sem = NULL;
	cpu_status state;
	sem_id id = B_NO_MORE_SEMS;
	char *tempName;
	size_t nameLength;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

#if 0
	// TODO: the code below might cause unwanted deadlocks,
	// we need an asynchronously running low resource handler.
	if (sUsedSems == sMaxSems) {
		// The vnode cache may have collected lots of semaphores.
		// Freeing some unused vnodes should improve our situation.
		// TODO: maybe create a generic "low resources" handler, instead
		//	of only the specialised low memory thing?
		vfs_free_unused_vnodes(B_LOW_MEMORY_WARNING);
	}
	if (sUsedSems == sMaxSems) {
		// try again with more enthusiasm
		vfs_free_unused_vnodes(B_LOW_MEMORY_CRITICAL);
	}
#endif
	if (sUsedSems == sMaxSems)
		return B_NO_MORE_SEMS;

	if (name == NULL)
		name = "unnamed semaphore";

	nameLength = strlen(name) + 1;
	nameLength = min_c(nameLength, B_OS_NAME_LENGTH);
	tempName = (char *)malloc(nameLength);
	if (tempName == NULL)
		return B_NO_MEMORY;
	strlcpy(tempName, name, nameLength);

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	// get the first slot from the free list
	sem = sFreeSemsHead;
	if (sem) {
		// remove it from the free list
		sFreeSemsHead = sem->u.unused.next;
		if (!sFreeSemsHead)
			sFreeSemsTail = NULL;

		// init the slot
		GRAB_SEM_LOCK(*sem);
		sem->id = sem->u.unused.next_id;
		sem->u.used.count = count;
		clear_thread_queue(&sem->u.used.queue);
		sem->u.used.name = tempName;
		sem->u.used.owner = owner;
		sem->u.used.select_infos = NULL;
		id = sem->id;
		RELEASE_SEM_LOCK(*sem);

		atomic_add(&sUsedSems, 1);
	}

	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	if (!sem)
		free(tempName);

	return id;
}
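

/*!	A minimal usage sketch (not compiled in, names are hypothetical): creating
	a kernel-owned semaphore and tearing it down again. Deleting the semaphore
	also unblocks any waiters, which then return B_BAD_SEM_ID.
*/
#if 0
static sem_id sMyDriverLock = -1;

static status_t
my_driver_init(void)
{
	sMyDriverLock = create_sem_etc(1, "my_driver_lock",
		team_get_kernel_team_id());
	if (sMyDriverLock < B_OK)
		return sMyDriverLock;
	return B_OK;
}

static void
my_driver_uninit(void)
{
	delete_sem(sMyDriverLock);
	sMyDriverLock = -1;
}
#endif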


status_t
select_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;
	status_t error = B_OK;

	if (id < 0)
		return B_BAD_SEM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		// bad sem ID
		error = B_BAD_SEM_ID;
	} else if (!kernel
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		// kernel semaphore, but call from userland
		error = B_NOT_ALLOWED;
	} else {
		info->selected_events &= B_EVENT_ACQUIRE_SEMAPHORE | B_EVENT_INVALID;

		if (info->selected_events != 0) {
			info->next = sSems[slot].u.used.select_infos;
			sSems[slot].u.used.select_infos = info;

			if (sSems[slot].u.used.count > 0)
				notify_select_events(info, B_EVENT_ACQUIRE_SEMAPHORE);
		}
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return error;
}


status_t
deselect_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;

	if (id < 0)
		return B_BAD_SEM_ID;

	if (info->selected_events == 0)
		return B_OK;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id == id) {
		select_info** infoLocation = &sSems[slot].u.used.select_infos;
		while (*infoLocation != NULL && *infoLocation != info)
			infoLocation = &(*infoLocation)->next;

		if (*infoLocation == info)
			*infoLocation = info->next;
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}


/*!	Wakes up a thread that's blocked on a semaphore.
	This function must be entered with interrupts disabled and the thread
	lock held.
*/
status_t
sem_interrupt_thread(struct thread *thread)
{
	struct thread_queue wakeupQueue;
	int32 slot;

	TRACE(("sem_interrupt_thread: called on thread %p (%ld), blocked on sem %ld\n",
		thread, thread->id, thread->sem.blocking));

	if (thread->state != B_THREAD_WAITING || thread->sem.blocking < 0)
		return B_BAD_VALUE;
	if ((thread->sem.flags & B_CAN_INTERRUPT) == 0
		&& ((thread->sem.flags & B_KILL_CAN_INTERRUPT) == 0
			|| (thread->sig_pending & KILL_SIGNALS) == 0)) {
		return B_NOT_ALLOWED;
	}

	slot = thread->sem.blocking % sMaxSems;

	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != thread->sem.blocking) {
		panic("sem_interrupt_thread: thread %ld blocks on sem %ld, but that "
			"sem doesn't exist!\n", thread->id, thread->sem.blocking);
	}

	clear_thread_queue(&wakeupQueue);
	status_t result = remove_thread_from_sem(thread, &sSems[slot],
		&wakeupQueue, B_INTERRUPTED, true);

	RELEASE_SEM_LOCK(sSems[slot]);

	if (result != B_OK) {
		// The thread is not in the wait queue anymore. Probably it just timed
		// out before we locked the sem.
		return result;
	}

	while ((thread = thread_dequeue(&wakeupQueue)) != NULL)
		scheduler_enqueue_in_run_queue(thread);

	return B_OK;
}


/*!	Forcibly removes a thread from a semaphore's wait queue. May have to wake
	up other threads in the process. All threads that need to be woken up are
	added to the passed-in thread_queue.
	Must be called with the semaphore lock held.
*/
static int
remove_thread_from_sem(struct thread *thread, struct sem_entry *sem,
	struct thread_queue *queue, status_t acquireStatus, bool hasThreadLock)
{
	// remove the thread from the queue and place it in the supplied queue
	if (thread_dequeue_id(&sem->u.used.queue, thread->id) != thread)
		return B_ENTRY_NOT_FOUND;

	sem->u.used.count += thread->sem.acquire_count;
	thread->sem.acquire_status = acquireStatus;
	thread_enqueue(thread, queue);

	// now see if more threads need to be woken up; peek at the next waiter
	while (sem->u.used.count > 0
		&& (thread = thread_lookat_queue(&sem->u.used.queue)) != NULL) {
		int32 delta = min_c(thread->sem.count, sem->u.used.count);

		thread->sem.count -= delta;
		if (thread->sem.count <= 0) {
			thread = thread_dequeue(&sem->u.used.queue);
			thread_enqueue(thread, queue);
		}
		sem->u.used.count -= delta;
	}

	if (sem->u.used.count > 0 && sem->u.used.select_infos != NULL) {
		if (hasThreadLock)
			RELEASE_THREAD_LOCK();

		notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE);

		if (hasThreadLock)
			GRAB_THREAD_LOCK();
	}

	return B_OK;
}


/*!	This function cycles through the sem table, deleting all the sems
	that are owned by the specified team.
*/
int
sem_delete_owned_sems(team_id owner)
{
	int state;
	int i;
	int count = 0;

	// ToDo: that looks horribly inefficient - maybe it would be better
	//	to have them in a list in the team

	if (owner < 0)
		return B_BAD_TEAM_ID;

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	for (i = 0; i < sMaxSems; i++) {
		if (sSems[i].id != -1 && sSems[i].u.used.owner == owner) {
			sem_id id = sSems[i].id;

			RELEASE_SEM_LIST_LOCK();
			restore_interrupts(state);

			delete_sem(id);
			count++;

			state = disable_interrupts();
			GRAB_SEM_LIST_LOCK();
		}
	}

	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	return count;
}


int32
sem_max_sems(void)
{
	return sMaxSems;
}


int32
sem_used_sems(void)
{
	return sUsedSems;
}


//	#pragma mark - Public Kernel API


sem_id
create_sem(int32 count, const char *name)
{
	return create_sem_etc(count, name, team_get_kernel_team_id());
}


status_t
delete_sem(sem_id id)
{
	struct thread_queue releaseQueue;
	int32 releasedThreads;
	struct thread *thread;
	cpu_status state;
	int32 slot;
	char *name;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("delete_sem: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	notify_sem_select_events(&sSems[slot], B_EVENT_INVALID);
	sSems[slot].u.used.select_infos = NULL;

	releasedThreads = 0;
	clear_thread_queue(&releaseQueue);

	// free any threads waiting for this semaphore
	while ((thread = thread_dequeue(&sSems[slot].u.used.queue)) != NULL) {
		thread->sem.acquire_status = B_BAD_SEM_ID;
		thread->sem.count = 0;
		thread_enqueue(thread, &releaseQueue);
		releasedThreads++;
	}

	sSems[slot].id = -1;
	name = sSems[slot].u.used.name;
	sSems[slot].u.used.name = NULL;

	RELEASE_SEM_LOCK(sSems[slot]);

	// append slot to the free list
	GRAB_SEM_LIST_LOCK();
	free_sem_slot(slot, id + sMaxSems);
	atomic_add(&sUsedSems, -1);
	RELEASE_SEM_LIST_LOCK();

	if (releasedThreads > 0) {
		GRAB_THREAD_LOCK();
		while ((thread = thread_dequeue(&releaseQueue)) != NULL)
			scheduler_enqueue_in_run_queue(thread);

		scheduler_reschedule();
		RELEASE_THREAD_LOCK();
	}

	restore_interrupts(state);

	free(name);

	return B_OK;
}


status_t
acquire_sem(sem_id id)
{
	return switch_sem_etc(-1, id, 1, 0, 0);
}


status_t
acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	return switch_sem_etc(-1, id, count, flags, timeout);
}
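

/*!	Usage sketch (not compiled in): acquiring with a relative timeout. With
	B_RELATIVE_TIMEOUT and a timeout of 0 this degenerates to a try-acquire
	that fails immediately with B_WOULD_BLOCK; a positive timeout yields
	B_TIMED_OUT once it expires (see the immediate-timeout check and
	sem_timeout() above).
*/
#if 0
static status_t
acquire_with_one_second_timeout(sem_id sem)
{
	status_t status = acquire_sem_etc(sem, 1, B_RELATIVE_TIMEOUT, 1000000LL);
	if (status == B_TIMED_OUT)
		dprintf("semaphore still held after one second\n");
	return status;
		// B_OK on success; B_BAD_SEM_ID, B_INTERRUPTED, ... otherwise
}
#endif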


status_t
switch_sem(sem_id toBeReleased, sem_id toBeAcquired)
{
	return switch_sem_etc(toBeReleased, toBeAcquired, 1, 0, 0);
}
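

/*!	Usage sketch (not compiled in): switch_sem() atomically trades one
	semaphore for another - the caller is enqueued on the semaphore to be
	acquired before the other one is released (see switch_sem_etc() below),
	so no wakeup can be lost in between.
*/
#if 0
static void
wait_for_event(sem_id lock, sem_id event)
{
	// instead of "release_sem(lock); acquire_sem(event);", which leaves
	// a window in which a wakeup on "event" could be missed:
	switch_sem(lock, event);
}
#endif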


status_t
switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
	uint32 flags, bigtime_t timeout)
{
	int slot = id % sMaxSems;
	int state;
	status_t status = B_OK;

	if (kernel_startup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	if (!are_interrupts_enabled()) {
		panic("switch_sem_etc: called with interrupts disabled for sem %ld\n",
			id);
	}

	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0
		|| (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
			== (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {
		return B_BAD_VALUE;
	}

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("switch_sem_etc: bad sem %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to acquire kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		status = B_NOT_ALLOWED;
		goto err;
	}

	if (sSems[slot].u.used.count - count < 0
		&& (flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
		// immediate timeout
		status = B_WOULD_BLOCK;
		goto err;
	}

	if ((sSems[slot].u.used.count -= count) < 0) {
		// we need to block
		struct thread *thread = thread_get_current_thread();
		timer timeout_timer; // stick it on the stack, since we may be blocking here
		struct sem_timeout_args args;

		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
			" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));

		// do a quick check to see if the thread has any pending signals
		// this should catch most of the cases where the thread had a signal
		if (((flags & B_CAN_INTERRUPT)
				&& (thread->sig_pending & ~thread->sig_block_mask) != 0)
			|| ((flags & B_KILL_CAN_INTERRUPT)
				&& (thread->sig_pending & KILL_SIGNALS))) {
			sSems[slot].u.used.count += count;
			status = B_INTERRUPTED;
				// the other semaphore will be released later
			goto err;
		}

		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
			timeout = B_INFINITE_TIMEOUT;

		thread->next_state = B_THREAD_WAITING;
		thread->sem.flags = flags;
		thread->sem.blocking = id;
		thread->sem.acquire_count = count;
		thread->sem.count = min_c(-sSems[slot].u.used.count, count);
			// store the count we need to restore upon release
		thread->sem.acquire_status = B_NO_ERROR;
		thread_enqueue(thread, &sSems[slot].u.used.queue);

		if (timeout != B_INFINITE_TIMEOUT) {
			TRACE(("switch_sem_etc: setting timeout sem for %Ld usecs, sem %ld, thread %ld\n",
				timeout, id, thread->id));

			// set up an event to go off with the thread struct as the data
			args.blocked_sem_id = id;
			args.blocked_thread = thread->id;
			args.sem_count = count;

			// ToDo: another evil hack: pass the args into timer->entry.prev
			timeout_timer.entry.prev = (qent *)&args;
			add_timer(&timeout_timer, &sem_timeout, timeout,
				flags & B_RELATIVE_TIMEOUT ?
					B_ONE_SHOT_RELATIVE_TIMER : B_ONE_SHOT_ABSOLUTE_TIMER);
		}

		RELEASE_SEM_LOCK(sSems[slot]);

		if (semToBeReleased >= B_OK) {
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
			semToBeReleased = -1;
		}

		GRAB_THREAD_LOCK();
		// check again to see if a signal is pending - it may have been
		// delivered while we were setting up the sem, though it's pretty
		// unlikely
		if (((flags & B_CAN_INTERRUPT)
				&& (thread->sig_pending & ~thread->sig_block_mask) != 0)
			|| ((flags & B_KILL_CAN_INTERRUPT)
				&& (thread->sig_pending & KILL_SIGNALS))) {
			struct thread_queue wakeupQueue;
			// A tiny race: a signal may have been delivered to this thread
			// while it was setting up the sem. Since the thread lock is now
			// held, no further signal can arrive. The previous check caught
			// most instances, but the race window remains, so it has to be
			// handled here - and it's messier...
			clear_thread_queue(&wakeupQueue);
			GRAB_SEM_LOCK(sSems[slot]);
			if (sSems[slot].id == id) {
				remove_thread_from_sem(thread, &sSems[slot], &wakeupQueue,
					B_INTERRUPTED, true);
			}
			RELEASE_SEM_LOCK(sSems[slot]);

			struct thread *wakeupThread;
			while ((wakeupThread = thread_dequeue(&wakeupQueue)) != NULL)
				scheduler_enqueue_in_run_queue(wakeupThread);

			// fall through and reschedule, since another thread with a
			// higher priority may have been woken up
		}
		scheduler_reschedule();
		RELEASE_THREAD_LOCK();

		if (timeout != B_INFINITE_TIMEOUT) {
			if (thread->sem.acquire_status != B_TIMED_OUT) {
				// cancel the timer event, the sem may have been deleted or
				// interrupted with the timer still active
				cancel_timer(&timeout_timer);
			}
		}

#ifdef DEBUG_LAST_ACQUIRER
		if (thread->sem.acquire_status >= B_OK) {
			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
			sSems[slot].u.used.last_acquire_count = count;
		}
#endif

		restore_interrupts(state);

		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
			thread->name));
		return thread->sem.acquire_status;
	} else {
#ifdef DEBUG_LAST_ACQUIRER
		sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
		sSems[slot].u.used.last_acquire_count = count;
#endif
	}

err:
	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	if (status == B_INTERRUPTED && semToBeReleased >= B_OK) {
		// depending on when we were interrupted, we still need to release
		// the semaphore to leave things in a consistent state
		release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
	}

#if 0
	if (status == B_NOT_ALLOWED)
		_user_debugger("Thread tried to acquire kernel semaphore.");
#endif

	return status;
}


status_t
release_sem(sem_id id)
{
	return release_sem_etc(id, 1, 0);
}


status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	struct thread_queue releaseQueue;
	int32 slot = id % sMaxSems;
	cpu_status state;
	status_t status = B_OK;

	if (kernel_startup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		status = B_NOT_ALLOWED;
		goto err;
	}

#ifdef DEBUG_LAST_ACQUIRER
	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	// Clear out a queue we will use to hold all of the threads that we will
	// have to put back into the run queue. This is done so the thread lock
	// won't be held while this sem's lock is held, since the two locks are
	// grabbed in the opposite order in sem_interrupt_thread().
	clear_thread_queue(&releaseQueue);

	if (flags & B_RELEASE_ALL) {
		count = -sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			goto err;
	}

	while (count > 0) {
		int delta = count;
		if (sSems[slot].u.used.count < 0) {
			struct thread *thread = thread_lookat_queue(&sSems[slot].u.used.queue);

			delta = min_c(count, thread->sem.count);
			thread->sem.count -= delta;
			if (thread->sem.count <= 0) {
				// release this thread
				thread = thread_dequeue(&sSems[slot].u.used.queue);
				thread_enqueue(thread, &releaseQueue);
				thread->sem.count = 0;
			}
		} else if (flags & B_RELEASE_IF_WAITING_ONLY)
			break;

		sSems[slot].u.used.count += delta;
		count -= delta;
	}

	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	RELEASE_SEM_LOCK(sSems[slot]);

	// pull off any items in the release queue and put them in the run queue
	if (releaseQueue.head != NULL) {
		struct thread *thread;

		GRAB_THREAD_LOCK();
		while ((thread = thread_dequeue(&releaseQueue)) != NULL) {
#if 0
			// temporarily place thread in a run queue with a higher priority
			// to boost it up
			thread->next_priority = thread->priority >= B_FIRST_REAL_TIME_PRIORITY ?
				thread->priority : thread->priority + 1;
#endif
			scheduler_enqueue_in_run_queue(thread);
		}
		if ((flags & B_DO_NOT_RESCHEDULE) == 0)
			scheduler_reschedule();

		RELEASE_THREAD_LOCK();
	}
	goto outnolock;

err:
	RELEASE_SEM_LOCK(sSems[slot]);
outnolock:
	restore_interrupts(state);

	return status;
}
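

/*!	Usage sketch (not compiled in): the less common release flags, as handled
	above. With B_RELEASE_ALL the count argument is recomputed internally to
	wake every waiter; B_RELEASE_IF_WAITING_ONLY only releases when a thread
	is actually queued; B_DO_NOT_RESCHEDULE defers the scheduler invocation.
*/
#if 0
static void
release_flag_examples(sem_id sem)
{
	// wake one waiter, but don't reschedule right away
	release_sem_etc(sem, 1, B_DO_NOT_RESCHEDULE);

	// release exactly enough to wake all currently waiting threads
	release_sem_etc(sem, 0, B_RELEASE_ALL);

	// release one unit, but only if someone is waiting
	release_sem_etc(sem, 1, B_RELEASE_IF_WAITING_ONLY);
}
#endif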


status_t
get_sem_count(sem_id id, int32 *_count)
{
	int slot;
	int state;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (_count == NULL)
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("sem_get_count: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	*_count = sSems[slot].u.used.count;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}


/*!	Called by the get_sem_info() macro. */
status_t
_get_sem_info(sem_id id, struct sem_info *info, size_t size)
{
	status_t status = B_OK;
	int state;
	int slot;

	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		status = B_BAD_SEM_ID;
		TRACE(("get_sem_info: invalid sem_id %ld\n", id));
	} else
		fill_sem_info(&sSems[slot], info, size);

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return status;
}


/*!	Called by the get_next_sem_info() macro. */
status_t
_get_next_sem_info(team_id team, int32 *_cookie, struct sem_info *info,
	size_t size)
{
	int state;
	int slot;
	bool found = false;

	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (_cookie == NULL || info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;

	if (team == B_CURRENT_TEAM)
		team = team_get_current_team_id();
	// prevents us from matching sSems[].owner == -1 (which means the sem
	// is owned by a port)
	if (team < 0 || !team_is_valid(team))
		return B_BAD_TEAM_ID;

	slot = *_cookie;
	if (slot >= sMaxSems)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	while (slot < sMaxSems) {
		if (sSems[slot].id != -1 && sSems[slot].u.used.owner == team) {
			GRAB_SEM_LOCK(sSems[slot]);
			if (sSems[slot].id != -1 && sSems[slot].u.used.owner == team) {
				// found one!
				fill_sem_info(&sSems[slot], info, size);

				RELEASE_SEM_LOCK(sSems[slot]);
				slot++;
				found = true;
				break;
			}
			RELEASE_SEM_LOCK(sSems[slot]);
		}
		slot++;
	}
	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	if (!found)
		return B_BAD_VALUE;

	*_cookie = slot;
	return B_OK;
}
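

/*!	Usage sketch (not compiled in): iterating over a team's semaphores with
	the public get_next_sem_info() macro, which ends up in the function above.
*/
#if 0
static void
list_team_sems(team_id team)
{
	sem_info info;
	int32 cookie = 0;

	while (get_next_sem_info(team, &cookie, &info) == B_OK) {
		dprintf("sem %ld \"%s\": count %ld\n", info.sem, info.name,
			info.count);
	}
}
#endif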


status_t
set_sem_owner(sem_id id, team_id team)
{
	int state;
	int slot;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (team < 0 || !team_is_valid(team))
		return B_BAD_TEAM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("set_sem_owner: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// ToDo: this is a small race condition: the team ID could already
	// be invalid at this point - we would lose one semaphore slot in
	// this case!
	// The only safe way to do this is to prevent either team (the new
	// or the old owner) from dying until we leave the spinlock.
	sSems[slot].u.used.owner = team;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_NO_ERROR;
}


//	#pragma mark - Syscalls


sem_id
_user_create_sem(int32 count, const char *userName)
{
	char name[B_OS_NAME_LENGTH];

	if (userName == NULL)
		return create_sem_etc(count, NULL, team_get_current_team_id());

	if (!IS_USER_ADDRESS(userName)
		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
		return B_BAD_ADDRESS;

	return create_sem_etc(count, name, team_get_current_team_id());
}


status_t
_user_delete_sem(sem_id id)
{
	return delete_sem(id);
}


status_t
_user_acquire_sem(sem_id id)
{
	status_t error = switch_sem_etc(-1, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	return syscall_restart_handle_post(error);
}


status_t
_user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(-1, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	return syscall_restart_handle_timeout_post(error, timeout);
}


status_t
_user_switch_sem(sem_id releaseSem, sem_id id)
{
	status_t error = switch_sem_etc(releaseSem, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	if (releaseSem < 0)
		return syscall_restart_handle_post(error);

	return error;
}


status_t
_user_switch_sem_etc(sem_id releaseSem, sem_id id, int32 count, uint32 flags,
	bigtime_t timeout)
{
	if (releaseSem < 0)
		syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(releaseSem, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	if (releaseSem < 0)
		return syscall_restart_handle_timeout_post(error, timeout);

	return error;
}


status_t
_user_release_sem(sem_id id)
{
	return release_sem_etc(id, 1, B_CHECK_PERMISSION);
}


status_t
_user_release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	return release_sem_etc(id, count, flags | B_CHECK_PERMISSION);
}


status_t
_user_get_sem_count(sem_id id, int32 *userCount)
{
	status_t status;
	int32 count;

	if (userCount == NULL || !IS_USER_ADDRESS(userCount))
		return B_BAD_ADDRESS;

	status = get_sem_count(id, &count);
	if (status == B_OK && user_memcpy(userCount, &count, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_sem_info(sem_id id, struct sem_info *userInfo, size_t size)
{
	struct sem_info info;
	status_t status;

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_sem_info(id, &info, size);
	if (status == B_OK && user_memcpy(userInfo, &info, size) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_next_sem_info(team_id team, int32 *userCookie, struct sem_info *userInfo,
	size_t size)
{
	struct sem_info info;
	int32 cookie;
	status_t status;

	if (userCookie == NULL || userInfo == NULL
		|| !IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_sem_info(team, &cookie, &info, size);

	if (status == B_OK) {
		if (user_memcpy(userInfo, &info, size) < B_OK
			|| user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK)
			return B_BAD_ADDRESS;
	}

	return status;
}


status_t
_user_set_sem_owner(sem_id id, team_id team)
{
	return set_sem_owner(id, team);
}
1439