xref: /haiku/src/system/kernel/sem.cpp (revision 020cbad9d40235a2c50a81a42d69912a5ff8fbc4)
/*
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*! Semaphore code */


#include <OS.h>

#include <sem.h>
#include <kernel.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <smp.h>
#include <int.h>
#include <arch/int.h>
#include <debug.h>
#include <thread.h>
#include <team.h>
#include <vfs.h>
#include <vm_low_memory.h>
#include <vm_page.h>
#include <boot/kernel_args.h>
#include <wait_for_objects.h>

#include <string.h>
#include <stdlib.h>


//#define TRACE_SEM
#ifdef TRACE_SEM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#define DEBUG_LAST_ACQUIRER

struct sem_entry {
	sem_id		id;
	spinlock	lock;	// protects only the id field when unused
	union {
		// when slot in use
		struct {
			int32				count;
			struct thread_queue queue;
			char				*name;
			team_id				owner;	// if set to -1, means owned by a port
			select_info			*select_infos;
#ifdef DEBUG_LAST_ACQUIRER
			thread_id			last_acquirer;
			int32				last_acquire_count;
			thread_id			last_releaser;
			int32				last_release_count;
#endif
		} used;

		// when slot unused
		struct {
			sem_id				next_id;
			struct sem_entry	*next;
		} unused;
	} u;
};

static const int32 kMaxSemaphores = 131072;
static int32 sMaxSems = 4096;
	// Final value is computed based on the amount of available memory
static int32 sUsedSems = 0;

static struct sem_entry *sSems = NULL;
static bool sSemsActive = false;
static struct sem_entry	*sFreeSemsHead = NULL;
static struct sem_entry	*sFreeSemsTail = NULL;

static spinlock sem_spinlock = 0;
#define GRAB_SEM_LIST_LOCK()     acquire_spinlock(&sem_spinlock)
#define RELEASE_SEM_LIST_LOCK()  release_spinlock(&sem_spinlock)
#define GRAB_SEM_LOCK(s)         acquire_spinlock(&(s).lock)
#define RELEASE_SEM_LOCK(s)      release_spinlock(&(s).lock)

static int remove_thread_from_sem(struct thread *thread, struct sem_entry *sem,
	struct thread_queue *queue, status_t acquireStatus, bool hasThreadLock);

struct sem_timeout_args {
	thread_id	blocked_thread;
	sem_id		blocked_sem_id;
	int32		sem_count;
};

static int
dump_sem_list(int argc, char **argv)
{
	const char *name = NULL;
	team_id owner = -1;
#ifdef DEBUG_LAST_ACQUIRER
	thread_id last = -1;
#endif
	int32 i;

	if (argc > 2) {
		if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
			owner = strtoul(argv[2], NULL, 0);
		else if (!strcmp(argv[1], "name"))
			name = argv[2];
#ifdef DEBUG_LAST_ACQUIRER
		else if (!strcmp(argv[1], "last"))
			last = strtoul(argv[2], NULL, 0);
#endif
	} else if (argc > 1)
		owner = strtoul(argv[1], NULL, 0);

	kprintf("sem            id count   team"
#ifdef DEBUG_LAST_ACQUIRER
		"   last"
#endif
		"  name\n");

	for (i = 0; i < sMaxSems; i++) {
		struct sem_entry *sem = &sSems[i];
		if (sem->id < 0
#ifdef DEBUG_LAST_ACQUIRER
			|| (last != -1 && sem->u.used.last_acquirer != last)
#endif
			|| (name != NULL && strstr(sem->u.used.name, name) == NULL)
			|| (owner != -1 && sem->u.used.owner != owner))
			continue;

		kprintf("%p %6ld %5ld %6ld "
#ifdef DEBUG_LAST_ACQUIRER
			"%6ld "
#endif
			" %s\n", sem, sem->id, sem->u.used.count,
			sem->u.used.owner,
#ifdef DEBUG_LAST_ACQUIRER
			sem->u.used.last_acquirer > 0 ? sem->u.used.last_acquirer : 0,
#endif
			sem->u.used.name);
	}

	return 0;
}


static void
dump_sem(struct sem_entry *sem)
{
	kprintf("SEM: %p\n", sem);
	kprintf("id:      %ld (%#lx)\n", sem->id, sem->id);
	if (sem->id >= 0) {
		kprintf("name:    '%s'\n", sem->u.used.name);
		kprintf("owner:   %ld\n", sem->u.used.owner);
		kprintf("count:   %ld\n", sem->u.used.count);
		kprintf("queue:  ");
		if (sem->u.used.queue.head != NULL) {
			struct thread *thread = sem->u.used.queue.head;
			while (thread != NULL) {
				kprintf(" %ld", thread->id);
				thread = thread->queue_next;
			}
			kprintf("\n");
		} else
			kprintf(" -\n");

		set_debug_variable("_sem", (addr_t)sem);
		set_debug_variable("_semID", sem->id);
		set_debug_variable("_owner", sem->u.used.owner);

#ifdef DEBUG_LAST_ACQUIRER
		kprintf("last acquired by: %ld, count: %ld\n", sem->u.used.last_acquirer,
			sem->u.used.last_acquire_count);
		kprintf("last released by: %ld, count: %ld\n", sem->u.used.last_releaser,
			sem->u.used.last_release_count);

		if (sem->u.used.last_acquirer != 0)
			set_debug_variable("_acquirer", sem->u.used.last_acquirer);
		else
			unset_debug_variable("_acquirer");

		if (sem->u.used.last_releaser != 0)
			set_debug_variable("_releaser", sem->u.used.last_releaser);
		else
			unset_debug_variable("_releaser");
#endif
	} else {
		kprintf("next:    %p\n", sem->u.unused.next);
		kprintf("next_id: %ld\n", sem->u.unused.next_id);
	}
}


static int
dump_sem_info(int argc, char **argv)
{
	bool found = false;
	addr_t num;
	int32 i;

	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	num = strtoul(argv[1], NULL, 0);

	if (IS_KERNEL_ADDRESS(num)) {
		dump_sem((struct sem_entry *)num);
		return 0;
	} else if (num > 0) {
		uint32 slot = num % sMaxSems;
		if (sSems[slot].id != (int)num) {
			kprintf("sem 0x%lx (%ld) doesn't exist!\n", num, num);
			return 0;
		}

		dump_sem(&sSems[slot]);
		return 0;
	}

	// walk through the sem list, trying to match name
	for (i = 0; i < sMaxSems; i++) {
		if (sSems[i].u.used.name != NULL
			&& strcmp(argv[1], sSems[i].u.used.name) == 0) {
			dump_sem(&sSems[i]);
			found = true;
		}
	}

	if (!found)
		kprintf("sem \"%s\" doesn't exist!\n", argv[1]);
	return 0;
}


static inline void
clear_thread_queue(struct thread_queue *queue)
{
	queue->head = queue->tail = NULL;
}


/*!	\brief Appends a semaphore slot to the free list.

	The semaphore list must be locked.
	The slot's id field is not changed. It should already be set to -1.

	\param slot The index of the semaphore slot.
	\param nextID The ID the slot will get when it is reused. If < 0, the
		   slot index itself is used as the next ID.
*/
static void
free_sem_slot(int slot, sem_id nextID)
{
	struct sem_entry *sem = sSems + slot;
	// set next_id to the next possible value; if no valid ID was given,
	// fall back to the slot index
	if (nextID < 0)
		sem->u.unused.next_id = slot;
	else
		sem->u.unused.next_id = nextID;
	// append the entry to the list
	if (sFreeSemsTail)
		sFreeSemsTail->u.unused.next = sem;
	else
		sFreeSemsHead = sem;
	sFreeSemsTail = sem;
	sem->u.unused.next = NULL;
}
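
// Note on ID recycling: delete_sem() re-queues a slot with
// free_sem_slot(slot, id + sMaxSems). Since slots are always looked up via
// id % sMaxSems, the new ID maps back to the very same slot. For example,
// with sMaxSems == 4096, slot 5 first hands out ID 5; after deletion it is
// reused with ID 4101, and 4101 % 4096 == 5 again, while an acquire on the
// stale ID 5 now fails the sSems[slot].id == id check.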


static inline void
notify_sem_select_events(struct sem_entry* sem, uint16 events)
{
	if (sem->u.used.select_infos)
		notify_select_events_list(sem->u.used.select_infos, events);
}


/*!	Called from a timer handler. Wakes up a thread that has timed out
	while waiting on a semaphore.
*/
static int32
sem_timeout(timer *data)
{
	struct sem_timeout_args *args = (struct sem_timeout_args *)data->entry.prev;
	struct thread *thread;
	int slot;
	int state;
	struct thread_queue wakeupQueue;

	thread = thread_get_thread_struct(args->blocked_thread);
	if (thread == NULL)
		return B_HANDLED_INTERRUPT;
	slot = args->blocked_sem_id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	TRACE(("sem_timeout: called on %p sem %ld, thread %ld\n",
		data, args->blocked_sem_id, args->blocked_thread));

	if (sSems[slot].id != args->blocked_sem_id) {
		// this thread was not waiting on this semaphore
		panic("sem_timeout: thread %ld was trying to wait on sem %ld which "
			"doesn't exist!\n", args->blocked_thread, args->blocked_sem_id);

		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		return B_HANDLED_INTERRUPT;
	}

	clear_thread_queue(&wakeupQueue);
	remove_thread_from_sem(thread, &sSems[slot], &wakeupQueue, B_TIMED_OUT,
		false);

	RELEASE_SEM_LOCK(sSems[slot]);

	GRAB_THREAD_LOCK();
	// put the threads into the run queue here to make sure we don't deadlock
	// in sem_interrupt_thread()
	while ((thread = thread_dequeue(&wakeupQueue)) != NULL) {
		scheduler_enqueue_in_run_queue(thread);
	}
	RELEASE_THREAD_LOCK();

	restore_interrupts(state);

	return B_INVOKE_SCHEDULER;
}


/*!	Fills the sem_info structure with information from the specified
	semaphore.
	The semaphore's lock must be held when called.
*/
static void
fill_sem_info(struct sem_entry *sem, sem_info *info, size_t size)
{
	info->sem = sem->id;
	info->team = sem->u.used.owner;
	strlcpy(info->name, sem->u.used.name, sizeof(info->name));
	info->count = sem->u.used.count;

	// ToDo: not sure if this is the latest holder, or the next
	// holder...
	if (sem->u.used.queue.head != NULL)
		info->latest_holder = sem->u.used.queue.head->id;
	else
		info->latest_holder = -1;
}


//	#pragma mark - Private Kernel API


status_t
sem_init(kernel_args *args)
{
	area_id area;
	int32 i;

	TRACE(("sem_init: entry\n"));

	// compute maximal number of semaphores depending on the available memory
	// 128 MB -> 16384 semaphores, 448 kB fixed array size
	// 256 MB -> 32768, 896 kB
	// 512 MB -> 65536, 1.75 MB
	// 1024 MB and more -> 131072, 3.5 MB
	i = vm_page_num_pages() / 2;
	while (sMaxSems < i && sMaxSems < kMaxSemaphores)
		sMaxSems <<= 1;

	// create and initialize semaphore table
	area = create_area("sem_table", (void **)&sSems, B_ANY_KERNEL_ADDRESS,
		sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		panic("unable to allocate semaphore table!\n");

	memset(sSems, 0, sizeof(struct sem_entry) * sMaxSems);
	for (i = 0; i < sMaxSems; i++) {
		sSems[i].id = -1;
		free_sem_slot(i, i);
	}

	// add debugger commands
	add_debugger_command_etc("sems", &dump_sem_list,
		"Dump a list of all active semaphores (for team, with name, etc.)",
		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]"
#ifdef DEBUG_LAST_ACQUIRER
			" | (\"last\" <last acquirer>)"
#endif
		"\n"
		"Prints a list of all active semaphores meeting the given\n"
		"requirement. If no argument is given, all sems are listed.\n"
		"  <team>             - The team owning the semaphores.\n"
		"  <name>             - Part of the name of the semaphores.\n"
#ifdef DEBUG_LAST_ACQUIRER
		"  <last acquirer>    - The thread that last acquired the semaphore.\n"
#endif
		, 0);
	add_debugger_command_etc("sem", &dump_sem_info,
		"Dump info about a particular semaphore",
		"<sem>\n"
		"Prints info about the specified semaphore.\n"
		"  <sem>  - pointer to the semaphore structure, semaphore ID, or name\n"
		"           of the semaphore to print info for.\n", 0);

	TRACE(("sem_init: exit\n"));

	sSemsActive = true;

	return 0;
}


/*!	Creates a semaphore with the given parameters.
	Note that the team ID is not checked; it must be valid, or else the
	semaphore might never be deleted.
	This function is only available from within the kernel, and
	should not be made public - if possible, we should remove it
	completely (and have only create_sem() exported).
*/
sem_id
create_sem_etc(int32 count, const char *name, team_id owner)
{
	struct sem_entry *sem = NULL;
	cpu_status state;
	sem_id id = B_NO_MORE_SEMS;
	char *tempName;
	size_t nameLength;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

#if 0
	// TODO: the code below might cause unwanted deadlocks,
	// we need an asynchronously running low resource handler.
	if (sUsedSems == sMaxSems) {
		// The vnode cache may have collected lots of semaphores.
		// Freeing some unused vnodes should improve our situation.
		// TODO: maybe create a generic "low resources" handler, instead
		//	of only the specialised low memory thing?
		vfs_free_unused_vnodes(B_LOW_MEMORY_WARNING);
	}
	if (sUsedSems == sMaxSems) {
		// try again with more enthusiasm
		vfs_free_unused_vnodes(B_LOW_MEMORY_CRITICAL);
	}
#endif
	if (sUsedSems == sMaxSems)
		return B_NO_MORE_SEMS;

	if (name == NULL)
		name = "unnamed semaphore";

	nameLength = strlen(name) + 1;
	nameLength = min_c(nameLength, B_OS_NAME_LENGTH);
	tempName = (char *)malloc(nameLength);
	if (tempName == NULL)
		return B_NO_MEMORY;
	strlcpy(tempName, name, nameLength);

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	// get the first slot from the free list
	sem = sFreeSemsHead;
	if (sem) {
		// remove it from the free list
		sFreeSemsHead = sem->u.unused.next;
		if (!sFreeSemsHead)
			sFreeSemsTail = NULL;

		// init the slot
		GRAB_SEM_LOCK(*sem);
		sem->id = sem->u.unused.next_id;
		sem->u.used.count = count;
		clear_thread_queue(&sem->u.used.queue);
		sem->u.used.name = tempName;
		sem->u.used.owner = owner;
		sem->u.used.select_infos = NULL;
		id = sem->id;
		RELEASE_SEM_LOCK(*sem);

		atomic_add(&sUsedSems, 1);
	}

	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	if (!sem)
		free(tempName);

	return id;
}
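
// A minimal usage sketch (not compiled in): how a kernel component might
// create a semaphore for a driver-style request queue and clean it up again.
// The name "request queue" and the initial count of 0 are made up for the
// example; error handling is reduced to the essentials.
#if 0
static status_t
example_create_and_use(void)
{
	sem_id requestSem = create_sem_etc(0, "request queue",
		team_get_kernel_team_id());
	if (requestSem < B_OK)
		return requestSem;

	// ... release_sem(requestSem) from the producer,
	// acquire_sem(requestSem) from the consumer ...

	return delete_sem(requestSem);
}
#endif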


status_t
select_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;
	status_t error = B_OK;

	if (id < 0)
		return B_BAD_SEM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		// bad sem ID
		error = B_BAD_SEM_ID;
	} else if (!kernel
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		// kernel semaphore, but call from userland
		error = B_NOT_ALLOWED;
	} else {
		info->selected_events &= B_EVENT_ACQUIRE_SEMAPHORE | B_EVENT_INVALID;

		if (info->selected_events != 0) {
			info->next = sSems[slot].u.used.select_infos;
			sSems[slot].u.used.select_infos = info;

			if (sSems[slot].u.used.count > 0)
				notify_select_events(info, B_EVENT_ACQUIRE_SEMAPHORE);
		}
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return error;
}


status_t
deselect_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;

	if (id < 0)
		return B_BAD_SEM_ID;

	if (info->selected_events == 0)
		return B_OK;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id == id) {
		select_info** infoLocation = &sSems[slot].u.used.select_infos;
		while (*infoLocation != NULL && *infoLocation != info)
			infoLocation = &(*infoLocation)->next;

		if (*infoLocation == info)
			*infoLocation = info->next;
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}
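
// select_sem()/deselect_sem() are the backend for the public
// wait_for_objects() API. A rough userland sketch, assuming the
// object_wait_info interface declared in OS.h; the helper name and the
// passed-in semaphore are made up for the example.
#if 0
static status_t
example_wait_for_sem(sem_id sem)
{
	object_wait_info waitInfo;
	waitInfo.object = sem;
	waitInfo.type = B_OBJECT_TYPE_SEMAPHORE;
	waitInfo.events = B_EVENT_ACQUIRE_SEMAPHORE;

	// blocks until the semaphore can (probably) be acquired or goes away
	ssize_t count = wait_for_objects(&waitInfo, 1);
	if (count < 0)
		return count;
	if ((waitInfo.events & B_EVENT_INVALID) != 0)
		return B_BAD_SEM_ID;
	return B_OK;
}
#endif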


/*!	Wakes up a thread that's blocked on a semaphore.
	This function must be entered with interrupts disabled and the thread
	lock held.
*/
status_t
sem_interrupt_thread(struct thread *thread)
{
	struct thread_queue wakeupQueue;
	int32 slot;

	TRACE(("sem_interrupt_thread: called on thread %p (%ld), blocked on sem %ld\n",
		thread, thread->id, thread->sem.blocking));

	if (thread->state != B_THREAD_WAITING || thread->sem.blocking < 0)
		return B_BAD_VALUE;
	if ((thread->sem.flags & B_CAN_INTERRUPT) == 0
		&& ((thread->sem.flags & B_KILL_CAN_INTERRUPT) == 0
			|| (thread->sig_pending & KILL_SIGNALS) == 0)) {
		return B_NOT_ALLOWED;
	}

	slot = thread->sem.blocking % sMaxSems;

	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != thread->sem.blocking) {
		panic("sem_interrupt_thread: thread %ld blocks on sem %ld, but that "
			"sem doesn't exist!\n", thread->id, thread->sem.blocking);
	}

	clear_thread_queue(&wakeupQueue);
	if (remove_thread_from_sem(thread, &sSems[slot], &wakeupQueue,
			B_INTERRUPTED, true) != B_OK) {
		panic("sem_interrupt_thread: thread %ld not found in sem %ld's wait "
			"queue\n", thread->id, thread->sem.blocking);
	}

	RELEASE_SEM_LOCK(sSems[slot]);

	while ((thread = thread_dequeue(&wakeupQueue)) != NULL) {
		scheduler_enqueue_in_run_queue(thread);
	}

	return B_NO_ERROR;
}


/*!	Forcibly removes a thread from a semaphore's wait queue. May have to wake
	up other threads as a side effect. All threads that need to be woken up
	are added to the passed in thread_queue.
	Must be called with semaphore lock held.
*/
static int
remove_thread_from_sem(struct thread *thread, struct sem_entry *sem,
	struct thread_queue *queue, status_t acquireStatus, bool hasThreadLock)
{
	// remove the thread from the queue and place it in the supplied queue
	if (thread_dequeue_id(&sem->u.used.queue, thread->id) != thread)
		return B_ENTRY_NOT_FOUND;

	sem->u.used.count += thread->sem.acquire_count;
	thread->state = thread->next_state = B_THREAD_READY;
	thread->sem.acquire_status = acquireStatus;
	thread_enqueue(thread, queue);

	// now see if more threads need to be woken up; always operate on the
	// current head of the wait queue
	while (sem->u.used.count > 0
		   && (thread = thread_lookat_queue(&sem->u.used.queue)) != NULL) {
		int32 delta = min_c(thread->sem.count, sem->u.used.count);

		thread->sem.count -= delta;
		if (thread->sem.count <= 0) {
			thread = thread_dequeue(&sem->u.used.queue);
			thread->state = thread->next_state = B_THREAD_READY;
			thread_enqueue(thread, queue);
		}
		sem->u.used.count -= delta;
	}

	if (sem->u.used.count > 0 && sem->u.used.select_infos != NULL) {
		if (hasThreadLock)
			RELEASE_THREAD_LOCK();

		notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE);

		if (hasThreadLock)
			GRAB_THREAD_LOCK();
	}

	return B_OK;
}


/*!	This function cycles through the sem table, deleting all the sems
	that are owned by the specified team.
*/
int
sem_delete_owned_sems(team_id owner)
{
	int state;
	int i;
	int count = 0;

	// ToDo: that looks horribly inefficient - maybe it would be better
	//	to have them in a list in the team

	if (owner < 0)
		return B_BAD_TEAM_ID;

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	for (i = 0; i < sMaxSems; i++) {
		if (sSems[i].id != -1 && sSems[i].u.used.owner == owner) {
			sem_id id = sSems[i].id;

			RELEASE_SEM_LIST_LOCK();
			restore_interrupts(state);

			delete_sem(id);
			count++;

			state = disable_interrupts();
			GRAB_SEM_LIST_LOCK();
		}
	}

	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	return count;
}


int32
sem_max_sems(void)
{
	return sMaxSems;
}


int32
sem_used_sems(void)
{
	return sUsedSems;
}


//	#pragma mark - Public Kernel API


sem_id
create_sem(int32 count, const char *name)
{
	return create_sem_etc(count, name, team_get_kernel_team_id());
}


status_t
delete_sem(sem_id id)
{
	struct thread_queue releaseQueue;
	int32 releasedThreads;
	struct thread *thread;
	cpu_status state;
	int32 slot;
	char *name;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("delete_sem: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	notify_sem_select_events(&sSems[slot], B_EVENT_INVALID);
	sSems[slot].u.used.select_infos = NULL;

	releasedThreads = 0;
	clear_thread_queue(&releaseQueue);

	// free any threads waiting for this semaphore
	while ((thread = thread_dequeue(&sSems[slot].u.used.queue)) != NULL) {
		thread->state = B_THREAD_READY;
		thread->sem.acquire_status = B_BAD_SEM_ID;
		thread->sem.count = 0;
		thread_enqueue(thread, &releaseQueue);
		releasedThreads++;
	}

	sSems[slot].id = -1;
	name = sSems[slot].u.used.name;
	sSems[slot].u.used.name = NULL;

	RELEASE_SEM_LOCK(sSems[slot]);

	// append slot to the free list
	GRAB_SEM_LIST_LOCK();
	free_sem_slot(slot, id + sMaxSems);
	atomic_add(&sUsedSems, -1);
	RELEASE_SEM_LIST_LOCK();

	if (releasedThreads > 0) {
		GRAB_THREAD_LOCK();
		while ((thread = thread_dequeue(&releaseQueue)) != NULL) {
			scheduler_enqueue_in_run_queue(thread);
		}
		scheduler_reschedule();
		RELEASE_THREAD_LOCK();
	}

	restore_interrupts(state);

	free(name);

	return B_OK;
}


status_t
acquire_sem(sem_id id)
{
	return switch_sem_etc(-1, id, 1, 0, 0);
}


status_t
acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	return switch_sem_etc(-1, id, count, flags, timeout);
}


status_t
switch_sem(sem_id toBeReleased, sem_id toBeAcquired)
{
	return switch_sem_etc(toBeReleased, toBeAcquired, 1, 0, 0);
}


status_t
switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
	uint32 flags, bigtime_t timeout)
{
	int slot = id % sMaxSems;
	int state;
	status_t status = B_OK;

	if (kernel_startup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	if (!are_interrupts_enabled()) {
		panic("switch_sem_etc: called with interrupts disabled for sem %ld\n",
			id);
	}

	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0
		|| (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
			== (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {
		return B_BAD_VALUE;
	}

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("switch_sem_etc: bad sem %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to acquire kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		status = B_NOT_ALLOWED;
		goto err;
	}

	if (sSems[slot].u.used.count - count < 0 && (flags & B_RELATIVE_TIMEOUT) != 0
		&& timeout <= 0) {
		// immediate timeout
		status = B_WOULD_BLOCK;
		goto err;
	}

	if ((sSems[slot].u.used.count -= count) < 0) {
		// we need to block
		struct thread *thread = thread_get_current_thread();
		timer timeout_timer; // stick it on the stack, since we may be blocking here
		struct sem_timeout_args args;

		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
			" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));

		// Do a quick check to see if the thread has any pending signals.
		// This should catch most of the cases where the thread had a signal.
		if (((flags & B_CAN_INTERRUPT) && (thread->sig_pending & ~thread->sig_block_mask) != 0)
			|| ((flags & B_KILL_CAN_INTERRUPT)
				&& (thread->sig_pending & KILL_SIGNALS))) {
			sSems[slot].u.used.count += count;
			status = B_INTERRUPTED;
			goto err;
		}

		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
			timeout = B_INFINITE_TIMEOUT;

		thread->next_state = B_THREAD_WAITING;
		thread->sem.flags = flags;
		thread->sem.blocking = id;
		thread->sem.acquire_count = count;
		thread->sem.count = min_c(-sSems[slot].u.used.count, count);
			// store the count we need to restore upon release
		thread->sem.acquire_status = B_NO_ERROR;
		thread_enqueue(thread, &sSems[slot].u.used.queue);

		if (timeout != B_INFINITE_TIMEOUT) {
			TRACE(("switch_sem_etc: setting timeout sem for %Ld usecs, sem %ld, thread %ld\n",
				timeout, id, thread->id));

			// set up an event to go off with the thread struct as the data
			args.blocked_sem_id = id;
			args.blocked_thread = thread->id;
			args.sem_count = count;

			// ToDo: another evil hack: pass the args into timer->entry.prev
			timeout_timer.entry.prev = (qent *)&args;
			add_timer(&timeout_timer, &sem_timeout, timeout,
				flags & B_RELATIVE_TIMEOUT ?
					B_ONE_SHOT_RELATIVE_TIMER : B_ONE_SHOT_ABSOLUTE_TIMER);
		}

		RELEASE_SEM_LOCK(sSems[slot]);

		if (semToBeReleased >= B_OK)
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);

		GRAB_THREAD_LOCK();
		// Check again to see if a signal is pending. It may have been
		// delivered while setting up the sem, though that's pretty unlikely.
		if (((flags & B_CAN_INTERRUPT)
				&& (thread->sig_pending & ~thread->sig_block_mask) != 0)
			|| ((flags & B_KILL_CAN_INTERRUPT)
				&& (thread->sig_pending & KILL_SIGNALS))) {
			struct thread_queue wakeupQueue;
			struct thread *wakeupThread;
			// A tiny race happened: a signal was delivered to this thread
			// while it was setting up the sem. From this point on no further
			// signal can be delivered, since we hold the thread lock. The
			// previous check would have caught most instances, but this race
			// slipped through, so we have to handle it here. It'll be more
			// messy...
			clear_thread_queue(&wakeupQueue);
			GRAB_SEM_LOCK(sSems[slot]);
			if (sSems[slot].id == id) {
				remove_thread_from_sem(thread, &sSems[slot], &wakeupQueue,
					B_INTERRUPTED, true);
			}
			RELEASE_SEM_LOCK(sSems[slot]);
			while ((wakeupThread = thread_dequeue(&wakeupQueue)) != NULL) {
				scheduler_enqueue_in_run_queue(wakeupThread);
			}
			// fall through and reschedule, since another thread with a
			// higher priority may have been woken up
		}
		scheduler_reschedule();
		RELEASE_THREAD_LOCK();

		if (timeout != B_INFINITE_TIMEOUT) {
			if (thread->sem.acquire_status != B_TIMED_OUT) {
				// cancel the timer event; the sem may have been deleted or
				// interrupted with the timer still active
				cancel_timer(&timeout_timer);
			}
		}

#ifdef DEBUG_LAST_ACQUIRER
		if (thread->sem.acquire_status >= B_OK) {
			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
			sSems[slot].u.used.last_acquire_count = count;
		}
#endif

		restore_interrupts(state);

		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
			thread->name));
		return thread->sem.acquire_status;
	} else {
#ifdef DEBUG_LAST_ACQUIRER
		sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
		sSems[slot].u.used.last_acquire_count = count;
#endif
	}

err:
	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

#if 0
	if (status == B_NOT_ALLOWED)
		_user_debugger("Thread tried to acquire kernel semaphore.");
#endif

	return status;
}
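
// A minimal sketch (not compiled in) of the common acquire-with-timeout
// pattern this function implements; the one-second timeout, the helper
// name, and the passed-in semaphore are made up for the example.
#if 0
static status_t
example_acquire_with_timeout(sem_id sem)
{
	status_t status;
	do {
		status = acquire_sem_etc(sem, 1,
			B_CAN_INTERRUPT | B_RELATIVE_TIMEOUT, 1000000LL);
			// retry when interrupted by a signal
	} while (status == B_INTERRUPTED);

	if (status == B_TIMED_OUT) {
		// nothing was released within one second
	}
	return status;
}
#endif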


status_t
release_sem(sem_id id)
{
	return release_sem_etc(id, 1, 0);
}


status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	struct thread_queue releaseQueue;
	int32 slot = id % sMaxSems;
	cpu_status state;
	status_t status = B_OK;

	if (kernel_startup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("release_sem_etc: invalid sem_id %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		status = B_NOT_ALLOWED;
		goto err;
	}

#ifdef DEBUG_LAST_ACQUIRER
	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	// Clear out a queue we will use to hold all of the threads that we will
	// have to put back into the run list. This is done so the thread lock
	// won't be held while this sem's lock is held, since the two locks are
	// grabbed in the opposite order in sem_interrupt_thread().
	clear_thread_queue(&releaseQueue);

	if (flags & B_RELEASE_ALL) {
		count = -sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			goto err;
	}

	while (count > 0) {
		int delta = count;
		if (sSems[slot].u.used.count < 0) {
			struct thread *thread = thread_lookat_queue(&sSems[slot].u.used.queue);

			delta = min_c(count, thread->sem.count);
			thread->sem.count -= delta;
			if (thread->sem.count <= 0) {
				// release this thread
				thread = thread_dequeue(&sSems[slot].u.used.queue);
				thread_enqueue(thread, &releaseQueue);
				thread->state = B_THREAD_READY;
				thread->sem.count = 0;
			}
		} else if (flags & B_RELEASE_IF_WAITING_ONLY)
			break;

		sSems[slot].u.used.count += delta;
		count -= delta;
	}

	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	RELEASE_SEM_LOCK(sSems[slot]);

	// pull off any items in the release queue and put them in the run queue
	if (releaseQueue.head != NULL) {
		struct thread *thread;

		GRAB_THREAD_LOCK();
		while ((thread = thread_dequeue(&releaseQueue)) != NULL) {
#if 0
			// temporarily place thread in a run queue with a higher priority to boost it up
			thread->next_priority = thread->priority >= B_FIRST_REAL_TIME_PRIORITY ?
				thread->priority : thread->priority + 1;
#endif
			scheduler_enqueue_in_run_queue(thread);
		}
		if ((flags & B_DO_NOT_RESCHEDULE) == 0)
			scheduler_reschedule();

		RELEASE_THREAD_LOCK();
	}
	goto outnolock;

err:
	RELEASE_SEM_LOCK(sSems[slot]);
outnolock:
	restore_interrupts(state);

	return status;
}
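
// A small sketch (not compiled in) of the classic interrupt-handler pattern
// that B_DO_NOT_RESCHEDULE enables: a handler must not reschedule directly,
// so it releases the semaphore without rescheduling and defers that decision
// to the interrupt code by returning B_INVOKE_SCHEDULER. The device-specific
// names are made up for the example.
#if 0
static int32
example_interrupt_handler(void *data)
{
	sem_id ioDoneSem = ((struct example_device *)data)->io_done_sem;

	// wake the waiting I/O thread, but don't reschedule inside the handler
	release_sem_etc(ioDoneSem, 1, B_DO_NOT_RESCHEDULE);
	return B_INVOKE_SCHEDULER;
}
#endif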


status_t
get_sem_count(sem_id id, int32 *_count)
{
	int slot;
	int state;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (_count == NULL)
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("get_sem_count: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	*_count = sSems[slot].u.used.count;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}


/*!	Called by the get_sem_info() macro. */
status_t
_get_sem_info(sem_id id, struct sem_info *info, size_t size)
{
	status_t status = B_OK;
	int state;
	int slot;

	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		status = B_BAD_SEM_ID;
		TRACE(("get_sem_info: invalid sem_id %ld\n", id));
	} else
		fill_sem_info(&sSems[slot], info, size);

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return status;
}


/*!	Called by the get_next_sem_info() macro. */
status_t
_get_next_sem_info(team_id team, int32 *_cookie, struct sem_info *info,
	size_t size)
{
	int state;
	int slot;
	bool found = false;

	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (_cookie == NULL || info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;

	if (team == B_CURRENT_TEAM)
		team = team_get_current_team_id();
	// prevent matching a team of -1, which for sSems[].u.used.owner means
	// "owned by a port"
	if (team < 0 || !team_is_valid(team))
		return B_BAD_TEAM_ID;

	slot = *_cookie;
	if (slot >= sMaxSems)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	while (slot < sMaxSems) {
		if (sSems[slot].id != -1 && sSems[slot].u.used.owner == team) {
			GRAB_SEM_LOCK(sSems[slot]);
			if (sSems[slot].id != -1 && sSems[slot].u.used.owner == team) {
				// found one!
				fill_sem_info(&sSems[slot], info, size);

				RELEASE_SEM_LOCK(sSems[slot]);
				slot++;
				found = true;
				break;
			}
			RELEASE_SEM_LOCK(sSems[slot]);
		}
		slot++;
	}
	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	if (!found)
		return B_BAD_VALUE;

	*_cookie = slot;
	return B_OK;
}
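
// A short sketch (not compiled in) of how the cookie protocol is meant to be
// driven through the public get_next_sem_info() wrapper: start with a cookie
// of 0 and iterate until the function no longer returns B_OK. The helper
// name is made up for the example.
#if 0
static void
example_list_team_sems(team_id team)
{
	sem_info info;
	int32 cookie = 0;

	while (get_next_sem_info(team, &cookie, &info) == B_OK) {
		dprintf("sem %ld: \"%s\", count %ld\n", info.sem, info.name,
			info.count);
	}
}
#endif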


status_t
set_sem_owner(sem_id id, team_id team)
{
	int state;
	int slot;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (team < 0 || !team_is_valid(team))
		return B_BAD_TEAM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("set_sem_owner: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// ToDo: this is a small race condition: the team ID could already
	// be invalid at this point - we would lose one semaphore slot in
	// this case!
	// The only safe way to do this is to prevent either team (the new
	// or the old owner) from dying until we leave the spinlock.
	sSems[slot].u.used.owner = team;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_NO_ERROR;
}


//	#pragma mark - Syscalls


sem_id
_user_create_sem(int32 count, const char *userName)
{
	char name[B_OS_NAME_LENGTH];

	if (userName == NULL)
		return create_sem_etc(count, NULL, team_get_current_team_id());

	if (!IS_USER_ADDRESS(userName)
		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
		return B_BAD_ADDRESS;

	return create_sem_etc(count, name, team_get_current_team_id());
}


status_t
_user_delete_sem(sem_id id)
{
	return delete_sem(id);
}


status_t
_user_acquire_sem(sem_id id)
{
	return switch_sem_etc(-1, id, 1, B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);
}


status_t
_user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	return switch_sem_etc(-1, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);
}


status_t
_user_switch_sem(sem_id releaseSem, sem_id id)
{
	return switch_sem_etc(releaseSem, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);
}


status_t
_user_switch_sem_etc(sem_id releaseSem, sem_id id, int32 count, uint32 flags,
	bigtime_t timeout)
{
	return switch_sem_etc(releaseSem, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);
}


status_t
_user_release_sem(sem_id id)
{
	return release_sem_etc(id, 1, B_CHECK_PERMISSION);
}


status_t
_user_release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	return release_sem_etc(id, count, flags | B_CHECK_PERMISSION);
}


status_t
_user_get_sem_count(sem_id id, int32 *userCount)
{
	status_t status;
	int32 count;

	if (userCount == NULL || !IS_USER_ADDRESS(userCount))
		return B_BAD_ADDRESS;

	status = get_sem_count(id, &count);
	if (status == B_OK && user_memcpy(userCount, &count, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_sem_info(sem_id id, struct sem_info *userInfo, size_t size)
{
	struct sem_info info;
	status_t status;

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_sem_info(id, &info, size);
	if (status == B_OK && user_memcpy(userInfo, &info, size) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_next_sem_info(team_id team, int32 *userCookie,
	struct sem_info *userInfo, size_t size)
{
	struct sem_info info;
	int32 cookie;
	status_t status;

	if (userCookie == NULL || userInfo == NULL
		|| !IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_sem_info(team, &cookie, &info, size);

	if (status == B_OK) {
		if (user_memcpy(userInfo, &info, size) < B_OK
			|| user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK)
			return B_BAD_ADDRESS;
	}

	return status;
}


status_t
_user_set_sem_owner(sem_id id, team_id team)
{
	return set_sem_owner(id, team);
}
1404