xref: /haiku/src/system/kernel/sem.cpp (revision b671e9bbdbd10268a042b4f4cc4317ccd03d105e)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 /*! Semaphore code */
11 
12 
13 #include <OS.h>
14 
15 #include <sem.h>
16 #include <kernel.h>
17 #include <kscheduler.h>
18 #include <ksignal.h>
19 #include <smp.h>
20 #include <int.h>
21 #include <arch/int.h>
22 #include <debug.h>
23 #include <listeners.h>
24 #include <scheduling_analysis.h>
25 #include <thread.h>
26 #include <team.h>
27 #include <util/AutoLock.h>
28 #include <util/DoublyLinkedList.h>
29 #include <vfs.h>
30 #include <vm_page.h>
31 #include <boot/kernel_args.h>
32 #include <syscall_restart.h>
33 #include <wait_for_objects.h>
34 
35 #include <string.h>
36 #include <stdlib.h>
37 
38 #include "kernel_debug_config.h"
39 
40 
// Define TRACE_SEM to get verbose per-operation tracing of this file on the
// debug output (printed via dprintf_no_syslog).
//#define TRACE_SEM
#ifdef TRACE_SEM
#	define TRACE(x) dprintf_no_syslog x
#else
#	define TRACE(x) ;
#endif

// Define KTRACE_SEM to record semaphore operations in the kernel tracing
// facility instead.
//#define KTRACE_SEM
#ifdef KTRACE_SEM
#	define KTRACE(x...) ktrace_printf(x)
#else
#	define KTRACE(x...) do {} while (false)
#endif
54 
55 
// Entry in a semaphore's wait queue. One per blocked thread; allocated on the
// waiting thread's stack (see the queueEntry local in switch_sem_etc()).
struct queued_thread : DoublyLinkedListLinkImpl<queued_thread> {
	queued_thread(struct thread *thread, int32 count)
		:
		thread(thread),
		count(count),
		queued(false)
	{
	}

	struct thread	*thread;	// the waiting thread
	int32			count;		// units the thread wants to acquire
	bool			queued;		// true while linked into a sem's queue
};
69 
70 typedef DoublyLinkedList<queued_thread> ThreadQueue;
71 
// One slot of the global semaphore table. The union alternates between the
// "used" and "unused" interpretation, discriminated by the id field
// (>= 0: slot in use, -1: slot on the free list).
struct sem_entry {
	sem_id		id;
	spinlock	lock;	// protects only the id field when unused
	union {
		// when slot in use
		struct {
			int32				count;		// current semaphore count
			int32				net_count;
									// count + acquisition count of all blocked
									// threads
			char				*name;		// malloc()ed name, freed on deletion
			team_id				owner;	// if set to -1, means owned by a port
			select_info			*select_infos;	// registered select()/wait infos
			thread_id			last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
			int32				last_acquire_count;
			thread_id			last_releaser;
			int32				last_release_count;
#endif
		} used;

		// when slot unused
		struct {
			sem_id				next_id;	// the ID this slot gets when reused
			struct sem_entry	*next;		// next entry in the free list
		} unused;
	} u;

	ThreadQueue			queue;	// should be in u.used, but has a constructor
};
102 
// Hard upper bound for the semaphore table size.
static const int32 kMaxSemaphores = 131072;
static int32 sMaxSems = 4096;
	// Final value is computed based on the amount of available memory
static int32 sUsedSems = 0;

// The global semaphore table (allocated in haiku_sem_init()) and the
// free-slot list threaded through its unused entries.
static struct sem_entry *sSems = NULL;
static bool sSemsActive = false;
static struct sem_entry	*sFreeSemsHead = NULL;
static struct sem_entry	*sFreeSemsTail = NULL;

// sSemsSpinlock protects the free list; each entry additionally has its own
// spinlock. All sem locks are only ever taken with interrupts disabled.
static spinlock sSemsSpinlock = B_SPINLOCK_INITIALIZER;
#define GRAB_SEM_LIST_LOCK()     acquire_spinlock(&sSemsSpinlock)
#define RELEASE_SEM_LIST_LOCK()  release_spinlock(&sSemsSpinlock)
#define GRAB_SEM_LOCK(s)         acquire_spinlock(&(s).lock)
#define RELEASE_SEM_LOCK(s)      release_spinlock(&(s).lock)
118 
119 
120 static int
121 dump_sem_list(int argc, char **argv)
122 {
123 	const char *name = NULL;
124 	team_id owner = -1;
125 	thread_id last = -1;
126 	int32 i;
127 
128 	if (argc > 2) {
129 		if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
130 			owner = strtoul(argv[2], NULL, 0);
131 		else if (!strcmp(argv[1], "name"))
132 			name = argv[2];
133 		else if (!strcmp(argv[1], "last"))
134 			last = strtoul(argv[2], NULL, 0);
135 	} else if (argc > 1)
136 		owner = strtoul(argv[1], NULL, 0);
137 
138 	kprintf("sem            id count   team   last  name\n");
139 
140 	for (i = 0; i < sMaxSems; i++) {
141 		struct sem_entry *sem = &sSems[i];
142 		if (sem->id < 0
143 			|| (last != -1 && sem->u.used.last_acquirer != last)
144 			|| (name != NULL && strstr(sem->u.used.name, name) == NULL)
145 			|| (owner != -1 && sem->u.used.owner != owner))
146 			continue;
147 
148 		kprintf("%p %6ld %5ld %6ld "
149 			"%6ld "
150 			" %s\n", sem, sem->id, sem->u.used.count,
151 			sem->u.used.owner,
152 			sem->u.used.last_acquirer > 0 ? sem->u.used.last_acquirer : 0,
153 			sem->u.used.name);
154 	}
155 
156 	return 0;
157 }
158 
159 
/*!	KDL helper: prints the full state of a single semaphore entry and sets
	the _sem/_semID/_owner (and possibly _acquirer/_releaser) debugger
	variables for follow-up commands.
*/
static void
dump_sem(struct sem_entry *sem)
{
	kprintf("SEM: %p\n", sem);
	kprintf("id:      %ld (%#lx)\n", sem->id, sem->id);
	if (sem->id >= 0) {
		// slot in use: show the "used" side of the union
		kprintf("name:    '%s'\n", sem->u.used.name);
		kprintf("owner:   %ld\n", sem->u.used.owner);
		kprintf("count:   %ld\n", sem->u.used.count);
		kprintf("queue:  ");
		if (!sem->queue.IsEmpty()) {
			// list the IDs of all threads waiting on this semaphore
			ThreadQueue::Iterator it = sem->queue.GetIterator();
			while (queued_thread* entry = it.Next())
				kprintf(" %ld", entry->thread->id);
			kprintf("\n");
		} else
			kprintf(" -\n");

		set_debug_variable("_sem", (addr_t)sem);
		set_debug_variable("_semID", sem->id);
		set_debug_variable("_owner", sem->u.used.owner);

#if DEBUG_SEM_LAST_ACQUIRER
		kprintf("last acquired by: %ld, count: %ld\n", sem->u.used.last_acquirer,
			sem->u.used.last_acquire_count);
		kprintf("last released by: %ld, count: %ld\n", sem->u.used.last_releaser,
			sem->u.used.last_release_count);

		if (sem->u.used.last_releaser != 0)
			set_debug_variable("_releaser", sem->u.used.last_releaser);
		else
			unset_debug_variable("_releaser");
#else
		kprintf("last acquired by: %ld\n", sem->u.used.last_acquirer);
#endif

		if (sem->u.used.last_acquirer != 0)
			set_debug_variable("_acquirer", sem->u.used.last_acquirer);
		else
			unset_debug_variable("_acquirer");

	} else {
		// slot is free: show its free-list linkage
		kprintf("next:    %p\n", sem->u.unused.next);
		kprintf("next_id: %ld\n", sem->u.unused.next_id);
	}
}
206 
207 
208 static int
209 dump_sem_info(int argc, char **argv)
210 {
211 	bool found = false;
212 	addr_t num;
213 	int32 i;
214 
215 	if (argc < 2) {
216 		print_debugger_command_usage(argv[0]);
217 		return 0;
218 	}
219 
220 	num = strtoul(argv[1], NULL, 0);
221 
222 	if (IS_KERNEL_ADDRESS(num)) {
223 		dump_sem((struct sem_entry *)num);
224 		return 0;
225 	} else if (num >= 0) {
226 		uint32 slot = num % sMaxSems;
227 		if (sSems[slot].id != (int)num) {
228 			kprintf("sem %ld (%#lx) doesn't exist!\n", num, num);
229 			return 0;
230 		}
231 
232 		dump_sem(&sSems[slot]);
233 		return 0;
234 	}
235 
236 	// walk through the sem list, trying to match name
237 	for (i = 0; i < sMaxSems; i++) {
238 		if (sSems[i].u.used.name != NULL
239 			&& strcmp(argv[1], sSems[i].u.used.name) == 0) {
240 			dump_sem(&sSems[i]);
241 			found = true;
242 		}
243 	}
244 
245 	if (!found)
246 		kprintf("sem \"%s\" doesn't exist!\n", argv[1]);
247 	return 0;
248 }
249 
250 
251 /*!	\brief Appends a semaphore slot to the free list.
252 
253 	The semaphore list must be locked.
254 	The slot's id field is not changed. It should already be set to -1.
255 
256 	\param slot The index of the semaphore slot.
257 	\param nextID The ID the slot will get when reused. If < 0 the \a slot
258 		   is used.
259 */
260 static void
261 free_sem_slot(int slot, sem_id nextID)
262 {
263 	struct sem_entry *sem = sSems + slot;
264 	// set next_id to the next possible value; for sanity check the current ID
265 	if (nextID < 0)
266 		sem->u.unused.next_id = slot;
267 	else
268 		sem->u.unused.next_id = nextID;
269 	// append the entry to the list
270 	if (sFreeSemsTail)
271 		sFreeSemsTail->u.unused.next = sem;
272 	else
273 		sFreeSemsHead = sem;
274 	sFreeSemsTail = sem;
275 	sem->u.unused.next = NULL;
276 }
277 
278 
279 static inline void
280 notify_sem_select_events(struct sem_entry* sem, uint16 events)
281 {
282 	if (sem->u.used.select_infos)
283 		notify_select_events_list(sem->u.used.select_infos, events);
284 }
285 
286 
/*!	Fills the sem_info structure with information from the specified
	semaphore.
	The semaphore's spinlock must be held when called (both callers,
	_get_sem_info() and _get_next_sem_info(), hold it).
	\a size is currently unused; callers have already verified
	size == sizeof(sem_info).
*/
static void
fill_sem_info(struct sem_entry *sem, sem_info *info, size_t size)
{
	info->sem = sem->id;
	info->team = sem->u.used.owner;
	strlcpy(info->name, sem->u.used.name, sizeof(info->name));
	info->count = sem->u.used.count;
	info->latest_holder = sem->u.used.last_acquirer;
}
300 
301 
/*!	Deletes the semaphore with the given \a id: wakes all waiting threads
	with \c B_BAD_SEM_ID and returns the slot to the free list.

	\param id The semaphore to delete.
	\param checkPermission If \c true (the syscall path), deleting a
		   semaphore owned by the kernel team is refused with
		   \c B_NOT_ALLOWED.
	\return \c B_OK on success, \c B_BAD_SEM_ID, \c B_NOT_ALLOWED, or
		\c B_NO_MORE_SEMS if the subsystem is not initialized.
*/
static status_t
delete_sem_internal(sem_id id, bool checkPermission)
{
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;

	// semaphore IDs map onto table slots by modulo
	int32 slot = id % sMaxSems;

	cpu_status state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		// the slot is free or was reused for another semaphore
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("delete_sem: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	if (checkPermission
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		dprintf("thread %ld tried to delete kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		return B_NOT_ALLOWED;
	}

	KTRACE("delete_sem(sem: %ld)", id);

	// tell select()ers the semaphore is gone
	notify_sem_select_events(&sSems[slot], B_EVENT_INVALID);
	sSems[slot].u.used.select_infos = NULL;

	// free any threads waiting for this semaphore
	GRAB_THREAD_LOCK();
	while (queued_thread* entry = sSems[slot].queue.RemoveHead()) {
		entry->queued = false;
		thread_unblock_locked(entry->thread, B_BAD_SEM_ID);
	}
	RELEASE_THREAD_LOCK();

	// invalidate the slot; the name is freed only after all locks are
	// dropped, since free() must not be called with interrupts disabled
	sSems[slot].id = -1;
	char *name = sSems[slot].u.used.name;
	sSems[slot].u.used.name = NULL;

	RELEASE_SEM_LOCK(sSems[slot]);

	// append slot to the free list; the slot's next ID skips ahead by
	// sMaxSems so that a stale ID can never match the reused slot
	GRAB_SEM_LIST_LOCK();
	free_sem_slot(slot, id + sMaxSems);
	atomic_add(&sUsedSems, -1);
	RELEASE_SEM_LIST_LOCK();

	restore_interrupts(state);

	free(name);

	return B_OK;
}
362 
363 
364 //	#pragma mark - Private Kernel API
365 
366 
/*!	Initializes the semaphore subsystem: sizes and allocates the semaphore
	table, builds the free list, and registers the KDL commands.
	\param args The kernel boot arguments (currently unused here).
	\return 0 on success; panics if the table cannot be allocated.
*/
status_t
haiku_sem_init(kernel_args *args)
{
	area_id area;
	int32 i;

	TRACE(("sem_init: entry\n"));

	// compute maximal number of semaphores depending on the available memory
	// 128 MB -> 16384 semaphores, 448 kB fixed array size
	// 256 MB -> 32768, 896 kB
	// 512 MB -> 65536, 1.75 MB
	// 1024 MB and more -> 131072, 3.5 MB
	i = vm_page_num_pages() / 2;
	while (sMaxSems < i && sMaxSems < kMaxSemaphores)
		sMaxSems <<= 1;

	// create and initialize semaphore table
	area = create_area("sem_table", (void **)&sSems, B_ANY_KERNEL_ADDRESS,
		sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		panic("unable to allocate semaphore table!\n");

	// mark every slot free and chain them all into the free list
	memset(sSems, 0, sizeof(struct sem_entry) * sMaxSems);
	for (i = 0; i < sMaxSems; i++) {
		sSems[i].id = -1;
		free_sem_slot(i, i);
	}

	// add debugger commands
	add_debugger_command_etc("sems", &dump_sem_list,
		"Dump a list of all active semaphores (for team, with name, etc.)",
		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]"
			" | (\"last\" <last acquirer>)\n"
		"Prints a list of all active semaphores meeting the given\n"
		"requirement. If no argument is given, all sems are listed.\n"
		"  <team>             - The team owning the semaphores.\n"
		"  <name>             - Part of the name of the semaphores.\n"
		"  <last acquirer>    - The thread that last acquired the semaphore.\n"
		, 0);
	add_debugger_command_etc("sem", &dump_sem_info,
		"Dump info about a particular semaphore",
		"<sem>\n"
		"Prints info about the specified semaphore.\n"
		"  <sem>  - pointer to the semaphore structure, semaphore ID, or name\n"
		"           of the semaphore to print info for.\n", 0);

	TRACE(("sem_init: exit\n"));

	sSemsActive = true;

	return 0;
}
421 
422 
/*!	Creates a semaphore with the given parameters.
	Note, the team_id is not checked, it must be correct, or else
	that semaphore might not be deleted.
	This function is only available from within the kernel, and
	should not be made public - if possible, we should remove it
	completely (and have only create_sem() exported).

	\param count The initial semaphore count.
	\param name The semaphore name; copied (and silently truncated to
		   B_OS_NAME_LENGTH - 1 characters); NULL yields "unnamed semaphore".
	\param owner The team that shall own the semaphore (unvalidated).
	\return The new semaphore's ID, or \c B_NO_MORE_SEMS/\c B_NO_MEMORY.
*/
sem_id
create_sem_etc(int32 count, const char *name, team_id owner)
{
	struct sem_entry *sem = NULL;
	cpu_status state;
	sem_id id = B_NO_MORE_SEMS;
	char *tempName;
	size_t nameLength;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

#if 0
	// TODO: the code below might cause unwanted deadlocks,
	// we need an asynchronously running low resource handler.
	if (sUsedSems == sMaxSems) {
		// The vnode cache may have collected lots of semaphores.
		// Freeing some unused vnodes should improve our situation.
		// TODO: maybe create a generic "low resources" handler, instead
		//	of only the specialised low memory thing?
		vfs_free_unused_vnodes(B_LOW_MEMORY_WARNING);
	}
	if (sUsedSems == sMaxSems) {
		// try again with more enthusiasm
		vfs_free_unused_vnodes(B_LOW_MEMORY_CRITICAL);
	}
#endif
	if (sUsedSems == sMaxSems)
		return B_NO_MORE_SEMS;

	if (name == NULL)
		name = "unnamed semaphore";

	// copy the name before taking any spinlocks -- malloc() must not be
	// called with interrupts disabled
	nameLength = strlen(name) + 1;
	nameLength = min_c(nameLength, B_OS_NAME_LENGTH);
	tempName = (char *)malloc(nameLength);
	if (tempName == NULL)
		return B_NO_MEMORY;
	strlcpy(tempName, name, nameLength);

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	// get the first slot from the free list
	sem = sFreeSemsHead;
	if (sem) {
		// remove it from the free list
		sFreeSemsHead = sem->u.unused.next;
		if (!sFreeSemsHead)
			sFreeSemsTail = NULL;

		// init the slot
		GRAB_SEM_LOCK(*sem);
		sem->id = sem->u.unused.next_id;
		sem->u.used.count = count;
		sem->u.used.net_count = count;
		new(&sem->queue) ThreadQueue;
		sem->u.used.name = tempName;
		sem->u.used.owner = owner;
		sem->u.used.select_infos = NULL;
		id = sem->id;
		RELEASE_SEM_LOCK(*sem);

		atomic_add(&sUsedSems, 1);

		KTRACE("create_sem_etc(count: %ld, name: %s, owner: %ld) -> %ld",
			count, name, owner, id);

		T_SCHEDULING_ANALYSIS(CreateSemaphore(id, name));
		NotifyWaitObjectListeners(&WaitObjectListener::SemaphoreCreated, id,
			name);
	}

	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	// no free slot was available: release the name copy again
	if (!sem)
		free(tempName);

	return id;
}
511 
512 
/*!	Registers a select info on the given semaphore for
	wait_for_objects()-style event notification.
	Only B_EVENT_ACQUIRE_SEMAPHORE and B_EVENT_INVALID are supported; other
	requested events are filtered out. If the semaphore is immediately
	acquirable, the event is notified right away.

	\param id The semaphore to select on.
	\param info The select info to register.
	\param kernel \c false for userland callers; selecting on a
		   kernel-owned semaphore is then refused with \c B_NOT_ALLOWED.
*/
status_t
select_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;
	status_t error = B_OK;

	if (id < 0)
		return B_BAD_SEM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		// bad sem ID
		error = B_BAD_SEM_ID;
	} else if (!kernel
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		// kernel semaphore, but call from userland
		error = B_NOT_ALLOWED;
	} else {
		info->selected_events &= B_EVENT_ACQUIRE_SEMAPHORE | B_EVENT_INVALID;

		if (info->selected_events != 0) {
			// push the info onto the semaphore's select list
			info->next = sSems[slot].u.used.select_infos;
			sSems[slot].u.used.select_infos = info;

			// already acquirable -- notify immediately
			if (sSems[slot].u.used.count > 0)
				notify_select_events(info, B_EVENT_ACQUIRE_SEMAPHORE);
		}
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return error;
}
552 
553 
/*!	Removes a previously registered select info from the given semaphore.
	Always returns \c B_OK (a stale or already-removed info is silently
	ignored).

	\param id The semaphore the info was registered on.
	\param info The select info to unregister.
	\param kernel Unused here; kept for API symmetry with select_sem().
*/
status_t
deselect_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;

	if (id < 0)
		return B_BAD_SEM_ID;

	// nothing was registered in the first place
	if (info->selected_events == 0)
		return B_OK;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id == id) {
		// unlink the info from the singly-linked select list
		select_info** infoLocation = &sSems[slot].u.used.select_infos;
		while (*infoLocation != NULL && *infoLocation != info)
			infoLocation = &(*infoLocation)->next;

		if (*infoLocation == info)
			*infoLocation = info->next;
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}
585 
586 
/*!	Forcibly removes a thread from a semaphore's wait queue. May have to wake
	up other threads in the process: returning the entry's count to the
	semaphore can make the requests of threads queued behind it satisfiable.
	Must be called with the semaphore lock held. The thread lock must not be
	held (it is acquired here).
*/
static void
remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
{
	// already dequeued by a concurrent release/delete -- nothing to do
	if (!entry->queued)
		return;

	sem->queue.Remove(entry);
	entry->queued = false;
	// give the units this thread had reserved back to the semaphore
	sem->u.used.count += entry->count;

	// We're done with this entry. We only have to check, if other threads
	// need unblocking, too.

	// Now see if more threads need to be woken up. We get the thread lock for
	// that time, so the blocking state of threads won't change. We need that
	// lock anyway when unblocking a thread.
	GRAB_THREAD_LOCK();

	while ((entry = sem->queue.Head()) != NULL) {
		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied, unblock
			// it. Otherwise we can't unblock any other thread.
			if (entry->count > sem->u.used.net_count)
				break;

			thread_unblock_locked(entry->thread, B_OK);
			sem->u.used.net_count -= entry->count;
		} else {
			// The thread is no longer waiting, but still queued, which means
			// acquiration failed and we can just remove it.
			sem->u.used.count += entry->count;
		}

		sem->queue.Remove(entry);
		entry->queued = false;
	}

	RELEASE_THREAD_LOCK();

	// select notification, if the semaphore is now acquirable
	if (sem->u.used.count > 0)
		notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE);
}
634 
635 
/*!	This function cycles through the sem table, deleting all the sems
	that are owned by the specified team.
	Note that the list lock is dropped for each actual deletion (delete_sem()
	takes the per-entry lock and may free memory), so the scan is not atomic
	with respect to concurrent creations/deletions.
	\return The number of deleted semaphores, or \c B_BAD_TEAM_ID.
*/
int
sem_delete_owned_sems(team_id owner)
{
	int state;
	int i;
	int count = 0;

	// ToDo: that looks horribly inefficient - maybe it would be better
	//	to have them in a list in the team

	if (owner < 0)
		return B_BAD_TEAM_ID;

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	for (i = 0; i < sMaxSems; i++) {
		if (sSems[i].id != -1 && sSems[i].u.used.owner == owner) {
			sem_id id = sSems[i].id;

			// drop the locks; delete_sem() takes its own locks and calls
			// free(), which must not happen with interrupts disabled
			RELEASE_SEM_LIST_LOCK();
			restore_interrupts(state);

			delete_sem(id);
			count++;

			state = disable_interrupts();
			GRAB_SEM_LIST_LOCK();
		}
	}

	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	return count;
}
675 
676 
/*!	Returns the total number of semaphore slots (computed at boot). */
int32
sem_max_sems(void)
{
	return sMaxSems;
}
682 
683 
/*!	Returns the number of semaphores currently in use. */
int32
sem_used_sems(void)
{
	return sUsedSems;
}
689 
690 
691 //	#pragma mark - Public Kernel API
692 
693 
/*!	Creates a semaphore owned by the kernel team.
	See create_sem_etc() for the parameter semantics.
*/
sem_id
create_sem(int32 count, const char *name)
{
	return create_sem_etc(count, name, team_get_kernel_team_id());
}
699 
700 
/*!	Deletes the given semaphore without any permission check (kernel API;
	the checked variant is _user_delete_sem()).
*/
status_t
delete_sem(sem_id id)
{
	return delete_sem_internal(id, false);
}
706 
707 
/*!	Acquires one unit of the given semaphore, blocking indefinitely. */
status_t
acquire_sem(sem_id id)
{
	return switch_sem_etc(-1, id, 1, 0, 0);
}
713 
714 
/*!	Acquires \a count units of the given semaphore; \a flags and \a timeout
	are passed through to switch_sem_etc() (no semaphore is released).
*/
status_t
acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	return switch_sem_etc(-1, id, count, flags, timeout);
}
720 
721 
/*!	Releases one unit of \a toBeReleased and acquires one unit of
	\a toBeAcquired without a wakeup-losing gap in between
	(see switch_sem_etc()).
*/
status_t
switch_sem(sem_id toBeReleased, sem_id toBeAcquired)
{
	return switch_sem_etc(toBeReleased, toBeAcquired, 1, 0, 0);
}
727 
728 
/*!	Acquires \a count units of semaphore \a id and, if \a semToBeReleased
	is >= 0, releases one unit of that semaphore first. The calling thread is
	enqueued on \a id before \a semToBeReleased is released, so a wakeup
	from the released semaphore cannot be missed.

	\param semToBeReleased Semaphore to release before blocking, or a
		   negative value for none.
	\param id The semaphore to acquire from.
	\param count Number of units to acquire (must be > 0).
	\param flags B_RELATIVE_TIMEOUT/B_ABSOLUTE_TIMEOUT (mutually exclusive),
		   B_CAN_INTERRUPT and friends, B_CHECK_PERMISSION.
	\param timeout The timeout; only used if one of the timeout flags is set.
	\return \c B_OK, \c B_BAD_SEM_ID, \c B_BAD_VALUE, \c B_NOT_ALLOWED,
		\c B_WOULD_BLOCK, \c B_TIMED_OUT, or \c B_INTERRUPTED.
*/
status_t
switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
	uint32 flags, bigtime_t timeout)
{
	int slot = id % sMaxSems;
	int state;
	status_t status = B_OK;

	// during early boot semaphores don't exist yet; pretend success
	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	if (!are_interrupts_enabled()) {
		panic("switch_sem_etc: called with interrupts disabled for sem %ld\n",
			id);
	}

	if (id < 0)
		return B_BAD_SEM_ID;
	// the two timeout modes are mutually exclusive
	if (count <= 0
		|| (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {
		return B_BAD_VALUE;
	}

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("switch_sem_etc: bad sem %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to acquire kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		status = B_NOT_ALLOWED;
		goto err;
	}

	if (sSems[slot].u.used.count - count < 0) {
		// the request cannot be satisfied now; handle zero/negative timeouts
		// without blocking at all
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
			// immediate timeout
			status = B_WOULD_BLOCK;
			goto err;
		} else if ((flags & B_ABSOLUTE_TIMEOUT) != 0 && timeout < 0) {
			// absolute negative timeout
			status = B_TIMED_OUT;
			goto err;
		}
	}

	KTRACE("switch_sem_etc(semToBeReleased: %ld, sem: %ld, count: %ld, "
		"flags: 0x%lx, timeout: %lld)", semToBeReleased, id, count, flags,
		timeout);

	if ((sSems[slot].u.used.count -= count) < 0) {
		// we need to block
		struct thread *thread = thread_get_current_thread();

		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
			" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));

		// do a quick check to see if the thread has any pending signals
		// this should catch most of the cases where the thread had a signal
		if (thread_is_interrupted(thread, flags)) {
			sSems[slot].u.used.count += count;
			status = B_INTERRUPTED;
				// the other semaphore will be released later
			goto err;
		}

		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
			timeout = B_INFINITE_TIMEOUT;

		// enqueue in the semaphore queue and get ready to wait
		queued_thread queueEntry(thread, count);
		sSems[slot].queue.Add(&queueEntry);
		queueEntry.queued = true;

		thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE,
			(void*)(addr_t)id);

		RELEASE_SEM_LOCK(sSems[slot]);

		// release the other semaphore, if any -- we are already queued, so a
		// release from there can unblock us without a lost-wakeup race
		if (semToBeReleased >= 0) {
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
			semToBeReleased = -1;
		}

		GRAB_THREAD_LOCK();

		status_t acquireStatus = timeout == B_INFINITE_TIMEOUT
			? thread_block_locked(thread)
			: thread_block_with_timeout_locked(flags, timeout);

		RELEASE_THREAD_LOCK();
		GRAB_SEM_LOCK(sSems[slot]);

		// If we're still queued, this means the acquiration failed, and we
		// need to remove our entry and (potentially) wake up other threads.
		if (queueEntry.queued)
			remove_thread_from_sem(&queueEntry, &sSems[slot]);

		if (acquireStatus >= B_OK) {
			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
			sSems[slot].u.used.last_acquire_count = count;
#endif
		}

		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);

		// NOTE(review): this TRACE reads u.used.name after the sem lock has
		// been released; only relevant when compiled with TRACE_SEM
		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
			thread->name));
		KTRACE("switch_sem_etc() done: 0x%lx", acquireStatus);
		return acquireStatus;
	} else {
		// fast path: enough units available, no blocking required
		sSems[slot].u.used.net_count -= count;
		sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
		sSems[slot].u.used.last_acquire_count = count;
#endif
	}

err:
	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	if (status == B_INTERRUPTED && semToBeReleased >= B_OK) {
		// depending on when we were interrupted, we need to still
		// release the semaphore to always leave in a consistent
		// state
		release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
	}

#if 0
	if (status == B_NOT_ALLOWED)
	_user_debugger("Thread tried to acquire kernel semaphore.");
#endif

	KTRACE("switch_sem_etc() done: 0x%lx", status);

	return status;
}
881 
882 
/*!	Releases one unit of the given semaphore with default flags. */
status_t
release_sem(sem_id id)
{
	return release_sem_etc(id, 1, 0);
}
888 
889 
/*!	Releases \a count units of semaphore \a id, waking up as many queued
	threads as can be satisfied.

	\param id The semaphore to release.
	\param count Units to release; must be > 0 unless B_RELEASE_ALL is set.
	\param flags B_RELEASE_ALL (wake all currently blocked threads),
		   B_RELEASE_IF_WAITING_ONLY (don't raise the count beyond the
		   waiters' needs), B_CHECK_PERMISSION, B_DO_NOT_RESCHEDULE.
	\return \c B_OK, \c B_BAD_SEM_ID, \c B_BAD_VALUE, \c B_NOT_ALLOWED, or
		\c B_NO_MORE_SEMS if the subsystem is not initialized.
*/
status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	int32 slot = id % sMaxSems;

	// during early boot semaphores don't exist yet; pretend success
	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;

	// RAII lockers: interrupts and the sem lock are restored/released on all
	// return paths automatically
	InterruptsLocker _;
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		return B_NOT_ALLOWED;
	}

	KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,
		flags);

	// flip the sign of last_acquirer -- presumably marks "released since the
	// last acquisition"; the "sems" KDL command only prints positive values.
	// TODO(review): confirm intent.
	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	if (flags & B_RELEASE_ALL) {
		// release exactly as many units as blocked threads still need
		count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			return B_OK;

		// Don't release more than necessary -- there might be interrupted/
		// timed out threads in the queue.
		flags |= B_RELEASE_IF_WAITING_ONLY;
	}

	bool reschedule = false;

	SpinLocker threadLocker(gThreadSpinlock);

	while (count > 0) {
		queued_thread* entry = sSems[slot].queue.Head();
		if (entry == NULL) {
			// no (more) waiters: credit the remaining units to the count,
			// unless the caller asked to wake waiters only
			if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
			}
			break;
		}

		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied,
			// unblock it. Otherwise we can't unblock any other thread.
			if (entry->count > sSems[slot].u.used.net_count + count) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
				break;
			}

			reschedule |= thread_unblock_locked(entry->thread, B_OK);

			int delta = min_c(count, entry->count);
			sSems[slot].u.used.count += delta;
			sSems[slot].u.used.net_count += delta - entry->count;
			count -= delta;
		} else {
			// The thread is no longer waiting, but still queued, which
			// means acquiration failed and we can just remove it.
			sSems[slot].u.used.count += entry->count;
		}

		sSems[slot].queue.Remove(entry);
		entry->queued = false;
	}

	threadLocker.Unlock();

	// notify select()ers if the semaphore became acquirable
	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	// If we've unblocked another thread reschedule, if we've not explicitly
	// been told not to.
	if (reschedule && (flags & B_DO_NOT_RESCHEDULE) == 0) {
		semLocker.Unlock();
		threadLocker.Lock();
		scheduler_reschedule();
	}

	return B_OK;
}
996 
997 
998 status_t
999 get_sem_count(sem_id id, int32 *_count)
1000 {
1001 	int slot;
1002 	int state;
1003 
1004 	if (sSemsActive == false)
1005 		return B_NO_MORE_SEMS;
1006 	if (id < 0)
1007 		return B_BAD_SEM_ID;
1008 	if (_count == NULL)
1009 		return B_BAD_VALUE;
1010 
1011 	slot = id % sMaxSems;
1012 
1013 	state = disable_interrupts();
1014 	GRAB_SEM_LOCK(sSems[slot]);
1015 
1016 	if (sSems[slot].id != id) {
1017 		RELEASE_SEM_LOCK(sSems[slot]);
1018 		restore_interrupts(state);
1019 		TRACE(("sem_get_count: invalid sem_id %ld\n", id));
1020 		return B_BAD_SEM_ID;
1021 	}
1022 
1023 	*_count = sSems[slot].u.used.count;
1024 
1025 	RELEASE_SEM_LOCK(sSems[slot]);
1026 	restore_interrupts(state);
1027 
1028 	return B_OK;
1029 }
1030 
1031 
1032 /*!	Called by the get_sem_info() macro. */
1033 status_t
1034 _get_sem_info(sem_id id, struct sem_info *info, size_t size)
1035 {
1036 	status_t status = B_OK;
1037 	int state;
1038 	int slot;
1039 
1040 	if (!sSemsActive)
1041 		return B_NO_MORE_SEMS;
1042 	if (id < 0)
1043 		return B_BAD_SEM_ID;
1044 	if (info == NULL || size != sizeof(sem_info))
1045 		return B_BAD_VALUE;
1046 
1047 	slot = id % sMaxSems;
1048 
1049 	state = disable_interrupts();
1050 	GRAB_SEM_LOCK(sSems[slot]);
1051 
1052 	if (sSems[slot].id != id) {
1053 		status = B_BAD_SEM_ID;
1054 		TRACE(("get_sem_info: invalid sem_id %ld\n", id));
1055 	} else
1056 		fill_sem_info(&sSems[slot], info, size);
1057 
1058 	RELEASE_SEM_LOCK(sSems[slot]);
1059 	restore_interrupts(state);
1060 
1061 	return status;
1062 }
1063 
1064 
/*!	Called by the get_next_sem_info() macro. Iterates over all semaphores
	owned by \a team, using \a _cookie as the slot index to resume from
	(start with 0; updated to the slot after the one returned).
	\return \c B_OK with \a info filled in, or \c B_BAD_VALUE when the end of
		the table is reached (or on invalid arguments).
*/
status_t
_get_next_sem_info(team_id team, int32 *_cookie, struct sem_info *info,
	size_t size)
{
	int state;
	int slot;
	bool found = false;

	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (_cookie == NULL || info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;

	if (team == B_CURRENT_TEAM)
		team = team_get_current_team_id();
	/* prevents sSems[].owner == -1 >= means owned by a port */
	if (team < 0 || !team_is_valid(team))
		return B_BAD_TEAM_ID;

	slot = *_cookie;
	if (slot >= sMaxSems)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	while (slot < sMaxSems) {
		// cheap unlocked pre-check, then re-check under the entry's own lock
		// (double-checked to avoid taking every entry lock during the scan)
		if (sSems[slot].id != -1 && sSems[slot].u.used.owner == team) {
			GRAB_SEM_LOCK(sSems[slot]);
			if (sSems[slot].id != -1 && sSems[slot].u.used.owner == team) {
				// found one!
				fill_sem_info(&sSems[slot], info, size);

				RELEASE_SEM_LOCK(sSems[slot]);
				slot++;
				found = true;
				break;
			}
			RELEASE_SEM_LOCK(sSems[slot]);
		}
		slot++;
	}
	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	if (!found)
		return B_BAD_VALUE;

	*_cookie = slot;
	return B_OK;
}
1117 
1118 
/*!	Transfers ownership of the given semaphore to \a team.
	\return \c B_NO_ERROR on success, \c B_BAD_SEM_ID, \c B_BAD_TEAM_ID, or
		\c B_NO_MORE_SEMS if the subsystem is not initialized.
*/
status_t
set_sem_owner(sem_id id, team_id team)
{
	int state;
	int slot;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (team < 0 || !team_is_valid(team))
		return B_BAD_TEAM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("set_sem_owner: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// ToDo: this is a small race condition: the team ID could already
	// be invalid at this point - we would lose one semaphore slot in
	// this case!
	// The only safe way to do this is to prevent either team (the new
	// or the old owner) from dying until we leave the spinlock.
	sSems[slot].u.used.owner = team;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_NO_ERROR;
}
1156 
1157 
1158 /*!	Returns the name of the semaphore. The name is not copied, so the caller
1159 	must make sure that the semaphore remains alive as long as the name is used.
1160 */
1161 const char*
1162 sem_get_name_unsafe(sem_id id)
1163 {
1164 	int slot = id % sMaxSems;
1165 
1166 	if (sSemsActive == false || id < 0 || sSems[slot].id != id)
1167 		return NULL;
1168 
1169 	return sSems[slot].u.used.name;
1170 }
1171 
1172 
1173 //	#pragma mark - Syscalls
1174 
1175 
1176 sem_id
1177 _user_create_sem(int32 count, const char *userName)
1178 {
1179 	char name[B_OS_NAME_LENGTH];
1180 
1181 	if (userName == NULL)
1182 		return create_sem_etc(count, NULL, team_get_current_team_id());
1183 
1184 	if (!IS_USER_ADDRESS(userName)
1185 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
1186 		return B_BAD_ADDRESS;
1187 
1188 	return create_sem_etc(count, name, team_get_current_team_id());
1189 }
1190 
1191 
1192 status_t
1193 _user_delete_sem(sem_id id)
1194 {
1195 	return delete_sem_internal(id, true);
1196 }
1197 
1198 
1199 status_t
1200 _user_acquire_sem(sem_id id)
1201 {
1202 	status_t error = switch_sem_etc(-1, id, 1,
1203 		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);
1204 
1205 	return syscall_restart_handle_post(error);
1206 }
1207 
1208 
1209 status_t
1210 _user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
1211 {
1212 	syscall_restart_handle_timeout_pre(flags, timeout);
1213 
1214 	status_t error = switch_sem_etc(-1, id, count,
1215 		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);
1216 
1217 	return syscall_restart_handle_timeout_post(error, timeout);
1218 }
1219 
1220 
1221 status_t
1222 _user_switch_sem(sem_id releaseSem, sem_id id)
1223 {
1224 	status_t error = switch_sem_etc(releaseSem, id, 1,
1225 		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);
1226 
1227 	if (releaseSem < 0)
1228 		return syscall_restart_handle_post(error);
1229 
1230 	return error;
1231 }
1232 
1233 
1234 status_t
1235 _user_switch_sem_etc(sem_id releaseSem, sem_id id, int32 count, uint32 flags,
1236 	bigtime_t timeout)
1237 {
1238 	if (releaseSem < 0)
1239 		syscall_restart_handle_timeout_pre(flags, timeout);
1240 
1241 	status_t error = switch_sem_etc(releaseSem, id, count,
1242 		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);
1243 
1244 	if (releaseSem < 0)
1245 		return syscall_restart_handle_timeout_post(error, timeout);
1246 
1247 	return error;
1248 }
1249 
1250 
1251 status_t
1252 _user_release_sem(sem_id id)
1253 {
1254 	return release_sem_etc(id, 1, B_CHECK_PERMISSION);
1255 }
1256 
1257 
1258 status_t
1259 _user_release_sem_etc(sem_id id, int32 count, uint32 flags)
1260 {
1261 	return release_sem_etc(id, count, flags | B_CHECK_PERMISSION);
1262 }
1263 
1264 
1265 status_t
1266 _user_get_sem_count(sem_id id, int32 *userCount)
1267 {
1268 	status_t status;
1269 	int32 count;
1270 
1271 	if (userCount == NULL || !IS_USER_ADDRESS(userCount))
1272 		return B_BAD_ADDRESS;
1273 
1274 	status = get_sem_count(id, &count);
1275 	if (status == B_OK && user_memcpy(userCount, &count, sizeof(int32)) < B_OK)
1276 		return B_BAD_ADDRESS;
1277 
1278 	return status;
1279 }
1280 
1281 
1282 status_t
1283 _user_get_sem_info(sem_id id, struct sem_info *userInfo, size_t size)
1284 {
1285 	struct sem_info info;
1286 	status_t status;
1287 
1288 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo))
1289 		return B_BAD_ADDRESS;
1290 
1291 	status = _get_sem_info(id, &info, size);
1292 	if (status == B_OK && user_memcpy(userInfo, &info, size) < B_OK)
1293 		return B_BAD_ADDRESS;
1294 
1295 	return status;
1296 }
1297 
1298 
1299 status_t
1300 _user_get_next_sem_info(team_id team, int32 *userCookie, struct sem_info *userInfo,
1301 	size_t size)
1302 {
1303 	struct sem_info info;
1304 	int32 cookie;
1305 	status_t status;
1306 
1307 	if (userCookie == NULL || userInfo == NULL
1308 		|| !IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
1309 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
1310 		return B_BAD_ADDRESS;
1311 
1312 	status = _get_next_sem_info(team, &cookie, &info, size);
1313 
1314 	if (status == B_OK) {
1315 		if (user_memcpy(userInfo, &info, size) < B_OK
1316 			|| user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK)
1317 			return B_BAD_ADDRESS;
1318 	}
1319 
1320 	return status;
1321 }
1322 
1323 
1324 status_t
1325 _user_set_sem_owner(sem_id id, team_id team)
1326 {
1327 	return set_sem_owner(id, team);
1328 }
1329