xref: /haiku/src/system/kernel/sem.cpp (revision 1345706a9ff6ad0dc041339a02d4259998b0765d)
/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Semaphore code */


#include <sem.h>

#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <arch/int.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <ksignal.h>
#include <kscheduler.h>
#include <listeners.h>
#include <scheduling_analysis.h>
#include <smp.h>
#include <syscall_restart.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vfs.h>
#include <vm/vm_page.h>
#include <wait_for_objects.h>

#include "kernel_debug_config.h"


//#define TRACE_SEM
#ifdef TRACE_SEM
#	define TRACE(x) dprintf_no_syslog x
#else
#	define TRACE(x) ;
#endif

//#define KTRACE_SEM
#ifdef KTRACE_SEM
#	define KTRACE(x...) ktrace_printf(x)
#else
#	define KTRACE(x...) do {} while (false)
#endif
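

// Usage note: TRACE() takes its argument list in double parentheses, e.g.
// TRACE(("sem_init: entry\n")), so that the macro can expand to a plain
// dprintf_no_syslog call or to nothing without needing variadic macros.
// KTRACE() uses the x... form and is invoked like a normal function.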


struct queued_thread : DoublyLinkedListLinkImpl<queued_thread> {
	queued_thread(struct thread *thread, int32 count)
		:
		thread(thread),
		count(count),
		queued(false)
	{
	}

	struct thread	*thread;
	int32			count;
	bool			queued;
};

typedef DoublyLinkedList<queued_thread> ThreadQueue;

struct sem_entry {
	union {
		// when slot in use
		struct {
			struct list_link	team_link;
			int32				count;
			int32				net_count;
									// count + acquisition count of all blocked
									// threads
			char*				name;
			struct team*		owner;
			select_info*		select_infos;
			thread_id			last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
			int32				last_acquire_count;
			thread_id			last_releaser;
			int32				last_release_count;
#endif
		} used;

		// when slot unused
		struct {
			sem_id				next_id;
			struct sem_entry*	next;
		} unused;
	} u;

	sem_id				id;
	spinlock			lock;	// protects only the id field when unused
	ThreadQueue			queue;	// should be in u.used, but has a constructor
};
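

// A note on the slot/ID scheme (editorial summary of the code below): a
// semaphore ID encodes its table slot as id % sMaxSems. When a semaphore is
// deleted, the slot's next_id is set to id + sMaxSems (see
// uninit_sem_locked()), so the slot hands out a fresh ID on reuse and stale
// IDs fail the sSems[slot].id != id checks. For example, with
// sMaxSems == 4096, deleting sem 5 sets the slot's next ID to 4101; a later
// acquire_sem(5) then returns B_BAD_SEM_ID. The used/unused union is safe
// because a slot is only interpreted one way at a time, guarded by
// sem_entry::lock.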

static const int32 kMaxSemaphores = 65536;
static int32 sMaxSems = 4096;
	// Final value is computed based on the amount of available memory
static int32 sUsedSems = 0;

static struct sem_entry *sSems = NULL;
static bool sSemsActive = false;
static struct sem_entry	*sFreeSemsHead = NULL;
static struct sem_entry	*sFreeSemsTail = NULL;

static spinlock sSemsSpinlock = B_SPINLOCK_INITIALIZER;
#define GRAB_SEM_LIST_LOCK()     acquire_spinlock(&sSemsSpinlock)
#define RELEASE_SEM_LIST_LOCK()  release_spinlock(&sSemsSpinlock)
#define GRAB_SEM_LOCK(s)         acquire_spinlock(&(s).lock)
#define RELEASE_SEM_LOCK(s)      release_spinlock(&(s).lock)
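

// Lock ordering as practiced by the call sites below (descriptive summary,
// not from the original file): interrupts are disabled first, then locks
// nest team lock -> sem list lock -> sem lock -> thread lock. For instance,
// delete_sem_internal() takes the team lock before the sem lock, and
// uninit_sem_locked() takes the thread lock while the sem lock is held.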


static int
dump_sem_list(int argc, char** argv)
{
	const char* name = NULL;
	team_id owner = -1;
	thread_id last = -1;
	int32 i;

	if (argc > 2) {
		if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
			owner = strtoul(argv[2], NULL, 0);
		else if (!strcmp(argv[1], "name"))
			name = argv[2];
		else if (!strcmp(argv[1], "last"))
			last = strtoul(argv[2], NULL, 0);
	} else if (argc > 1)
		owner = strtoul(argv[1], NULL, 0);

	kprintf("sem            id count   team   last  name\n");

	for (i = 0; i < sMaxSems; i++) {
		struct sem_entry* sem = &sSems[i];
		if (sem->id < 0
			|| (last != -1 && sem->u.used.last_acquirer != last)
			|| (name != NULL && strstr(sem->u.used.name, name) == NULL)
			|| (owner != -1
				&& (sem->u.used.owner == NULL
					|| sem->u.used.owner->id != owner)))
			continue;

		kprintf("%p %6ld %5ld %6ld %6ld  %s\n", sem, sem->id,
			sem->u.used.count,
			sem->u.used.owner != NULL ? sem->u.used.owner->id : -1,
			sem->u.used.last_acquirer > 0 ? sem->u.used.last_acquirer : 0,
			sem->u.used.name);
	}

	return 0;
}


static void
dump_sem(struct sem_entry* sem)
{
	kprintf("SEM: %p\n", sem);
	kprintf("id:      %ld (%#lx)\n", sem->id, sem->id);
	if (sem->id >= 0) {
		kprintf("name:    '%s'\n", sem->u.used.name);
		kprintf("owner:   %ld\n",
			sem->u.used.owner != NULL ? sem->u.used.owner->id : -1);
		kprintf("count:   %ld\n", sem->u.used.count);
		kprintf("queue:  ");
		if (!sem->queue.IsEmpty()) {
			ThreadQueue::Iterator it = sem->queue.GetIterator();
			while (queued_thread* entry = it.Next())
				kprintf(" %ld", entry->thread->id);
			kprintf("\n");
		} else
			kprintf(" -\n");

		set_debug_variable("_sem", (addr_t)sem);
		set_debug_variable("_semID", sem->id);
		set_debug_variable("_owner",
			sem->u.used.owner != NULL ? sem->u.used.owner->id : -1);

#if DEBUG_SEM_LAST_ACQUIRER
		kprintf("last acquired by: %ld, count: %ld\n",
			sem->u.used.last_acquirer, sem->u.used.last_acquire_count);
		kprintf("last released by: %ld, count: %ld\n",
			sem->u.used.last_releaser, sem->u.used.last_release_count);

		if (sem->u.used.last_releaser != 0)
			set_debug_variable("_releaser", sem->u.used.last_releaser);
		else
			unset_debug_variable("_releaser");
#else
		kprintf("last acquired by: %ld\n", sem->u.used.last_acquirer);
#endif

		if (sem->u.used.last_acquirer != 0)
			set_debug_variable("_acquirer", sem->u.used.last_acquirer);
		else
			unset_debug_variable("_acquirer");
	} else {
		kprintf("next:    %p\n", sem->u.unused.next);
		kprintf("next_id: %ld\n", sem->u.unused.next_id);
	}
}


static int
dump_sem_info(int argc, char **argv)
{
	bool found = false;
	addr_t num;
	int32 i;

	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	num = strtoul(argv[1], NULL, 0);

	if (IS_KERNEL_ADDRESS(num)) {
		dump_sem((struct sem_entry *)num);
		return 0;
	} else if (num >= 0) {
		uint32 slot = num % sMaxSems;
		if (sSems[slot].id != (int)num) {
			kprintf("sem %ld (%#lx) doesn't exist!\n", num, num);
			return 0;
		}

		dump_sem(&sSems[slot]);
		return 0;
	}

	// walk through the sem list, trying to match name
	for (i = 0; i < sMaxSems; i++) {
		if (sSems[i].u.used.name != NULL
			&& strcmp(argv[1], sSems[i].u.used.name) == 0) {
			dump_sem(&sSems[i]);
			found = true;
		}
	}

	if (!found)
		kprintf("sem \"%s\" doesn't exist!\n", argv[1]);
	return 0;
}
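

// Example KDL invocations (illustrative, based on the parsing above and the
// usage text registered in haiku_sem_init()): "sems" lists all active
// semaphores, "sems team 42" or "sems 42" filters by owning team,
// "sems name vnode" matches a name substring, "sems last 123" filters by
// last acquirer, and "sem <id|address|name>" dumps a single semaphore.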


/*!	\brief Appends a semaphore slot to the free list.

	The semaphore list must be locked.
	The slot's id field is not changed. It should already be set to -1.

	\param slot The index of the semaphore slot.
	\param nextID The ID the slot will get when reused. If < 0, the \a slot
		   index itself is used.
*/
static void
free_sem_slot(int slot, sem_id nextID)
{
	struct sem_entry *sem = sSems + slot;
	// set next_id to the ID the slot will hand out when reused; fall back
	// to the slot index itself
	if (nextID < 0)
		sem->u.unused.next_id = slot;
	else
		sem->u.unused.next_id = nextID;
	// append the entry to the list
	if (sFreeSemsTail)
		sFreeSemsTail->u.unused.next = sem;
	else
		sFreeSemsHead = sem;
	sFreeSemsTail = sem;
	sem->u.unused.next = NULL;
}


static inline void
notify_sem_select_events(struct sem_entry* sem, uint16 events)
{
	if (sem->u.used.select_infos)
		notify_select_events_list(sem->u.used.select_infos, events);
}


/*!	Fills the sem_info structure with information from the specified
	semaphore.
	The semaphore's lock must be held when called.
*/
static void
fill_sem_info(struct sem_entry* sem, sem_info* info, size_t size)
{
	info->sem = sem->id;
	info->team = sem->u.used.owner != NULL ? sem->u.used.owner->id : -1;
	strlcpy(info->name, sem->u.used.name, sizeof(info->name));
	info->count = sem->u.used.count;
	info->latest_holder = sem->u.used.last_acquirer;
}


/*!	You must call this function with interrupts disabled, and the semaphore's
	spinlock held. Note that it will unlock the spinlock itself.
	Since it cannot free() the semaphore's name with interrupts turned off, it
	will return that one in \a _name.
*/
static void
uninit_sem_locked(struct sem_entry& sem, char** _name)
{
	KTRACE("delete_sem(sem: %ld)", sem.id);

	notify_sem_select_events(&sem, B_EVENT_INVALID);
	sem.u.used.select_infos = NULL;

	// free any threads waiting for this semaphore
	GRAB_THREAD_LOCK();
	while (queued_thread* entry = sem.queue.RemoveHead()) {
		entry->queued = false;
		thread_unblock_locked(entry->thread, B_BAD_SEM_ID);
	}
	RELEASE_THREAD_LOCK();

	int32 id = sem.id;
	sem.id = -1;
	*_name = sem.u.used.name;
	sem.u.used.name = NULL;

	RELEASE_SEM_LOCK(sem);

	// append slot to the free list
	GRAB_SEM_LIST_LOCK();
	free_sem_slot(id % sMaxSems, id + sMaxSems);
	atomic_add(&sUsedSems, -1);
	RELEASE_SEM_LIST_LOCK();
}


static status_t
delete_sem_internal(sem_id id, bool checkPermission)
{
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;

	int32 slot = id % sMaxSems;

	cpu_status state = disable_interrupts();
	GRAB_TEAM_LOCK();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_TEAM_LOCK();
		restore_interrupts(state);
		TRACE(("delete_sem: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	if (checkPermission
		&& sSems[slot].u.used.owner == team_get_kernel_team()) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_TEAM_LOCK();
		restore_interrupts(state);
		dprintf("thread %ld tried to delete kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		return B_NOT_ALLOWED;
	}

	if (sSems[slot].u.used.owner != NULL) {
		list_remove_link(&sSems[slot].u.used.team_link);
		sSems[slot].u.used.owner = NULL;
	} else
		panic("sem %ld has no owner", id);

	RELEASE_TEAM_LOCK();

	char* name;
	uninit_sem_locked(sSems[slot], &name);

	GRAB_THREAD_LOCK();
	scheduler_reschedule_if_necessary_locked();
	RELEASE_THREAD_LOCK();

	restore_interrupts(state);

	free(name);
	return B_OK;
}


//	#pragma mark - Private Kernel API


// TODO: Name clash with POSIX sem_init()... (we could just use C++)
status_t
haiku_sem_init(kernel_args *args)
{
	area_id area;
	int32 i;

	TRACE(("sem_init: entry\n"));

	// compute maximal number of semaphores depending on the available memory
	// 128 MB -> 16384 semaphores, 448 kB fixed array size
	// 256 MB -> 32768, 896 kB
	// 512 MB and more -> 65536, 1.75 MB
	i = vm_page_num_pages() / 2;
	while (sMaxSems < i && sMaxSems < kMaxSemaphores)
		sMaxSems <<= 1;

	// create and initialize semaphore table
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area = create_area_etc(B_SYSTEM_TEAM, "sem_table",
		sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
		&virtualRestrictions, &physicalRestrictions, (void**)&sSems);
	if (area < 0)
		panic("unable to allocate semaphore table!\n");

	memset(sSems, 0, sizeof(struct sem_entry) * sMaxSems);
	for (i = 0; i < sMaxSems; i++) {
		sSems[i].id = -1;
		free_sem_slot(i, i);
	}

	// add debugger commands
	add_debugger_command_etc("sems", &dump_sem_list,
		"Dump a list of all active semaphores (for team, with name, etc.)",
		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]"
			" | (\"last\" <last acquirer>)\n"
		"Prints a list of all active semaphores meeting the given\n"
		"requirement. If no argument is given, all sems are listed.\n"
		"  <team>             - The team owning the semaphores.\n"
		"  <name>             - Part of the name of the semaphores.\n"
		"  <last acquirer>    - The thread that last acquired the semaphore.\n"
		, 0);
	add_debugger_command_etc("sem", &dump_sem_info,
		"Dump info about a particular semaphore",
		"<sem>\n"
		"Prints info about the specified semaphore.\n"
		"  <sem>  - pointer to the semaphore structure, semaphore ID, or name\n"
		"           of the semaphore to print info for.\n", 0);

	TRACE(("sem_init: exit\n"));

	sSemsActive = true;

	return 0;
}
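

// Worked example for the sizing loop above (assuming 4 KB pages): 128 MB of
// RAM is 32768 pages, so i = 16384 and sMaxSems doubles from 4096 to 16384,
// matching the "128 MB -> 16384 semaphores" figure in the comment; growth is
// capped at kMaxSemaphores (65536). The table then occupies
// sizeof(struct sem_entry) * sMaxSems bytes of wired kernel memory.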


/*!	Creates a semaphore with the given parameters.

	This function is only available from within the kernel, and
	should not be made public - if possible, we should remove it
	completely (and have only create_sem() exported).
*/
sem_id
create_sem_etc(int32 count, const char* name, team_id owner)
{
	struct sem_entry* sem = NULL;
	cpu_status state;
	sem_id id = B_NO_MORE_SEMS;
	char* tempName;
	size_t nameLength;

	if (sSemsActive == false || sUsedSems == sMaxSems)
		return B_NO_MORE_SEMS;

	if (name == NULL)
		name = "unnamed semaphore";

	nameLength = strlen(name) + 1;
	nameLength = min_c(nameLength, B_OS_NAME_LENGTH);
	tempName = (char*)malloc(nameLength);
	if (tempName == NULL)
		return B_NO_MEMORY;

	strlcpy(tempName, name, nameLength);

	struct team* team = NULL;
	if (owner == team_get_kernel_team_id())
		team = team_get_kernel_team();
	else if (owner == team_get_current_team_id())
		team = thread_get_current_thread()->team;

	bool teamsLocked = false;
	state = disable_interrupts();

	if (team == NULL) {
		// We need to hold the team lock to make sure this one exists (and
		// won't go away).
		GRAB_TEAM_LOCK();

		team = team_get_team_struct_locked(owner);
		if (team == NULL) {
			RELEASE_TEAM_LOCK();
			restore_interrupts(state);
			free(tempName);
			return B_BAD_TEAM_ID;
		}

		teamsLocked = true;
	}
	GRAB_SEM_LIST_LOCK();

	// get the first slot from the free list
	sem = sFreeSemsHead;
	if (sem) {
		// remove it from the free list
		sFreeSemsHead = sem->u.unused.next;
		if (!sFreeSemsHead)
			sFreeSemsTail = NULL;

		// init the slot
		GRAB_SEM_LOCK(*sem);
		sem->id = sem->u.unused.next_id;
		sem->u.used.count = count;
		sem->u.used.net_count = count;
		new(&sem->queue) ThreadQueue;
		sem->u.used.name = tempName;
		sem->u.used.owner = team;
		sem->u.used.select_infos = NULL;
		id = sem->id;

		if (teamsLocked) {
			// insert now
			list_add_item(&team->sem_list, &sem->u.used.team_link);
		}

		RELEASE_SEM_LOCK(*sem);

		atomic_add(&sUsedSems, 1);

		KTRACE("create_sem_etc(count: %ld, name: %s, owner: %ld) -> %ld",
			count, name, owner, id);

		T_SCHEDULING_ANALYSIS(CreateSemaphore(id, name));
		NotifyWaitObjectListeners(&WaitObjectListener::SemaphoreCreated, id,
			name);
	}

	RELEASE_SEM_LIST_LOCK();

	int32 slot = id % sMaxSems;
	if (sem != NULL && !teamsLocked) {
		GRAB_TEAM_LOCK();
		GRAB_SEM_LOCK(sSems[slot]);

		list_add_item(&team->sem_list, &sem->u.used.team_link);

		RELEASE_SEM_LOCK(sSems[slot]);
		teamsLocked = true;
	}

	if (teamsLocked)
		RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	if (sem == NULL)
		free(tempName);

	return id;
}
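

/*	Usage sketch (illustrative, not part of the original file): kernel code
	that wants another team to own a semaphore can pass that team's ID
	directly; 'teamID' here is a hypothetical variable:

		sem_id sem = create_sem_etc(0, "request pending", teamID);
		if (sem < 0)
			return sem;	// B_NO_MORE_SEMS, B_NO_MEMORY or B_BAD_TEAM_ID

	The semaphore is then cleaned up with the owning team (see
	sem_delete_owned_sems() below), which is the main reason to prefer this
	over create_sem() followed by set_sem_owner().
*/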


status_t
select_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;
	status_t error = B_OK;

	if (id < 0)
		return B_BAD_SEM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		// bad sem ID
		error = B_BAD_SEM_ID;
	} else if (!kernel
		&& sSems[slot].u.used.owner == team_get_kernel_team()) {
		// kernel semaphore, but call from userland
		error = B_NOT_ALLOWED;
	} else {
		info->selected_events &= B_EVENT_ACQUIRE_SEMAPHORE | B_EVENT_INVALID;

		if (info->selected_events != 0) {
			info->next = sSems[slot].u.used.select_infos;
			sSems[slot].u.used.select_infos = info;

			if (sSems[slot].u.used.count > 0)
				notify_select_events(info, B_EVENT_ACQUIRE_SEMAPHORE);
		}
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return error;
}


status_t
deselect_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;

	if (id < 0)
		return B_BAD_SEM_ID;

	if (info->selected_events == 0)
		return B_OK;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id == id) {
		select_info** infoLocation = &sSems[slot].u.used.select_infos;
		while (*infoLocation != NULL && *infoLocation != info)
			infoLocation = &(*infoLocation)->next;

		if (*infoLocation == info)
			*infoLocation = info->next;
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}


/*!	Forcibly removes a thread from a semaphore's wait queue. May have to wake
	up other threads in the process.
	Must be called with semaphore lock held. The thread lock must not be held.
*/
static void
remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
{
	if (!entry->queued)
		return;

	sem->queue.Remove(entry);
	entry->queued = false;
	sem->u.used.count += entry->count;

	// We're done with this entry. We only have to check whether other
	// threads need unblocking, too.

	// Now see if more threads need to be woken up. We get the thread lock for
	// that time, so the blocking state of threads won't change. We need that
	// lock anyway when unblocking a thread.
	GRAB_THREAD_LOCK();

	while ((entry = sem->queue.Head()) != NULL) {
		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied, unblock
			// it. Otherwise we can't unblock any other thread.
			if (entry->count > sem->u.used.net_count)
				break;

			thread_unblock_locked(entry->thread, B_OK);
			sem->u.used.net_count -= entry->count;
		} else {
			// The thread is no longer waiting, but still queued, which means
			// acquisition failed and we can just remove it.
			sem->u.used.count += entry->count;
		}

		sem->queue.Remove(entry);
		entry->queued = false;
	}

	RELEASE_THREAD_LOCK();

	// select notification, if the semaphore is now acquirable
	if (sem->u.used.count > 0)
		notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE);
}


/*!	This function deletes all semaphores belonging to a particular team.
*/
void
sem_delete_owned_sems(struct team* team)
{
	struct list queue;

	{
		InterruptsSpinLocker locker(gTeamSpinlock);
		list_move_to_list(&team->sem_list, &queue);
	}

	while (sem_entry* sem = (sem_entry*)list_remove_head_item(&queue)) {
		char* name;

		{
			InterruptsLocker locker;
			GRAB_SEM_LOCK(*sem);
			uninit_sem_locked(*sem, &name);
		}

		free(name);
	}

	scheduler_reschedule_if_necessary();
}


int32
sem_max_sems(void)
{
	return sMaxSems;
}


int32
sem_used_sems(void)
{
	return sUsedSems;
}


//	#pragma mark - Public Kernel API


sem_id
create_sem(int32 count, const char* name)
{
	return create_sem_etc(count, name, team_get_kernel_team_id());
}


status_t
delete_sem(sem_id id)
{
	return delete_sem_internal(id, false);
}
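

/*	Minimal lifecycle sketch for the public API (illustrative; the names are
	made up):

		sem_id lock = create_sem(1, "my lock");	// count 1: initially free
		if (lock < 0)
			return lock;

		if (acquire_sem(lock) == B_OK) {
			// ... critical section ...
			release_sem(lock);
		}

		delete_sem(lock);	// any waiters are woken with B_BAD_SEM_ID
*/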


status_t
acquire_sem(sem_id id)
{
	return switch_sem_etc(-1, id, 1, 0, 0);
}


status_t
acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	return switch_sem_etc(-1, id, count, flags, timeout);
}
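

/*	Timeout sketch (illustrative): with B_RELATIVE_TIMEOUT a timeout of 0
	never blocks, which yields a "try-acquire":

		status_t status = acquire_sem_etc(sem, 1, B_RELATIVE_TIMEOUT, 0);
		if (status == B_WOULD_BLOCK)
			;	// contended -- take a slow path instead of waiting

	A positive timeout blocks for at most that many microseconds and returns
	B_TIMED_OUT on expiry; see the timeout handling in switch_sem_etc()
	below.
*/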


status_t
switch_sem(sem_id toBeReleased, sem_id toBeAcquired)
{
	return switch_sem_etc(toBeReleased, toBeAcquired, 1, 0, 0);
}


status_t
switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
	uint32 flags, bigtime_t timeout)
{
	int slot = id % sMaxSems;
	int state;
	status_t status = B_OK;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	if (!are_interrupts_enabled()) {
		panic("switch_sem_etc: called with interrupts disabled for sem %ld\n",
			id);
	}

	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0
		|| (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
			== (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {
		return B_BAD_VALUE;
	}

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("switch_sem_etc: bad sem %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team()) {
		dprintf("thread %ld tried to acquire kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		status = B_NOT_ALLOWED;
		goto err;
	}

	if (sSems[slot].u.used.count - count < 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
			// immediate timeout
			status = B_WOULD_BLOCK;
			goto err;
		} else if ((flags & B_ABSOLUTE_TIMEOUT) != 0 && timeout < 0) {
			// absolute negative timeout
			status = B_TIMED_OUT;
			goto err;
		}
	}

	KTRACE("switch_sem_etc(semToBeReleased: %ld, sem: %ld, count: %ld, "
		"flags: 0x%lx, timeout: %lld)", semToBeReleased, id, count, flags,
		timeout);

	if ((sSems[slot].u.used.count -= count) < 0) {
		// we need to block
		struct thread *thread = thread_get_current_thread();

		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
			" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));

		// do a quick check to see if the thread has any pending signals;
		// this should catch most of the cases where the thread had a signal
		if (thread_is_interrupted(thread, flags)) {
			sSems[slot].u.used.count += count;
			status = B_INTERRUPTED;
				// the other semaphore will be released later
			goto err;
		}

		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
			timeout = B_INFINITE_TIMEOUT;

		// enqueue in the semaphore queue and get ready to wait
		queued_thread queueEntry(thread, count);
		sSems[slot].queue.Add(&queueEntry);
		queueEntry.queued = true;

		thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE,
			(void*)(addr_t)id);

		RELEASE_SEM_LOCK(sSems[slot]);

		// release the other semaphore, if any
		if (semToBeReleased >= 0) {
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
			semToBeReleased = -1;
		}

		GRAB_THREAD_LOCK();

		status_t acquireStatus = timeout == B_INFINITE_TIMEOUT
			? thread_block_locked(thread)
			: thread_block_with_timeout_locked(flags, timeout);

		RELEASE_THREAD_LOCK();
		GRAB_SEM_LOCK(sSems[slot]);

		// If we're still queued, the acquisition failed, and we need to
		// remove our entry and (potentially) wake up other threads.
		if (queueEntry.queued)
			remove_thread_from_sem(&queueEntry, &sSems[slot]);

		if (acquireStatus >= B_OK) {
			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
			sSems[slot].u.used.last_acquire_count = count;
#endif
		}

		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);

		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
			thread->name));
		KTRACE("switch_sem_etc() done: 0x%lx", acquireStatus);
		return acquireStatus;
	} else {
		sSems[slot].u.used.net_count -= count;
		sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
		sSems[slot].u.used.last_acquire_count = count;
#endif
	}

err:
	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	if (status == B_INTERRUPTED && semToBeReleased >= 0) {
		// Depending on when we were interrupted, we still need to release
		// the semaphore to always leave in a consistent state.
		release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
	}

#if 0
	if (status == B_NOT_ALLOWED)
		_user_debugger("Thread tried to acquire kernel semaphore.");
#endif

	KTRACE("switch_sem_etc() done: 0x%lx", status);

	return status;
}
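

/*	What switch_sem() buys you (illustrative): releasing one semaphore and
	blocking on another in a single call closes the wakeup-before-sleep race
	of doing it in two steps. A condition-variable-like consumer might look
	like this ('mutex' and 'event' are hypothetical semaphores, 'condition'
	a hypothetical predicate; the mutex is held on entry):

		while (!condition) {
			status_t status = switch_sem(mutex, event);
			if (status < B_OK)
				return status;
			acquire_sem(mutex);	// re-take the guard before rechecking
		}

	Note in the code above that the released semaphore is only dropped after
	the calling thread has been queued on the acquired one, and that it is
	still released (once) even if the acquisition is interrupted.
*/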


status_t
release_sem(sem_id id)
{
	return release_sem_etc(id, 1, 0);
}


status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	int32 slot = id % sMaxSems;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;

	InterruptsLocker _;
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team()) {
		dprintf("thread %ld tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		return B_NOT_ALLOWED;
	}

	KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,
		flags);

	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	if (flags & B_RELEASE_ALL) {
		count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			return B_OK;

		// Don't release more than necessary -- there might be interrupted/
		// timed out threads in the queue.
		flags |= B_RELEASE_IF_WAITING_ONLY;
	}

	SpinLocker threadLocker(gThreadSpinlock);

	while (count > 0) {
		queued_thread* entry = sSems[slot].queue.Head();
		if (entry == NULL) {
			if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
			}
			break;
		}

		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied,
			// unblock it. Otherwise we can't unblock any other thread.
			if (entry->count > sSems[slot].u.used.net_count + count) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
				break;
			}

			thread_unblock_locked(entry->thread, B_OK);

			int delta = min_c(count, entry->count);
			sSems[slot].u.used.count += delta;
			sSems[slot].u.used.net_count += delta - entry->count;
			count -= delta;
		} else {
			// The thread is no longer waiting, but still queued, which
			// means acquisition failed and we can just remove it.
			sSems[slot].u.used.count += entry->count;
		}

		sSems[slot].queue.Remove(entry);
		entry->queued = false;
	}

	threadLocker.Unlock();

	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	// If we've unblocked another thread, reschedule, unless we've been
	// explicitly told not to.
	if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
		semLocker.Unlock();
		threadLocker.Lock();
		scheduler_reschedule_if_necessary_locked();
	}

	return B_OK;
}
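

/*	Broadcast sketch (illustrative): B_RELEASE_ALL computes the count needed
	to satisfy every thread currently blocked on the semaphore and releases
	exactly that much, similar to a condition variable broadcast ('waitSem'
	is a hypothetical semaphore):

		release_sem_etc(waitSem, 0, B_RELEASE_ALL);

	Because B_RELEASE_IF_WAITING_ONLY is implied, the count is not pushed
	above what the waiters consume, so threads arriving later still block.
*/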


status_t
get_sem_count(sem_id id, int32 *_count)
{
	int slot;
	int state;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (_count == NULL)
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("sem_get_count: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	*_count = sSems[slot].u.used.count;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}


/*!	Called by the get_sem_info() macro. */
status_t
_get_sem_info(sem_id id, struct sem_info *info, size_t size)
{
	status_t status = B_OK;
	int state;
	int slot;

	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		status = B_BAD_SEM_ID;
		TRACE(("get_sem_info: invalid sem_id %ld\n", id));
	} else
		fill_sem_info(&sSems[slot], info, size);

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return status;
}


/*!	Called by the get_next_sem_info() macro. */
status_t
_get_next_sem_info(team_id teamID, int32 *_cookie, struct sem_info *info,
	size_t size)
{
	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (_cookie == NULL || info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;
	if (teamID < 0)
		return B_BAD_TEAM_ID;

	InterruptsSpinLocker locker(gTeamSpinlock);

	struct team* team;
	if (teamID == B_CURRENT_TEAM)
		team = thread_get_current_thread()->team;
	else
		team = team_get_team_struct_locked(teamID);

	if (team == NULL)
		return B_BAD_TEAM_ID;

	// TODO: find a way to iterate the list that is more reliable
	sem_entry* sem = (sem_entry*)list_get_first_item(&team->sem_list);
	int32 newIndex = *_cookie;
	int32 index = 0;
	bool found = false;

	while (!found) {
		// find the next entry to be returned
		while (sem != NULL && index < newIndex) {
			sem = (sem_entry*)list_get_next_item(&team->sem_list, sem);
			index++;
		}

		if (sem == NULL)
			return B_BAD_VALUE;

		GRAB_SEM_LOCK(*sem);

		if (sem->id != -1 && sem->u.used.owner == team) {
			// found one!
			fill_sem_info(sem, info, size);
			newIndex = index + 1;
			found = true;
		} else
			newIndex++;

		RELEASE_SEM_LOCK(*sem);
	}

	if (!found)
		return B_BAD_VALUE;

	*_cookie = newIndex;
	return B_OK;
}


status_t
set_sem_owner(sem_id id, team_id newTeamID)
{
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (newTeamID < 0)
		return B_BAD_TEAM_ID;

	int32 slot = id % sMaxSems;

	InterruptsSpinLocker teamLocker(gTeamSpinlock);

	struct team* newTeam = team_get_team_struct_locked(newTeamID);
	if (newTeam == NULL)
		return B_BAD_TEAM_ID;

	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("set_sem_owner: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	list_remove_link(&sSems[slot].u.used.team_link);
	list_add_item(&newTeam->sem_list, &sSems[slot].u.used.team_link);

	sSems[slot].u.used.owner = newTeam;
	return B_OK;
}
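

/*	Ownership transfer sketch (illustrative): handing a semaphore to another
	team ties the semaphore's lifetime to that team instead:

		status_t status = set_sem_owner(sem, targetTeam);

	From then on the semaphore is deleted automatically when targetTeam dies
	(via sem_delete_owned_sems() above). Passing the kernel team's ID makes
	it live until deleted explicitly.
*/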


/*!	Returns the name of the semaphore. The name is not copied, so the caller
	must make sure that the semaphore remains alive as long as the name is used.
*/
const char*
sem_get_name_unsafe(sem_id id)
{
	int slot = id % sMaxSems;

	if (sSemsActive == false || id < 0 || sSems[slot].id != id)
		return NULL;

	return sSems[slot].u.used.name;
}


//	#pragma mark - Syscalls


sem_id
_user_create_sem(int32 count, const char *userName)
{
	char name[B_OS_NAME_LENGTH];

	if (userName == NULL)
		return create_sem_etc(count, NULL, team_get_current_team_id());

	if (!IS_USER_ADDRESS(userName)
		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
		return B_BAD_ADDRESS;

	return create_sem_etc(count, name, team_get_current_team_id());
}


status_t
_user_delete_sem(sem_id id)
{
	return delete_sem_internal(id, true);
}


status_t
_user_acquire_sem(sem_id id)
{
	status_t error = switch_sem_etc(-1, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	return syscall_restart_handle_post(error);
}


status_t
_user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(-1, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	return syscall_restart_handle_timeout_post(error, timeout);
}


status_t
_user_switch_sem(sem_id releaseSem, sem_id id)
{
	status_t error = switch_sem_etc(releaseSem, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	if (releaseSem < 0)
		return syscall_restart_handle_post(error);

	return error;
}


status_t
_user_switch_sem_etc(sem_id releaseSem, sem_id id, int32 count, uint32 flags,
	bigtime_t timeout)
{
	if (releaseSem < 0)
		syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(releaseSem, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	if (releaseSem < 0)
		return syscall_restart_handle_timeout_post(error, timeout);

	return error;
}


status_t
_user_release_sem(sem_id id)
{
	return release_sem_etc(id, 1, B_CHECK_PERMISSION);
}


status_t
_user_release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	return release_sem_etc(id, count, flags | B_CHECK_PERMISSION);
}


status_t
_user_get_sem_count(sem_id id, int32 *userCount)
{
	status_t status;
	int32 count;

	if (userCount == NULL || !IS_USER_ADDRESS(userCount))
		return B_BAD_ADDRESS;

	status = get_sem_count(id, &count);
	if (status == B_OK && user_memcpy(userCount, &count, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_sem_info(sem_id id, struct sem_info *userInfo, size_t size)
{
	struct sem_info info;
	status_t status;

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_sem_info(id, &info, size);
	if (status == B_OK && user_memcpy(userInfo, &info, size) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_next_sem_info(team_id team, int32 *userCookie,
	struct sem_info *userInfo, size_t size)
{
	struct sem_info info;
	int32 cookie;
	status_t status;

	if (userCookie == NULL || userInfo == NULL
		|| !IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_sem_info(team, &cookie, &info, size);

	if (status == B_OK) {
		if (user_memcpy(userInfo, &info, size) < B_OK
			|| user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK)
			return B_BAD_ADDRESS;
	}

	return status;
}


status_t
_user_set_sem_owner(sem_id id, team_id team)
{
	return set_sem_owner(id, team);
}
1378