xref: /haiku/src/system/kernel/thread.cpp (revision 45bd7bb3db9d9e4dcb02b89a3e7c2bf382c0a88c)
1 /*
2  * Copyright 2005-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*! Threading routines */
12 
13 
14 #include <thread.h>
15 
16 #include <errno.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include <string.h>
20 #include <sys/resource.h>
21 
22 #include <OS.h>
23 
24 #include <util/AutoLock.h>
25 #include <util/khash.h>
26 
27 #include <arch/debug.h>
28 #include <boot/kernel_args.h>
29 #include <condition_variable.h>
30 #include <cpu.h>
31 #include <int.h>
32 #include <kimage.h>
33 #include <kscheduler.h>
34 #include <ksignal.h>
35 #include <Notifications.h>
36 #include <real_time_clock.h>
37 #include <smp.h>
38 #include <syscalls.h>
39 #include <syscall_restart.h>
40 #include <team.h>
41 #include <tls.h>
42 #include <user_runtime.h>
43 #include <user_thread.h>
44 #include <vfs.h>
45 #include <vm/vm.h>
46 #include <vm/VMAddressSpace.h>
47 #include <wait_for_objects.h>
48 
49 
50 //#define TRACE_THREAD
51 #ifdef TRACE_THREAD
52 #	define TRACE(x) dprintf x
53 #else
54 #	define TRACE(x) ;
55 #endif
56 
57 
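// maximum size of a message that can be passed via send_data_etc()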
58 #define THREAD_MAX_MESSAGE_SIZE		65536
59 
60 
61 struct thread_key {
62 	thread_id id;
63 };
64 
65 // global
66 spinlock gThreadSpinlock = B_SPINLOCK_INITIALIZER;
67 
68 // thread list
69 static struct thread sIdleThreads[B_MAX_CPU_COUNT];
70 static hash_table *sThreadHash = NULL;
71 static thread_id sNextThreadID = 1;
72 
73 // some arbitrarily chosen limits - should probably depend on the available
74 // memory (the limit is not yet enforced)
75 static int32 sMaxThreads = 4096;
76 static int32 sUsedThreads = 0;
77 
78 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
79 	struct thread*	thread;
80 	team_id			teamID;
81 
82 	UndertakerEntry(struct thread* thread, team_id teamID)
83 		:
84 		thread(thread),
85 		teamID(teamID)
86 	{
87 	}
88 };
89 
90 
91 class ThreadNotificationService : public DefaultNotificationService {
92 public:
93 	ThreadNotificationService()
94 		: DefaultNotificationService("threads")
95 	{
96 	}
97 
98 	void Notify(uint32 eventCode, struct thread* thread)
99 	{
100 		char eventBuffer[128];
101 		KMessage event;
102 		event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
103 		event.AddInt32("event", eventCode);
104 		event.AddInt32("thread", thread->id);
105 		event.AddPointer("threadStruct", thread);
106 
107 		DefaultNotificationService::Notify(event, eventCode);
108 	}
109 };
110 
111 
112 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
113 static ConditionVariable sUndertakerCondition;
114 static ThreadNotificationService sNotificationService;
115 
116 
117 // The dead queue is used as a pool from which to retrieve and reuse previously
118 // allocated thread structs when creating a new thread. It should be gone once
119 // the slab allocator is in.
120 static struct thread_queue dead_q;
121 
122 static void thread_kthread_entry(void);
123 static void thread_kthread_exit(void);
124 
125 
126 /*!	Inserts a thread into a team.
127 	You must hold the team lock when you call this function.
128 */
129 static void
130 insert_thread_into_team(struct team *team, struct thread *thread)
131 {
132 	thread->team_next = team->thread_list;
133 	team->thread_list = thread;
134 	team->num_threads++;
135 
136 	if (team->num_threads == 1) {
137 		// this was the first thread
138 		team->main_thread = thread;
139 	}
140 	thread->team = team;
141 }
142 
143 
144 /*!	Removes a thread from a team.
145 	You must hold the team lock when you call this function.
146 */
147 static void
148 remove_thread_from_team(struct team *team, struct thread *thread)
149 {
150 	struct thread *temp, *last = NULL;
151 
152 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
153 		if (temp == thread) {
154 			if (last == NULL)
155 				team->thread_list = temp->team_next;
156 			else
157 				last->team_next = temp->team_next;
158 
159 			team->num_threads--;
160 			break;
161 		}
162 		last = temp;
163 	}
164 }
165 
166 
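/*!	Compare function for the global thread hash table. Matches a thread
	struct against a thread_key by ID; returns 0 on a match.
*/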
167 static int
168 thread_struct_compare(void *_t, const void *_key)
169 {
170 	struct thread *thread = (struct thread*)_t;
171 	const struct thread_key *key = (const struct thread_key*)_key;
172 
173 	if (thread->id == key->id)
174 		return 0;
175 
176 	return 1;
177 }
178 
179 
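/*!	Hash function for the global thread hash table. Hashes by thread ID,
	either from the thread struct (\a _t) or from the bare key (\a _key).
*/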
180 static uint32
181 thread_struct_hash(void *_t, const void *_key, uint32 range)
182 {
183 	struct thread *thread = (struct thread*)_t;
184 	const struct thread_key *key = (const struct thread_key*)_key;
185 
186 	if (thread != NULL)
187 		return thread->id % range;
188 
189 	return (uint32)key->id % range;
190 }
191 
192 
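/*!	Resets the thread's signal state: clears pending and blocked signals,
	all signal handlers, and the alternate signal stack configuration.
*/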
193 static void
194 reset_signals(struct thread *thread)
195 {
196 	thread->sig_pending = 0;
197 	thread->sig_block_mask = 0;
198 	thread->sig_temp_enabled = 0;
199 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
200 	thread->signal_stack_base = 0;
201 	thread->signal_stack_size = 0;
202 	thread->signal_stack_enabled = false;
203 }
204 
205 
206 /*!	Allocates and fills in a thread structure (or reuses one from the
207 	dead queue).
208 
209 	\param threadID The ID to be assigned to the new thread. If
210 		  \code < 0 \endcode a fresh one is allocated.
211 	\param inthread If not \c NULL, this thread struct is initialized instead
		of allocating (or recycling) one.
212 */
213 
214 static struct thread *
215 create_thread_struct(struct thread *inthread, const char *name,
216 	thread_id threadID, struct cpu_ent *cpu)
217 {
218 	struct thread *thread;
219 	cpu_status state;
220 	char temp[64];
221 	bool recycled = false;
222 
223 	if (inthread == NULL) {
224 		// try to recycle one from the dead queue first
225 		state = disable_interrupts();
226 		GRAB_THREAD_LOCK();
227 		thread = thread_dequeue(&dead_q);
228 		RELEASE_THREAD_LOCK();
229 		restore_interrupts(state);
230 
231 		// if not, create a new one
232 		if (thread == NULL) {
233 			thread = (struct thread *)malloc(sizeof(struct thread));
234 			if (thread == NULL)
235 				return NULL;
236 		} else {
237 			recycled = true;
238 		}
239 	} else {
240 		thread = inthread;
241 	}
242 
243 	if (!recycled)
244 		scheduler_on_thread_create(thread);
245 
246 	if (name != NULL)
247 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
248 	else
249 		strcpy(thread->name, "unnamed thread");
250 
251 	thread->flags = 0;
252 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
253 	thread->team = NULL;
254 	thread->cpu = cpu;
255 	thread->previous_cpu = NULL;
256 	thread->pinned_to_cpu = 0;
257 	thread->fault_handler = 0;
258 	thread->page_faults_allowed = 1;
259 	thread->kernel_stack_area = -1;
260 	thread->kernel_stack_base = 0;
261 	thread->user_stack_area = -1;
262 	thread->user_stack_base = 0;
263 	thread->user_local_storage = 0;
264 	thread->kernel_errno = 0;
265 	thread->team_next = NULL;
266 	thread->queue_next = NULL;
267 	thread->priority = thread->next_priority = -1;
268 	thread->io_priority = -1;
269 	thread->args1 = NULL;  thread->args2 = NULL;
270 	thread->alarm.period = 0;
271 	reset_signals(thread);
272 	thread->in_kernel = true;
273 	thread->was_yielded = false;
274 	thread->user_time = 0;
275 	thread->kernel_time = 0;
276 	thread->last_time = 0;
277 	thread->exit.status = 0;
278 	thread->exit.reason = 0;
279 	thread->exit.signal = 0;
280 	list_init(&thread->exit.waiters);
281 	thread->select_infos = NULL;
282 	thread->post_interrupt_callback = NULL;
283 	thread->post_interrupt_data = NULL;
284 	thread->user_thread = NULL;
285 
286 	sprintf(temp, "thread_%ld_retcode_sem", thread->id);
287 	thread->exit.sem = create_sem(0, temp);
288 	if (thread->exit.sem < B_OK)
289 		goto err1;
290 
291 	sprintf(temp, "%s send", thread->name);
292 	thread->msg.write_sem = create_sem(1, temp);
293 	if (thread->msg.write_sem < B_OK)
294 		goto err2;
295 
296 	sprintf(temp, "%s receive", thread->name);
297 	thread->msg.read_sem = create_sem(0, temp);
298 	if (thread->msg.read_sem < B_OK)
299 		goto err3;
300 
301 	if (arch_thread_init_thread_struct(thread) < B_OK)
302 		goto err4;
303 
304 	return thread;
305 
306 err4:
307 	delete_sem(thread->msg.read_sem);
308 err3:
309 	delete_sem(thread->msg.write_sem);
310 err2:
311 	delete_sem(thread->exit.sem);
312 err1:
313 	// ToDo: put them in the dead queue instead?
314 	if (inthread == NULL) {
315 		scheduler_on_thread_destroy(thread);
316 		free(thread);
317 	}
318 
319 	return NULL;
320 }
321 
322 
323 static void
324 delete_thread_struct(struct thread *thread)
325 {
326 	delete_sem(thread->exit.sem);
327 	delete_sem(thread->msg.write_sem);
328 	delete_sem(thread->msg.read_sem);
329 
330 	scheduler_on_thread_destroy(thread);
331 
332 	// ToDo: put them in the dead queue instead?
333 	free(thread);
334 }
335 
336 
337 /*! This function gets run by a new thread before anything else */
338 static void
339 thread_kthread_entry(void)
340 {
341 	struct thread *thread = thread_get_current_thread();
342 
343 	// The thread is new and has been scheduled for the first time. Notify the
344 	// user debugger code.
345 	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
346 		user_debug_thread_scheduled(thread);
347 
348 	// simulates the thread spinlock release that would occur if the thread had been
349 	// rescheduled from. The reschedule didn't happen because the thread is new.
350 	RELEASE_THREAD_LOCK();
351 
352 	// start tracking time
353 	thread->last_time = system_time();
354 
355 	enable_interrupts(); // this essentially simulates a return-from-interrupt
356 }
357 
358 
359 static void
360 thread_kthread_exit(void)
361 {
362 	struct thread *thread = thread_get_current_thread();
363 
364 	thread->exit.reason = THREAD_RETURN_EXIT;
365 	thread_exit();
366 }
367 
368 
369 /*!	Initializes the thread and jumps to its userspace entry point.
370 	This function is called at creation time of every user thread,
371 	but not for a team's main thread.
372 */
373 static int
374 _create_user_thread_kentry(void)
375 {
376 	struct thread *thread = thread_get_current_thread();
377 
378 	// jump to the entry point in user space
379 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
380 		thread->args1, thread->args2);
381 
382 	// only get here if the above call fails
383 	return 0;
384 }
385 
386 
387 /*! Initializes the thread and calls its kernel space entry point. */
388 static int
389 _create_kernel_thread_kentry(void)
390 {
391 	struct thread *thread = thread_get_current_thread();
392 	int (*func)(void *args) = (int (*)(void *))thread->entry;
393 
394 	// call the entry function with the appropriate args
395 	return func(thread->args1);
396 }
397 
398 
399 /*!	Creates a new thread in the team with the specified team ID.
400 
401 	If the thread ID specified in \a attributes is \code < 0 \endcode, a
402 	fresh one is allocated.
403 */
404 static thread_id
405 create_thread(thread_creation_attributes& attributes, bool kernel)
406 {
407 	struct thread *thread, *currentThread;
408 	struct team *team;
409 	cpu_status state;
410 	char stack_name[B_OS_NAME_LENGTH];
411 	status_t status;
412 	bool abort = false;
413 	bool debugNewThread = false;
414 
415 	TRACE(("create_thread(%s, id = %ld, %s)\n", attributes.name,
416 		attributes.thread, kernel ? "kernel" : "user"));
417 
418 	thread = create_thread_struct(NULL, attributes.name, attributes.thread,
419 		NULL);
420 	if (thread == NULL)
421 		return B_NO_MEMORY;
422 
423 	thread->priority = attributes.priority == -1
424 		? B_NORMAL_PRIORITY : attributes.priority;
425 	thread->next_priority = thread->priority;
426 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
427 	thread->state = B_THREAD_SUSPENDED;
428 	thread->next_state = B_THREAD_SUSPENDED;
429 
430 	// init debug structure
431 	init_thread_debug_info(&thread->debug_info);
432 
433 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", attributes.name,
434 		thread->id);
435 	thread->kernel_stack_area = create_area(stack_name,
436 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
437 		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
438 		B_FULL_LOCK,
439 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
440 
441 	if (thread->kernel_stack_area < 0) {
442 		// we're not yet part of a team, so we can just bail out
443 		status = thread->kernel_stack_area;
444 
445 		dprintf("create_thread: error creating kernel stack: %s!\n",
446 			strerror(status));
447 
448 		delete_thread_struct(thread);
449 		return status;
450 	}
451 
452 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
453 		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
454 
455 	state = disable_interrupts();
456 	GRAB_THREAD_LOCK();
457 
458 	// If the new thread belongs to the same team as the current thread,
459 	// it may inherit some of the thread debug flags.
460 	currentThread = thread_get_current_thread();
461 	if (currentThread && currentThread->team->id == attributes.team) {
462 		// inherit all user flags...
463 		int32 debugFlags = currentThread->debug_info.flags
464 			& B_THREAD_DEBUG_USER_FLAG_MASK;
465 
466 		// ... except the syscall tracing flags, unless explicitly specified
467 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
468 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
469 				| B_THREAD_DEBUG_POST_SYSCALL);
470 		}
471 
472 		thread->debug_info.flags = debugFlags;
473 
474 		// stop the new thread, if desired
475 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
476 	}
477 
478 	// insert into global list
479 	hash_insert(sThreadHash, thread);
480 	sUsedThreads++;
481 	scheduler_on_thread_init(thread);
482 	RELEASE_THREAD_LOCK();
483 
484 	GRAB_TEAM_LOCK();
485 	// look at the team, make sure it's not being deleted
486 	team = team_get_team_struct_locked(attributes.team);
487 
488 	if (team == NULL || team->state == TEAM_STATE_DEATH
489 		|| team->death_entry != NULL) {
490 		abort = true;
491 	}
492 
493 	if (!abort && !kernel) {
494 		thread->user_thread = team_allocate_user_thread(team);
495 		abort = thread->user_thread == NULL;
496 	}
497 
498 	if (!abort) {
499 		// Debug the new thread, if the parent thread required that (see above),
500 		// or if the respective global team debug flag is set -- but only if a
501 		// debugger is installed for the team.
502 		debugNewThread |= (atomic_get(&team->debug_info.flags)
503 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
504 		if (debugNewThread
505 			&& (atomic_get(&team->debug_info.flags)
506 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
507 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
508 		}
509 
510 		insert_thread_into_team(team, thread);
511 	}
512 
513 	RELEASE_TEAM_LOCK();
514 	if (abort) {
515 		GRAB_THREAD_LOCK();
516 		hash_remove(sThreadHash, thread);
517 		RELEASE_THREAD_LOCK();
518 	}
519 	restore_interrupts(state);
520 	if (abort) {
521 		delete_area(thread->kernel_stack_area);
522 		delete_thread_struct(thread);
523 		return B_BAD_TEAM_ID;
524 	}
525 
526 	thread->args1 = attributes.args1;
527 	thread->args2 = attributes.args2;
528 	thread->entry = attributes.entry;
529 	status = thread->id;
530 
531 	// notify listeners
532 	sNotificationService.Notify(THREAD_ADDED, thread);
533 
534 	if (kernel) {
535 		// this sets up an initial kthread stack that runs the entry
536 
537 		// Note: whatever function wants to set up a user stack later for this
538 		// thread must initialize the TLS for it
539 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
540 			&thread_kthread_entry, &thread_kthread_exit);
541 	} else {
542 		// create user stack
543 
544 		// the stack will be between USER_STACK_REGION and the main thread stack
545 		// area (the user stack of the main thread is created in
546 		// team_create_team())
547 		if (attributes.stack_address == NULL) {
548 			thread->user_stack_base = USER_STACK_REGION;
549 			if (attributes.stack_size <= 0)
550 				thread->user_stack_size = USER_STACK_SIZE;
551 			else
552 				thread->user_stack_size = PAGE_ALIGN(attributes.stack_size);
553 			thread->user_stack_size += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
554 
555 			snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack",
556 				attributes.name, thread->id);
557 			virtual_address_restrictions virtualRestrictions = {};
558 			virtualRestrictions.address = (void*)thread->user_stack_base;
559 			virtualRestrictions.address_specification = B_BASE_ADDRESS;
560 			physical_address_restrictions physicalRestrictions = {};
561 			thread->user_stack_area = create_area_etc(team->id, stack_name,
562 				thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
563 				B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0,
564 				&virtualRestrictions, &physicalRestrictions,
565 				(void**)&thread->user_stack_base);
566 			if (thread->user_stack_area < B_OK
567 				|| arch_thread_init_tls(thread) < B_OK) {
568 				// great, we have a fully running thread without a (usable)
569 				// stack
570 				dprintf("create_thread: unable to create proper user stack!\n");
571 				status = thread->user_stack_area;
572 				kill_thread(thread->id);
573 			}
574 		} else {
575 			thread->user_stack_base = (addr_t)attributes.stack_address;
576 			thread->user_stack_size = attributes.stack_size;
577 		}
578 
579 		user_debug_update_new_thread_flags(thread->id);
580 
581 		// The user entry point and arguments were stored in the thread struct
582 		// above; the kentry function set up here will immediately switch the
583 		// thread into user space.
584 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
585 			&thread_kthread_entry, &thread_kthread_exit);
586 	}
587 
588 	return status;
589 }
590 
591 
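/*!	Body of the undertaker thread. Waits for dead threads queued in
	sUndertakerEntries, deletes their kernel stack areas, removes them from
	the kernel team, and puts their thread structs back into the dead queue
	for reuse.
*/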
592 static status_t
593 undertaker(void* /*args*/)
594 {
595 	while (true) {
596 		// wait for a thread to bury
597 		InterruptsSpinLocker locker(gThreadSpinlock);
598 
599 		while (sUndertakerEntries.IsEmpty()) {
600 			ConditionVariableEntry conditionEntry;
601 			sUndertakerCondition.Add(&conditionEntry);
602 			locker.Unlock();
603 
604 			conditionEntry.Wait();
605 
606 			locker.Lock();
607 		}
608 
609 		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
610 		locker.Unlock();
611 
612 		UndertakerEntry entry = *_entry;
613 			// we need a copy, since the original entry is on the thread's stack
614 
615 		// we've got an entry
616 		struct thread* thread = entry.thread;
617 
618 		// delete the old kernel stack area
619 		delete_area(thread->kernel_stack_area);
620 
621 		// remove this thread from the kernel team's thread list
622 		disable_interrupts();
623 		GRAB_TEAM_LOCK();
624 
625 		remove_thread_from_team(team_get_kernel_team(), thread);
626 
627 		RELEASE_TEAM_LOCK();
628 		enable_interrupts();
629 			// needed for the debugger notification below
630 
631 		// free the thread structure
632 		locker.Lock();
633 		thread_enqueue(thread, &dead_q);
634 			// TODO: Use the slab allocator!
635 	}
636 
637 	// can never get here
638 	return B_OK;
639 }
640 
641 
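/*!	Returns the ID of the semaphore the given thread is currently blocked
	on, or -1 if it isn't waiting on a semaphore.
*/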
642 static sem_id
643 get_thread_wait_sem(struct thread* thread)
644 {
645 	if (thread->state == B_THREAD_WAITING
646 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
647 		return (sem_id)(addr_t)thread->wait.object;
648 	}
649 	return -1;
650 }
651 
652 
653 /*!	Fills the thread_info structure with information from the specified
654 	thread.
655 	The thread lock must be held when called.
656 */
657 static void
658 fill_thread_info(struct thread *thread, thread_info *info, size_t size)
659 {
660 	info->thread = thread->id;
661 	info->team = thread->team->id;
662 
663 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
664 
665 	if (thread->state == B_THREAD_WAITING) {
666 		info->state = B_THREAD_WAITING;
667 
668 		switch (thread->wait.type) {
669 			case THREAD_BLOCK_TYPE_SNOOZE:
670 				info->state = B_THREAD_ASLEEP;
671 				break;
672 
673 			case THREAD_BLOCK_TYPE_SEMAPHORE:
674 			{
675 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
676 				if (sem == thread->msg.read_sem)
677 					info->state = B_THREAD_RECEIVING;
678 				break;
679 			}
680 
681 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
682 			default:
683 				break;
684 		}
685 	} else
686 		info->state = (thread_state)thread->state;
687 
688 	info->priority = thread->priority;
689 	info->user_time = thread->user_time;
690 	info->kernel_time = thread->kernel_time;
691 	info->stack_base = (void *)thread->user_stack_base;
692 	info->stack_end = (void *)(thread->user_stack_base
693 		+ thread->user_stack_size);
694 	info->sem = get_thread_wait_sem(thread);
695 }
696 
697 
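/*!	Delivers a message to the target thread: acquires the target's write
	semaphore (so that only one message is in flight at a time), copies
	\a buffer into kernel memory, stores it in the target's message slot,
	and wakes the receiver via the read semaphore.
*/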
698 static status_t
699 send_data_etc(thread_id id, int32 code, const void *buffer, size_t bufferSize,
700 	int32 flags)
701 {
702 	struct thread *target;
703 	sem_id cachedSem;
704 	cpu_status state;
705 	status_t status;
706 
707 	state = disable_interrupts();
708 	GRAB_THREAD_LOCK();
709 	target = thread_get_thread_struct_locked(id);
710 	if (!target) {
711 		RELEASE_THREAD_LOCK();
712 		restore_interrupts(state);
713 		return B_BAD_THREAD_ID;
714 	}
715 	cachedSem = target->msg.write_sem;
716 	RELEASE_THREAD_LOCK();
717 	restore_interrupts(state);
718 
719 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
720 		return B_NO_MEMORY;
721 
722 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
723 	if (status == B_INTERRUPTED) {
724 		// We got interrupted by a signal
725 		return status;
726 	}
727 	if (status != B_OK) {
728 		// Any other acquisition problems may be due to thread deletion
729 		return B_BAD_THREAD_ID;
730 	}
731 
732 	void* data;
733 	if (bufferSize > 0) {
734 		data = malloc(bufferSize);
735 		if (data == NULL)
736 			return B_NO_MEMORY;
737 		if (user_memcpy(data, buffer, bufferSize) != B_OK) {
738 			free(data);
739 			return B_BAD_DATA;
740 		}
741 	} else
742 		data = NULL;
743 
744 	state = disable_interrupts();
745 	GRAB_THREAD_LOCK();
746 
747 	// The target thread could have been deleted at this point
748 	target = thread_get_thread_struct_locked(id);
749 	if (target == NULL) {
750 		RELEASE_THREAD_LOCK();
751 		restore_interrupts(state);
752 		free(data);
753 		return B_BAD_THREAD_ID;
754 	}
755 
756 	// Save message information
757 	target->msg.sender = thread_get_current_thread()->id;
758 	target->msg.code = code;
759 	target->msg.size = bufferSize;
760 	target->msg.buffer = data;
761 	cachedSem = target->msg.read_sem;
762 
763 	RELEASE_THREAD_LOCK();
764 	restore_interrupts(state);
765 
766 	release_sem(cachedSem);
767 	return B_OK;
768 }
769 
770 
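/*!	Receives a message for the current thread: blocks on the read semaphore,
	copies the buffered message into \a buffer, stores the sender's ID in
	\a _sender, and returns the message code. The write semaphore is released
	so the next sender may proceed.
*/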
771 static int32
772 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
773 	int32 flags)
774 {
775 	struct thread *thread = thread_get_current_thread();
776 	status_t status;
777 	size_t size;
778 	int32 code;
779 
780 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
781 	if (status < B_OK) {
782 		// Actually, we're not supposed to return error codes
783 		// but since the only reason this can fail is that we
784 		// were killed, it's probably okay to do so (but also
785 		// meaningless).
786 		return status;
787 	}
788 
789 	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
790 		size = min_c(bufferSize, thread->msg.size);
791 		status = user_memcpy(buffer, thread->msg.buffer, size);
792 		if (status != B_OK) {
793 			free(thread->msg.buffer);
794 			release_sem(thread->msg.write_sem);
795 			return status;
796 		}
797 	}
798 
799 	*_sender = thread->msg.sender;
800 	code = thread->msg.code;
801 
802 	free(thread->msg.buffer);
803 	release_sem(thread->msg.write_sem);
804 
805 	return code;
806 }
807 
808 
809 static status_t
810 common_getrlimit(int resource, struct rlimit * rlp)
811 {
812 	if (!rlp)
813 		return B_BAD_ADDRESS;
814 
815 	switch (resource) {
816 		case RLIMIT_NOFILE:
817 		case RLIMIT_NOVMON:
818 			return vfs_getrlimit(resource, rlp);
819 
820 		case RLIMIT_CORE:
821 			rlp->rlim_cur = 0;
822 			rlp->rlim_max = 0;
823 			return B_OK;
824 
825 		case RLIMIT_STACK:
826 		{
827 			struct thread *thread = thread_get_current_thread();
828 			if (!thread)
829 				return B_ERROR;
830 			rlp->rlim_cur = thread->user_stack_size;
831 			rlp->rlim_max = thread->user_stack_size;
832 			return B_OK;
833 		}
834 
835 		default:
836 			return EINVAL;
837 	}
838 
839 	return B_OK;
840 }
841 
842 
843 static status_t
844 common_setrlimit(int resource, const struct rlimit * rlp)
845 {
846 	if (!rlp)
847 		return B_BAD_ADDRESS;
848 
849 	switch (resource) {
850 		case RLIMIT_NOFILE:
851 		case RLIMIT_NOVMON:
852 			return vfs_setrlimit(resource, rlp);
853 
854 		case RLIMIT_CORE:
855 			// We don't support core files, so only allow setting the limits to 0/0.
856 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
857 				return EINVAL;
858 			return B_OK;
859 
860 		default:
861 			return EINVAL;
862 	}
863 
864 	return B_OK;
865 }
866 
867 
868 //	#pragma mark - debugger calls
869 
870 
871 static int
872 make_thread_unreal(int argc, char **argv)
873 {
874 	struct thread *thread;
875 	struct hash_iterator i;
876 	int32 id = -1;
877 
878 	if (argc > 2) {
879 		print_debugger_command_usage(argv[0]);
880 		return 0;
881 	}
882 
883 	if (argc > 1)
884 		id = strtoul(argv[1], NULL, 0);
885 
886 	hash_open(sThreadHash, &i);
887 
888 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
889 		if (id != -1 && thread->id != id)
890 			continue;
891 
892 		if (thread->priority > B_DISPLAY_PRIORITY) {
893 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
894 			kprintf("thread %ld made unreal\n", thread->id);
895 		}
896 	}
897 
898 	hash_close(sThreadHash, &i, false);
899 	return 0;
900 }
901 
902 
903 static int
904 set_thread_prio(int argc, char **argv)
905 {
906 	struct thread *thread;
907 	struct hash_iterator i;
908 	int32 id;
909 	int32 prio;
910 
911 	if (argc > 3 || argc < 2) {
912 		print_debugger_command_usage(argv[0]);
913 		return 0;
914 	}
915 
916 	prio = strtoul(argv[1], NULL, 0);
917 	if (prio > THREAD_MAX_SET_PRIORITY)
918 		prio = THREAD_MAX_SET_PRIORITY;
919 	if (prio < THREAD_MIN_SET_PRIORITY)
920 		prio = THREAD_MIN_SET_PRIORITY;
921 
922 	if (argc > 2)
923 		id = strtoul(argv[2], NULL, 0);
924 	else
925 		id = thread_get_current_thread()->id;
926 
927 	hash_open(sThreadHash, &i);
928 
929 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
930 		if (thread->id != id)
931 			continue;
932 		thread->priority = thread->next_priority = prio;
933 		kprintf("thread %ld set to priority %ld\n", id, prio);
934 		break;
935 	}
936 	if (!thread)
937 		kprintf("thread %ld (%#lx) not found\n", id, id);
938 
939 	hash_close(sThreadHash, &i, false);
940 	return 0;
941 }
942 
943 
944 static int
945 make_thread_suspended(int argc, char **argv)
946 {
947 	struct thread *thread;
948 	struct hash_iterator i;
949 	int32 id;
950 
951 	if (argc > 2) {
952 		print_debugger_command_usage(argv[0]);
953 		return 0;
954 	}
955 
956 	if (argc == 1)
957 		id = thread_get_current_thread()->id;
958 	else
959 		id = strtoul(argv[1], NULL, 0);
960 
961 	hash_open(sThreadHash, &i);
962 
963 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
964 		if (thread->id != id)
965 			continue;
966 
967 		thread->next_state = B_THREAD_SUSPENDED;
968 		kprintf("thread %ld suspended\n", id);
969 		break;
970 	}
971 	if (!thread)
972 		kprintf("thread %ld (%#lx) not found\n", id, id);
973 
974 	hash_close(sThreadHash, &i, false);
975 	return 0;
976 }
977 
978 
979 static int
980 make_thread_resumed(int argc, char **argv)
981 {
982 	struct thread *thread;
983 	struct hash_iterator i;
984 	int32 id;
985 
986 	if (argc != 2) {
987 		print_debugger_command_usage(argv[0]);
988 		return 0;
989 	}
990 
991 	// force user to enter a thread id, as using
992 	// the current thread is usually not intended
993 	id = strtoul(argv[1], NULL, 0);
994 
995 	hash_open(sThreadHash, &i);
996 
997 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
998 		if (thread->id != id)
999 			continue;
1000 
1001 		if (thread->state == B_THREAD_SUSPENDED) {
1002 			scheduler_enqueue_in_run_queue(thread);
1003 			kprintf("thread %ld resumed\n", thread->id);
1004 		}
1005 		break;
1006 	}
1007 	if (!thread)
1008 		kprintf("thread %ld (%#lx) not found\n", id, id);
1009 
1010 	hash_close(sThreadHash, &i, false);
1011 	return 0;
1012 }
1013 
1014 
1015 static int
1016 drop_into_debugger(int argc, char **argv)
1017 {
1018 	status_t err;
1019 	int32 id;
1020 
1021 	if (argc > 2) {
1022 		print_debugger_command_usage(argv[0]);
1023 		return 0;
1024 	}
1025 
1026 	if (argc == 1)
1027 		id = thread_get_current_thread()->id;
1028 	else
1029 		id = strtoul(argv[1], NULL, 0);
1030 
1031 	err = _user_debug_thread(id);
1032 	if (err)
1033 		kprintf("drop failed\n");
1034 	else
1035 		kprintf("thread %ld dropped into user debugger\n", id);
1036 
1037 	return 0;
1038 }
1039 
1040 
1041 static const char *
1042 state_to_text(struct thread *thread, int32 state)
1043 {
1044 	switch (state) {
1045 		case B_THREAD_READY:
1046 			return "ready";
1047 
1048 		case B_THREAD_RUNNING:
1049 			return "running";
1050 
1051 		case B_THREAD_WAITING:
1052 		{
1053 			if (thread != NULL) {
1054 				switch (thread->wait.type) {
1055 					case THREAD_BLOCK_TYPE_SNOOZE:
1056 						return "zzz";
1057 
1058 					case THREAD_BLOCK_TYPE_SEMAPHORE:
1059 					{
1060 						sem_id sem = (sem_id)(addr_t)thread->wait.object;
1061 						if (sem == thread->msg.read_sem)
1062 							return "receive";
1063 						break;
1064 					}
1065 				}
1066 			}
1067 
1068 			return "waiting";
1069 		}
1070 
1071 		case B_THREAD_SUSPENDED:
1072 			return "suspended";
1073 
1074 		case THREAD_STATE_FREE_ON_RESCHED:
1075 			return "death";
1076 
1077 		default:
1078 			return "UNKNOWN";
1079 	}
1080 }
1081 
1082 
1083 static void
1084 print_thread_list_table_head()
1085 {
1086 	kprintf("thread         id  state     wait for   object  cpu pri  stack    "
1087 		"  team  name\n");
1088 }
1089 
1090 
1091 static void
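/*!	Prints information about the given thread for the kernel debugger.
	With \a shortInfo a single table row is printed, otherwise all details.
*/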
1092 _dump_thread_info(struct thread *thread, bool shortInfo)
1093 {
1094 	if (shortInfo) {
1095 		kprintf("%p %6ld  %-10s", thread, thread->id, state_to_text(thread,
1096 			thread->state));
1097 
1098 		// does it block on a semaphore or a condition variable?
1099 		if (thread->state == B_THREAD_WAITING) {
1100 			switch (thread->wait.type) {
1101 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1102 				{
1103 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1104 					if (sem == thread->msg.read_sem)
1105 						kprintf("                    ");
1106 					else
1107 						kprintf("sem  %12ld   ", sem);
1108 					break;
1109 				}
1110 
1111 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1112 					kprintf("cvar   %p   ", thread->wait.object);
1113 					break;
1114 
1115 				case THREAD_BLOCK_TYPE_SNOOZE:
1116 					kprintf("                    ");
1117 					break;
1118 
1119 				case THREAD_BLOCK_TYPE_SIGNAL:
1120 					kprintf("signal              ");
1121 					break;
1122 
1123 				case THREAD_BLOCK_TYPE_MUTEX:
1124 					kprintf("mutex  %p   ", thread->wait.object);
1125 					break;
1126 
1127 				case THREAD_BLOCK_TYPE_RW_LOCK:
1128 					kprintf("rwlock %p   ", thread->wait.object);
1129 					break;
1130 
1131 				case THREAD_BLOCK_TYPE_OTHER:
1132 					kprintf("other               ");
1133 					break;
1134 
1135 				default:
1136 					kprintf("???    %p   ", thread->wait.object);
1137 					break;
1138 			}
1139 		} else
1140 			kprintf("        -           ");
1141 
1142 		// on which CPU does it run?
1143 		if (thread->cpu)
1144 			kprintf("%2d", thread->cpu->cpu_num);
1145 		else
1146 			kprintf(" -");
1147 
1148 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1149 			(void *)thread->kernel_stack_base, thread->team->id,
1150 			thread->name != NULL ? thread->name : "<NULL>");
1151 
1152 		return;
1153 	}
1154 
1155 	// print the long info
1156 
1157 	struct death_entry *death = NULL;
1158 
1159 	kprintf("THREAD: %p\n", thread);
1160 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1161 	kprintf("name:               \"%s\"\n", thread->name);
1162 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1163 		thread->all_next, thread->team_next, thread->queue_next);
1164 	kprintf("priority:           %ld (next %ld, I/O: %ld)\n", thread->priority,
1165 		thread->next_priority, thread->io_priority);
1166 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1167 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1168 	kprintf("cpu:                %p ", thread->cpu);
1169 	if (thread->cpu)
1170 		kprintf("(%d)\n", thread->cpu->cpu_num);
1171 	else
1172 		kprintf("\n");
1173 	kprintf("sig_pending:        %#" B_PRIx32 " (blocked: %#" B_PRIx32
1174 		", temp enabled: %#" B_PRIx32 ")\n", thread->sig_pending,
1175 		thread->sig_block_mask, thread->sig_temp_enabled);
1176 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1177 
1178 	if (thread->state == B_THREAD_WAITING) {
1179 		kprintf("waiting for:        ");
1180 
1181 		switch (thread->wait.type) {
1182 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1183 			{
1184 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1185 				if (sem == thread->msg.read_sem)
1186 					kprintf("data\n");
1187 				else
1188 					kprintf("semaphore %ld\n", sem);
1189 				break;
1190 			}
1191 
1192 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1193 				kprintf("condition variable %p\n", thread->wait.object);
1194 				break;
1195 
1196 			case THREAD_BLOCK_TYPE_SNOOZE:
1197 				kprintf("snooze()\n");
1198 				break;
1199 
1200 			case THREAD_BLOCK_TYPE_SIGNAL:
1201 				kprintf("signal\n");
1202 				break;
1203 
1204 			case THREAD_BLOCK_TYPE_MUTEX:
1205 				kprintf("mutex %p\n", thread->wait.object);
1206 				break;
1207 
1208 			case THREAD_BLOCK_TYPE_RW_LOCK:
1209 				kprintf("rwlock %p\n", thread->wait.object);
1210 				break;
1211 
1212 			case THREAD_BLOCK_TYPE_OTHER:
1213 				kprintf("other (%s)\n", (char*)thread->wait.object);
1214 				break;
1215 
1216 			default:
1217 				kprintf("unknown (%p)\n", thread->wait.object);
1218 				break;
1219 		}
1220 	}
1221 
1222 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1223 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1224 	kprintf("entry:              %p\n", (void *)thread->entry);
1225 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1226 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1227 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1228 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1229 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1230 	kprintf("  exit.waiters:\n");
1231 	while ((death = (struct death_entry*)list_get_next_item(
1232 			&thread->exit.waiters, death)) != NULL) {
1233 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1234 	}
1235 
1236 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1237 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1238 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1239 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1240 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1241 	kprintf("user_thread:        %p\n", (void *)thread->user_thread);
1242 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1243 		strerror(thread->kernel_errno));
1244 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1245 	kprintf("user_time:          %Ld\n", thread->user_time);
1246 	kprintf("flags:              0x%lx\n", thread->flags);
1247 	kprintf("architecture dependant section:\n");
1248 	arch_thread_dump_info(&thread->arch_info);
1249 }
1250 
1251 
1252 static int
1253 dump_thread_info(int argc, char **argv)
1254 {
1255 	bool shortInfo = false;
1256 	int argi = 1;
1257 	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
1258 		shortInfo = true;
1259 		print_thread_list_table_head();
1260 		argi++;
1261 	}
1262 
1263 	if (argi == argc) {
1264 		_dump_thread_info(thread_get_current_thread(), shortInfo);
1265 		return 0;
1266 	}
1267 
1268 	for (; argi < argc; argi++) {
1269 		const char *name = argv[argi];
1270 		int32 id = strtoul(name, NULL, 0);
1271 
1272 		if (IS_KERNEL_ADDRESS(id)) {
1273 			// semi-hack
1274 			_dump_thread_info((struct thread *)id, shortInfo);
1275 			continue;
1276 		}
1277 
1278 		// walk through the thread list, trying to match name or id
1279 		bool found = false;
1280 		struct hash_iterator i;
1281 		hash_open(sThreadHash, &i);
1282 		struct thread *thread;
1283 		while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1284 			if (!strcmp(name, thread->name) || thread->id == id) {
1285 				_dump_thread_info(thread, shortInfo);
1286 				found = true;
1287 				break;
1288 			}
1289 		}
1290 		hash_close(sThreadHash, &i, false);
1291 
1292 		if (!found)
1293 			kprintf("thread \"%s\" (%ld) doesn't exist!\n", name, id);
1294 	}
1295 
1296 	return 0;
1297 }
1298 
1299 
1300 static int
1301 dump_thread_list(int argc, char **argv)
1302 {
1303 	struct thread *thread;
1304 	struct hash_iterator i;
1305 	bool realTimeOnly = false;
1306 	bool calling = false;
1307 	const char *callSymbol = NULL;
1308 	addr_t callStart = 0;
1309 	addr_t callEnd = 0;
1310 	int32 requiredState = 0;
1311 	team_id team = -1;
1312 	sem_id sem = -1;
1313 
1314 	if (!strcmp(argv[0], "realtime"))
1315 		realTimeOnly = true;
1316 	else if (!strcmp(argv[0], "ready"))
1317 		requiredState = B_THREAD_READY;
1318 	else if (!strcmp(argv[0], "running"))
1319 		requiredState = B_THREAD_RUNNING;
1320 	else if (!strcmp(argv[0], "waiting")) {
1321 		requiredState = B_THREAD_WAITING;
1322 
1323 		if (argc > 1) {
1324 			sem = strtoul(argv[1], NULL, 0);
1325 			if (sem == 0)
1326 				kprintf("ignoring invalid semaphore argument.\n");
1327 		}
1328 	} else if (!strcmp(argv[0], "calling")) {
1329 		if (argc < 2) {
1330 			kprintf("Need to give a symbol name or start and end arguments.\n");
1331 			return 0;
1332 		} else if (argc == 3) {
1333 			callStart = parse_expression(argv[1]);
1334 			callEnd = parse_expression(argv[2]);
1335 		} else
1336 			callSymbol = argv[1];
1337 
1338 		calling = true;
1339 	} else if (argc > 1) {
1340 		team = strtoul(argv[1], NULL, 0);
1341 		if (team == 0)
1342 			kprintf("ignoring invalid team argument.\n");
1343 	}
1344 
1345 	print_thread_list_table_head();
1346 
1347 	hash_open(sThreadHash, &i);
1348 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1349 		// filter out threads not matching the search criteria
1350 		if ((requiredState && thread->state != requiredState)
1351 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1352 					callStart, callEnd))
1353 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1354 			|| (team > 0 && thread->team->id != team)
1355 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1356 			continue;
1357 
1358 		_dump_thread_info(thread, true);
1359 	}
1360 	hash_close(sThreadHash, &i, false);
1361 	return 0;
1362 }
1363 
1364 
1365 //	#pragma mark - private kernel API
1366 
1367 
1368 void
1369 thread_exit(void)
1370 {
1371 	cpu_status state;
1372 	struct thread *thread = thread_get_current_thread();
1373 	struct team *team = thread->team;
1374 	thread_id parentID = -1;
1375 	status_t status;
1376 	struct thread_debug_info debugInfo;
1377 	team_id teamID = team->id;
1378 
1379 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1380 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1381 			? "due to signal" : "normally", thread->exit.status));
1382 
1383 	if (!are_interrupts_enabled())
1384 		panic("thread_exit() called with interrupts disabled!\n");
1385 
1386 	// boost our priority to get this over with
1387 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1388 
1389 	// Cancel previously installed alarm timer, if any
1390 	cancel_timer(&thread->alarm);
1391 
1392 	// delete the user stack area first, we won't need it anymore
1393 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1394 		area_id area = thread->user_stack_area;
1395 		thread->user_stack_area = -1;
1396 		vm_delete_area(team->id, area, true);
1397 	}
1398 
1399 	struct job_control_entry *death = NULL;
1400 	struct death_entry* threadDeathEntry = NULL;
1401 	bool deleteTeam = false;
1402 	port_id debuggerPort = -1;
1403 
1404 	if (team != team_get_kernel_team()) {
1405 		user_debug_thread_exiting(thread);
1406 
1407 		if (team->main_thread == thread) {
1408 			// The main thread is exiting. Shut down the whole team.
1409 			deleteTeam = true;
1410 		} else {
1411 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1412 			team_free_user_thread(thread);
1413 		}
1414 
1415 		// Remove this thread from the current team and put it into the kernel
1416 		// team until it dies.
1417 		state = disable_interrupts();
1418 		GRAB_TEAM_LOCK();
1419 
1420 		if (deleteTeam)
1421 			debuggerPort = team_shutdown_team(team, state);
1422 
1423 		GRAB_THREAD_LOCK();
1424 			// removing the thread and putting its death entry to the parent
1425 			// team needs to be an atomic operation
1426 
1427 		// remember how long this thread lasted
1428 		team->dead_threads_kernel_time += thread->kernel_time;
1429 		team->dead_threads_user_time += thread->user_time;
1430 
1431 		remove_thread_from_team(team, thread);
1432 		insert_thread_into_team(team_get_kernel_team(), thread);
1433 
1434 		if (team->death_entry != NULL) {
1435 			if (--team->death_entry->remaining_threads == 0)
1436 				team->death_entry->condition.NotifyOne(true, B_OK);
1437 		}
1438 
1439 		if (deleteTeam) {
1440 			struct team *parent = team->parent;
1441 
1442 			// remember who our parent was so we can send a signal
1443 			parentID = parent->id;
1444 
1445 			// Set the team job control state to "dead" and detach the job
1446 			// control entry from our team struct.
1447 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1448 			death = team->job_control_entry;
1449 			team->job_control_entry = NULL;
1450 
1451 			if (death != NULL) {
1452 				death->InitDeadState();
1453 
1454 				// team_set_job_control_state() already moved our entry
1455 				// into the parent's list. We just check the soft limit of
1456 				// death entries.
1457 				if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
1458 					death = parent->dead_children->entries.RemoveHead();
1459 					parent->dead_children->count--;
1460 				} else
1461 					death = NULL;
1462 
1463 				RELEASE_THREAD_LOCK();
1464 			} else
1465 				RELEASE_THREAD_LOCK();
1466 
1467 			team_remove_team(team);
1468 
1469 			send_signal_etc(parentID, SIGCHLD,
1470 				SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
1471 		} else {
1472 			// The thread is not the main thread. We store a thread death
1473 			// entry for it, unless someone is already waiting it.
1474 			if (threadDeathEntry != NULL
1475 				&& list_is_empty(&thread->exit.waiters)) {
1476 				threadDeathEntry->thread = thread->id;
1477 				threadDeathEntry->status = thread->exit.status;
1478 				threadDeathEntry->reason = thread->exit.reason;
1479 				threadDeathEntry->signal = thread->exit.signal;
1480 
1481 				// add the entry -- remove an old one, if we hit the limit
1482 				list_add_item(&team->dead_threads, threadDeathEntry);
1483 				team->dead_threads_count++;
1484 				threadDeathEntry = NULL;
1485 
1486 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1487 					threadDeathEntry = (death_entry*)list_remove_head_item(
1488 						&team->dead_threads);
1489 					team->dead_threads_count--;
1490 				}
1491 			}
1492 
1493 			RELEASE_THREAD_LOCK();
1494 		}
1495 
1496 		RELEASE_TEAM_LOCK();
1497 
1498 		// swap address spaces, to make sure we're running on the kernel's pgdir
1499 		vm_swap_address_space(team->address_space, VMAddressSpace::Kernel());
1500 		restore_interrupts(state);
1501 
1502 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1503 	}
1504 
1505 	free(threadDeathEntry);
1506 
1507 	// delete the team if we're its main thread
1508 	if (deleteTeam) {
1509 		team_delete_team(team, debuggerPort);
1510 
1511 		// we need to delete any death entry that made it to here
1512 		delete death;
1513 	}
1514 
1515 	state = disable_interrupts();
1516 	GRAB_THREAD_LOCK();
1517 
1518 	// remove thread from hash, so it's no longer accessible
1519 	hash_remove(sThreadHash, thread);
1520 	sUsedThreads--;
1521 
1522 	// Stop debugging for this thread
1523 	debugInfo = thread->debug_info;
1524 	clear_thread_debug_info(&thread->debug_info, true);
1525 
1526 	// Remove the select infos. We notify them a little later.
1527 	select_info* selectInfos = thread->select_infos;
1528 	thread->select_infos = NULL;
1529 
1530 	RELEASE_THREAD_LOCK();
1531 	restore_interrupts(state);
1532 
1533 	destroy_thread_debug_info(&debugInfo);
1534 
1535 	// notify select infos
1536 	select_info* info = selectInfos;
1537 	while (info != NULL) {
1538 		select_sync* sync = info->sync;
1539 
1540 		notify_select_events(info, B_EVENT_INVALID);
1541 		info = info->next;
1542 		put_select_sync(sync);
1543 	}
1544 
1545 	// notify listeners
1546 	sNotificationService.Notify(THREAD_REMOVED, thread);
1547 
1548 	// shutdown the thread messaging
1549 
1550 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1551 	if (status == B_WOULD_BLOCK) {
1552 		// there is data waiting for us, so let us eat it
1553 		thread_id sender;
1554 
1555 		delete_sem(thread->msg.write_sem);
1556 			// first, let's remove all possibly waiting writers
1557 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1558 	} else {
1559 		// we probably own the semaphore here, and we're the last to do so
1560 		delete_sem(thread->msg.write_sem);
1561 	}
1562 	// now we can safely remove the msg.read_sem
1563 	delete_sem(thread->msg.read_sem);
1564 
1565 	// fill all death entries and delete the sem that others will use to wait on us
1566 	{
1567 		sem_id cachedExitSem = thread->exit.sem;
1568 		cpu_status state;
1569 
1570 		state = disable_interrupts();
1571 		GRAB_THREAD_LOCK();
1572 
1573 		// make sure no one will grab this semaphore again
1574 		thread->exit.sem = -1;
1575 
1576 		// fill all death entries
1577 		death_entry* entry = NULL;
1578 		while ((entry = (struct death_entry*)list_get_next_item(
1579 				&thread->exit.waiters, entry)) != NULL) {
1580 			entry->status = thread->exit.status;
1581 			entry->reason = thread->exit.reason;
1582 			entry->signal = thread->exit.signal;
1583 		}
1584 
1585 		RELEASE_THREAD_LOCK();
1586 		restore_interrupts(state);
1587 
1588 		delete_sem(cachedExitSem);
1589 	}
1590 
1591 	// notify the debugger
1592 	if (teamID != team_get_kernel_team_id())
1593 		user_debug_thread_deleted(teamID, thread->id);
1594 
1595 	// enqueue in the undertaker list and reschedule for the last time
1596 	UndertakerEntry undertakerEntry(thread, teamID);
1597 
1598 	disable_interrupts();
1599 	GRAB_THREAD_LOCK();
1600 
1601 	sUndertakerEntries.Add(&undertakerEntry);
1602 	sUndertakerCondition.NotifyOne(true);
1603 
1604 	thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
1605 	scheduler_reschedule();
1606 
1607 	panic("never can get here\n");
1608 }
1609 
1610 
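/*!	Returns the thread struct for the given ID, or \c NULL if not found.
	Acquires and releases the thread lock internally.
*/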
1611 struct thread *
1612 thread_get_thread_struct(thread_id id)
1613 {
1614 	struct thread *thread;
1615 	cpu_status state;
1616 
1617 	state = disable_interrupts();
1618 	GRAB_THREAD_LOCK();
1619 
1620 	thread = thread_get_thread_struct_locked(id);
1621 
1622 	RELEASE_THREAD_LOCK();
1623 	restore_interrupts(state);
1624 
1625 	return thread;
1626 }
1627 
1628 
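/*!	Returns the thread struct for the given ID, or \c NULL if not found.
	The caller must hold the thread lock.
*/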
1629 struct thread *
1630 thread_get_thread_struct_locked(thread_id id)
1631 {
1632 	struct thread_key key;
1633 
1634 	key.id = id;
1635 
1636 	return (struct thread*)hash_lookup(sThreadHash, &key);
1637 }
1638 
1639 
1640 /*!	Called in the interrupt handler code when a thread enters
1641 	the kernel for any reason.
1642 	Only tracks time for now.
1643 	Interrupts are disabled.
1644 */
1645 void
1646 thread_at_kernel_entry(bigtime_t now)
1647 {
1648 	struct thread *thread = thread_get_current_thread();
1649 
1650 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1651 
1652 	// track user time
1653 	thread->user_time += now - thread->last_time;
1654 	thread->last_time = now;
1655 
1656 	thread->in_kernel = true;
1657 }
1658 
1659 
1660 /*!	Called whenever a thread exits kernel space to user space.
1661 	Tracks time, handles signals, ...
1662 	Interrupts must be enabled. When the function returns, interrupts will be
1663 	disabled.
1664 */
1665 void
1666 thread_at_kernel_exit(void)
1667 {
1668 	struct thread *thread = thread_get_current_thread();
1669 
1670 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1671 
1672 	while (handle_signals(thread)) {
1673 		InterruptsSpinLocker _(gThreadSpinlock);
1674 		scheduler_reschedule();
1675 	}
1676 
1677 	disable_interrupts();
1678 
1679 	thread->in_kernel = false;
1680 
1681 	// track kernel time
1682 	bigtime_t now = system_time();
1683 	thread->kernel_time += now - thread->last_time;
1684 	thread->last_time = now;
1685 }
1686 
1687 
1688 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1689 	and no debugging shall be done.
1690 	Interrupts must be disabled.
1691 */
1692 void
1693 thread_at_kernel_exit_no_signals(void)
1694 {
1695 	struct thread *thread = thread_get_current_thread();
1696 
1697 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1698 
1699 	thread->in_kernel = false;
1700 
1701 	// track kernel time
1702 	bigtime_t now = system_time();
1703 	thread->kernel_time += now - thread->last_time;
1704 	thread->last_time = now;
1705 }
1706 
1707 
1708 void
1709 thread_reset_for_exec(void)
1710 {
1711 	struct thread *thread = thread_get_current_thread();
1712 
1713 	reset_signals(thread);
1714 
1715 	// Note: We don't cancel an alarm. It is supposed to survive exec*().
1716 }
1717 
1718 
1719 /*! Inserts a thread at the tail of a queue. */
1720 void
1721 thread_enqueue(struct thread *thread, struct thread_queue *queue)
1722 {
1723 	thread->queue_next = NULL;
1724 	if (queue->head == NULL) {
1725 		queue->head = thread;
1726 		queue->tail = thread;
1727 	} else {
1728 		queue->tail->queue_next = thread;
1729 		queue->tail = thread;
1730 	}
1731 }
1732 
1733 
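/*! Returns the thread at the head of the queue without removing it */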
1734 struct thread *
1735 thread_lookat_queue(struct thread_queue *queue)
1736 {
1737 	return queue->head;
1738 }
1739 
1740 
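/*! Removes and returns the thread at the head of the queue, or \c NULL */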
1741 struct thread *
1742 thread_dequeue(struct thread_queue *queue)
1743 {
1744 	struct thread *thread = queue->head;
1745 
1746 	if (thread != NULL) {
1747 		queue->head = thread->queue_next;
1748 		if (queue->tail == thread)
1749 			queue->tail = NULL;
1750 	}
1751 	return thread;
1752 }
1753 
1754 
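/*!	Removes and returns the thread with the given ID from the queue, or
	\c NULL if no such thread is queued.
*/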
1755 struct thread *
1756 thread_dequeue_id(struct thread_queue *q, thread_id id)
1757 {
1758 	struct thread *thread;
1759 	struct thread *last = NULL;
1760 
1761 	thread = q->head;
1762 	while (thread != NULL) {
1763 		if (thread->id == id) {
1764 			if (last == NULL)
1765 				q->head = thread->queue_next;
1766 			else
1767 				last->queue_next = thread->queue_next;
1768 
1769 			if (q->tail == thread)
1770 				q->tail = last;
1771 			break;
1772 		}
1773 		last = thread;
1774 		thread = thread->queue_next;
1775 	}
1776 	return thread;
1777 }
1778 
1779 
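/*!	Invokes \a callback for every thread in the global hash table until the
	callback returns \c true. Returns the thread the iteration stopped at,
	or \c NULL if the callback never returned \c true.
*/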
1780 struct thread*
1781 thread_iterate_through_threads(thread_iterator_callback callback, void* cookie)
1782 {
1783 	struct hash_iterator iterator;
1784 	hash_open(sThreadHash, &iterator);
1785 
1786 	struct thread* thread;
1787 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
1788 			!= NULL) {
1789 		if (callback(thread, cookie))
1790 			break;
1791 	}
1792 
1793 	hash_close(sThreadHash, &iterator, false);
1794 
1795 	return thread;
1796 }
1797 
1798 
1799 thread_id
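/*! Allocates a fresh thread ID by atomically incrementing sNextThreadID */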
1800 allocate_thread_id(void)
1801 {
1802 	return atomic_add(&sNextThreadID, 1);
1803 }
1804 
1805 
1806 thread_id
1807 peek_next_thread_id(void)
1808 {
1809 	return atomic_get(&sNextThreadID);
1810 }
1811 
1812 
1813 /*!	Yield the CPU to other threads.
1814 	If \a force is \c true, the thread will almost certainly be unscheduled.
1815 	If \c false, it will continue to run if no other thread is in the ready
1816 	state; and if it has a higher priority than the other ready threads, it
1817 	still has a good chance to keep running.
1818 */
1819 void
1820 thread_yield(bool force)
1821 {
1822 	if (force) {
1823 		// snooze for roughly 3 thread quantums
1824 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1825 #if 0
1826 		cpu_status state;
1827 
1828 		struct thread *thread = thread_get_current_thread();
1829 		if (thread == NULL)
1830 			return;
1831 
1832 		state = disable_interrupts();
1833 		GRAB_THREAD_LOCK();
1834 
1835 		// mark the thread as yielded, so it will not be scheduled next
1836 		//thread->was_yielded = true;
1837 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1838 		scheduler_reschedule();
1839 
1840 		RELEASE_THREAD_LOCK();
1841 		restore_interrupts(state);
1842 #endif
1843 	} else {
1844 		struct thread *thread = thread_get_current_thread();
1845 		if (thread == NULL)
1846 			return;
1847 
1848 		// Don't force the thread off the CPU, just reschedule.
1849 		InterruptsSpinLocker _(gThreadSpinlock);
1850 		scheduler_reschedule();
1851 	}
1852 }
1853 
1854 
1855 /*!	Kernel private thread creation function.
1856 
1857 	\param threadID The ID to be assigned to the new thread. If
1858 		  \code < 0 \endcode a fresh one is allocated.
1859 */
1860 thread_id
1861 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1862 	void *arg, team_id team, thread_id threadID)
1863 {
1864 	thread_creation_attributes attributes;
1865 	attributes.entry = (thread_entry_func)function;
1866 	attributes.name = name;
1867 	attributes.priority = priority;
1868 	attributes.args1 = arg;
1869 	attributes.args2 = NULL;
1870 	attributes.stack_address = NULL;
1871 	attributes.stack_size = 0;
1872 	attributes.team = team;
1873 	attributes.thread = threadID;
1874 
1875 	return create_thread(attributes, true);
1876 }
1877 
1878 
1879 status_t
1880 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1881 	status_t *_returnCode)
1882 {
1883 	sem_id exitSem = B_BAD_THREAD_ID;
1884 	struct death_entry death;
1885 	job_control_entry* freeDeath = NULL;
1886 	struct thread *thread;
1887 	cpu_status state;
1888 	status_t status = B_OK;
1889 
1890 	if (id < B_OK)
1891 		return B_BAD_THREAD_ID;
1892 
1893 	// we need to resume the thread we're waiting for first
1894 
1895 	state = disable_interrupts();
1896 	GRAB_THREAD_LOCK();
1897 
1898 	thread = thread_get_thread_struct_locked(id);
1899 	if (thread != NULL) {
1900 		// remember the semaphore we have to wait on and place our death entry
1901 		exitSem = thread->exit.sem;
1902 		list_add_link_to_head(&thread->exit.waiters, &death);
1903 	}
1904 
1905 	death_entry* threadDeathEntry = NULL;
1906 
1907 	RELEASE_THREAD_LOCK();
1908 
1909 	if (thread == NULL) {
1910 		// we couldn't find this thread - maybe it's already gone, and we'll
1911 		// find its death entry in our team
1912 		GRAB_TEAM_LOCK();
1913 
1914 		struct team* team = thread_get_current_thread()->team;
1915 
1916 		// check the child death entries first (i.e. main threads of child
1917 		// teams)
1918 		bool deleteEntry;
1919 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1920 		if (freeDeath != NULL) {
1921 			death.status = freeDeath->status;
1922 			if (!deleteEntry)
1923 				freeDeath = NULL;
1924 		} else {
1925 			// check the thread death entries of the team (non-main threads)
1926 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1927 					&team->dead_threads, threadDeathEntry)) != NULL) {
1928 				if (threadDeathEntry->thread == id) {
1929 					list_remove_item(&team->dead_threads, threadDeathEntry);
1930 					team->dead_threads_count--;
1931 					death.status = threadDeathEntry->status;
1932 					break;
1933 				}
1934 			}
1935 
1936 			if (threadDeathEntry == NULL)
1937 				status = B_BAD_THREAD_ID;
1938 		}
1939 
1940 		RELEASE_TEAM_LOCK();
1941 	}
1942 
1943 	restore_interrupts(state);
1944 
1945 	if (thread == NULL && status == B_OK) {
1946 		// we found the thread's death entry in our team
1947 		if (_returnCode)
1948 			*_returnCode = death.status;
1949 
1950 		delete freeDeath;
1951 		free(threadDeathEntry);
1952 		return B_OK;
1953 	}
1954 
1955 	// we need to wait for the death of the thread
1956 
1957 	if (exitSem < B_OK)
1958 		return B_BAD_THREAD_ID;
1959 
1960 	resume_thread(id);
1961 		// make sure we don't wait forever on a suspended thread
1962 
1963 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1964 
1965 	if (status == B_OK) {
1966 		// this should never happen as the thread deletes the semaphore on exit
1967 		panic("could acquire exit_sem for thread %ld\n", id);
1968 	} else if (status == B_BAD_SEM_ID) {
1969 		// this is the way the thread normally exits
1970 		status = B_OK;
1971 
1972 		if (_returnCode)
1973 			*_returnCode = death.status;
1974 	} else {
1975 		// We were probably interrupted; we need to remove our death entry now.
1976 		state = disable_interrupts();
1977 		GRAB_THREAD_LOCK();
1978 
1979 		thread = thread_get_thread_struct_locked(id);
1980 		if (thread != NULL)
1981 			list_remove_link(&death);
1982 
1983 		RELEASE_THREAD_LOCK();
1984 		restore_interrupts(state);
1985 
1986 		// If the thread is already gone, we need to wait for its exit semaphore
1987 		// to make sure our death entry stays valid - it won't take long
1988 		if (thread == NULL)
1989 			acquire_sem(exitSem);
1990 	}
1991 
1992 	return status;
1993 }
1994 
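/* Editorial note (illustrative sketch, not part of the original source):
   waiting on another thread with a relative timeout. "someThread" and the
   one-second timeout are hypothetical; as wait_for_thread_etc() above shows,
   the target thread is resumed first so that a suspended thread cannot make
   the caller wait forever.

	status_t returnCode;
	status_t error = wait_for_thread_etc(someThread, B_RELATIVE_TIMEOUT,
		1000000LL, &returnCode);
	if (error == B_OK)
		dprintf("thread exited with %ld\n", returnCode);
	else if (error == B_TIMED_OUT)
		dprintf("thread did not exit within one second\n");
*/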
1995 
1996 status_t
1997 select_thread(int32 id, struct select_info* info, bool kernel)
1998 {
1999 	InterruptsSpinLocker locker(gThreadSpinlock);
2000 
2001 	// get thread
2002 	struct thread* thread = thread_get_thread_struct_locked(id);
2003 	if (thread == NULL)
2004 		return B_BAD_THREAD_ID;
2005 
2006 	// We support only B_EVENT_INVALID at the moment.
2007 	info->selected_events &= B_EVENT_INVALID;
2008 
2009 	// add info to list
2010 	if (info->selected_events != 0) {
2011 		info->next = thread->select_infos;
2012 		thread->select_infos = info;
2013 
2014 		// we need a sync reference
2015 		atomic_add(&info->sync->ref_count, 1);
2016 	}
2017 
2018 	return B_OK;
2019 }
2020 
2021 
2022 status_t
2023 deselect_thread(int32 id, struct select_info* info, bool kernel)
2024 {
2025 	InterruptsSpinLocker locker(gThreadSpinlock);
2026 
2027 	// get thread
2028 	struct thread* thread = thread_get_thread_struct_locked(id);
2029 	if (thread == NULL)
2030 		return B_BAD_THREAD_ID;
2031 
2032 	// remove info from list
2033 	select_info** infoLocation = &thread->select_infos;
2034 	while (*infoLocation != NULL && *infoLocation != info)
2035 		infoLocation = &(*infoLocation)->next;
2036 
2037 	if (*infoLocation != info)
2038 		return B_OK;
2039 
2040 	*infoLocation = info->next;
2041 
2042 	locker.Unlock();
2043 
2044 	// surrender sync reference
2045 	put_select_sync(info->sync);
2046 
2047 	return B_OK;
2048 }
2049 
2050 
2051 int32
2052 thread_max_threads(void)
2053 {
2054 	return sMaxThreads;
2055 }
2056 
2057 
2058 int32
2059 thread_used_threads(void)
2060 {
2061 	return sUsedThreads;
2062 }
2063 
2064 
2065 const char*
2066 thread_state_to_text(struct thread* thread, int32 state)
2067 {
2068 	return state_to_text(thread, state);
2069 }
2070 
2071 
2072 int32
2073 thread_get_io_priority(thread_id id)
2074 {
2075 	// take a shortcut if it is the current thread
2076 	struct thread* thread = thread_get_current_thread();
2077 	int32 priority;
2078 	if (id == thread->id) {
2079 		priority = thread->io_priority;
2080 		return priority < 0 ? thread->priority : priority;
2081 	}
2082 
2083 	// not the current thread -- get it
2084 	InterruptsSpinLocker locker(gThreadSpinlock);
2085 
2086 	thread = thread_get_thread_struct_locked(id);
2087 	if (thread == NULL)
2088 		return B_BAD_THREAD_ID;
2089 
2090 	priority = thread->io_priority;
2091 	return priority < 0 ? thread->priority : priority;
2092 }
2093 
2094 
2095 void
2096 thread_set_io_priority(int32 priority)
2097 {
2098 	struct thread* thread = thread_get_current_thread();
2099 	thread->io_priority = priority;
2100 }
2101 
2102 
2103 status_t
2104 thread_init(kernel_args *args)
2105 {
2106 	uint32 i;
2107 
2108 	TRACE(("thread_init: entry\n"));
2109 
2110 	// create the thread hash table
2111 	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
2112 		&thread_struct_compare, &thread_struct_hash);
2113 
2114 	// zero out the dead thread structure queue
2115 	memset(&dead_q, 0, sizeof(dead_q));
2116 
2117 	if (arch_thread_init(args) < B_OK)
2118 		panic("arch_thread_init() failed!\n");
2119 
2120 	// skip all thread IDs including B_SYSTEM_TEAM, which is reserved
2121 	sNextThreadID = B_SYSTEM_TEAM + 1;
2122 
2123 	// create an idle thread for each cpu
2124 
2125 	for (i = 0; i < args->num_cpus; i++) {
2126 		struct thread *thread;
2127 		area_info info;
2128 		char name[64];
2129 
2130 		sprintf(name, "idle thread %lu", i + 1);
2131 		thread = create_thread_struct(&sIdleThreads[i], name,
2132 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
2133 		if (thread == NULL) {
2134 			panic("error creating idle thread struct\n");
2135 			return B_NO_MEMORY;
2136 		}
2137 
2138 		gCPU[i].running_thread = thread;
2139 
2140 		thread->team = team_get_kernel_team();
2141 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
2142 		thread->state = B_THREAD_RUNNING;
2143 		thread->next_state = B_THREAD_READY;
2144 		sprintf(name, "idle thread %lu kstack", i + 1);
2145 		thread->kernel_stack_area = find_area(name);
2146 		thread->entry = NULL;
2147 
2148 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2149 			panic("error finding idle kstack area\n");
2150 
2151 		thread->kernel_stack_base = (addr_t)info.address;
2152 		thread->kernel_stack_top = thread->kernel_stack_base + info.size;
2153 
2154 		hash_insert(sThreadHash, thread);
2155 		insert_thread_into_team(thread->team, thread);
2156 	}
2157 	sUsedThreads = args->num_cpus;
2158 
2159 	// init the notification service
2160 	new(&sNotificationService) ThreadNotificationService();
2161 
2162 	// start the undertaker thread
2163 	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2164 	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2165 
2166 	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2167 		B_DISPLAY_PRIORITY, NULL);
2168 	if (undertakerThread < 0)
2169 		panic("Failed to create undertaker thread!");
2170 	resume_thread(undertakerThread);
2171 
2172 	// set up some debugger commands
2173 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2174 		"[ <team> ]\n"
2175 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2176 		"all threads of the specified team.\n"
2177 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2178 	add_debugger_command_etc("ready", &dump_thread_list,
2179 		"List all ready threads",
2180 		"\n"
2181 		"Prints a list of all threads in ready state.\n", 0);
2182 	add_debugger_command_etc("running", &dump_thread_list,
2183 		"List all running threads",
2184 		"\n"
2185 		"Prints a list of all threads in running state.\n", 0);
2186 	add_debugger_command_etc("waiting", &dump_thread_list,
2187 		"List all waiting threads (optionally for a specific semaphore)",
2188 		"[ <sem> ]\n"
2189 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2190 		"specified, only the threads waiting on that semaphore are listed.\n"
2191 		"  <sem>  - ID of the semaphore.\n", 0);
2192 	add_debugger_command_etc("realtime", &dump_thread_list,
2193 		"List all realtime threads",
2194 		"\n"
2195 		"Prints a list of all threads with realtime priority.\n", 0);
2196 	add_debugger_command_etc("thread", &dump_thread_info,
2197 		"Dump info about a particular thread",
2198 		"[ -s ] ( <id> | <address> | <name> )*\n"
2199 		"Prints information about the specified thread. If no argument is\n"
2200 		"given the current thread is selected.\n"
2201 		"  -s         - Print info in compact table form (like \"threads\").\n"
2202 		"  <id>       - The ID of the thread.\n"
2203 		"  <address>  - The address of the thread structure.\n"
2204 		"  <name>     - The thread's name.\n", 0);
2205 	add_debugger_command_etc("calling", &dump_thread_list,
2206 		"Show all threads that have a specific address in their call chain",
2207 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2208 	add_debugger_command_etc("unreal", &make_thread_unreal,
2209 		"Set realtime priority threads to normal priority",
2210 		"[ <id> ]\n"
2211 		"Sets the priority of all realtime threads or, if given, the one\n"
2212 		"with the specified ID to \"normal\" priority.\n"
2213 		"  <id>  - The ID of the thread.\n", 0);
2214 	add_debugger_command_etc("suspend", &make_thread_suspended,
2215 		"Suspend a thread",
2216 		"[ <id> ]\n"
2217 		"Suspends the thread with the given ID. If no ID argument is given\n"
2218 		"the current thread is selected.\n"
2219 		"  <id>  - The ID of the thread.\n", 0);
2220 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2221 		"<id>\n"
2222 		"Resumes the specified thread, if it is currently suspended.\n"
2223 		"  <id>  - The ID of the thread.\n", 0);
2224 	add_debugger_command_etc("drop", &drop_into_debugger,
2225 		"Drop a thread into the userland debugger",
2226 		"<id>\n"
2227 		"Drops the specified (userland) thread into the userland debugger\n"
2228 		"after leaving the kernel debugger.\n"
2229 		"  <id>  - The ID of the thread.\n", 0);
2230 	add_debugger_command_etc("priority", &set_thread_prio,
2231 		"Set a thread's priority",
2232 		"<priority> [ <id> ]\n"
2233 		"Sets the priority of the thread with the specified ID to the given\n"
2234 		"priority. If no thread ID is given, the current thread is selected.\n"
2235 		"  <priority>  - The thread's new priority (0 - 120)\n"
2236 		"  <id>        - The ID of the thread.\n", 0);
2237 
2238 	return B_OK;
2239 }
2240 
2241 
2242 status_t
2243 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2244 {
2245 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2246 	// so that get_current_cpu and friends will work, which is crucial for
2247 	// a lot of low level routines
2248 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2249 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2250 	return B_OK;
2251 }
2252 
2253 
2254 //	#pragma mark - thread blocking API
2255 
2256 
2257 static status_t
2258 thread_block_timeout(timer* timer)
2259 {
2260 	// The timer has been installed with B_TIMER_ACQUIRE_THREAD_LOCK, so
2261 	// we are already holding the thread lock; no further locking is needed
2262 	// here.
2263 
2264 	struct thread* thread = (struct thread*)timer->user_data;
2265 	thread_unblock_locked(thread, B_TIMED_OUT);
2266 
2267 	return B_HANDLED_INTERRUPT;
2268 }
2269 
2270 
2271 status_t
2272 thread_block()
2273 {
2274 	InterruptsSpinLocker _(gThreadSpinlock);
2275 	return thread_block_locked(thread_get_current_thread());
2276 }
2277 
2278 
2279 void
2280 thread_unblock(status_t threadID, status_t status)
2281 {
2282 	InterruptsSpinLocker _(gThreadSpinlock);
2283 
2284 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2285 	if (thread != NULL)
2286 		thread_unblock_locked(thread, status);
2287 }
2288 
2289 
2290 status_t
2291 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2292 {
2293 	InterruptsSpinLocker _(gThreadSpinlock);
2294 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
2295 }
2296 
2297 
2298 status_t
2299 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
2300 {
2301 	struct thread* thread = thread_get_current_thread();
2302 
2303 	if (thread->wait.status != 1)
2304 		return thread->wait.status;
2305 
2306 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2307 		&& timeout != B_INFINITE_TIMEOUT;
2308 
2309 	if (useTimer) {
2310 		// Timer flags: absolute/relative + "acquire thread lock". The latter
2311 		// avoids nasty race conditions and deadlock problems that could
2312 		// otherwise occur between our cancel_timer() and a concurrently
2313 		// executing thread_block_timeout().
2314 		uint32 timerFlags;
2315 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2316 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2317 		} else {
2318 			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2319 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2320 				timeout -= rtc_boot_time();
2321 		}
2322 		timerFlags |= B_TIMER_ACQUIRE_THREAD_LOCK;
2323 
2324 		// install the timer
2325 		thread->wait.unblock_timer.user_data = thread;
2326 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2327 			timerFlags);
2328 	}
2329 
2330 	// block
2331 	status_t error = thread_block_locked(thread);
2332 
2333 	// cancel timer, if it didn't fire
2334 	if (error != B_TIMED_OUT && useTimer)
2335 		cancel_timer(&thread->wait.unblock_timer);
2336 
2337 	return error;
2338 }
2339 
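/* Editorial note (illustrative sketch, not part of the original source):
   the usual pattern for the blocking API, modeled on snooze_etc() and
   _user_block_thread() below. "someObject" is a hypothetical wait object
   used only for informational purposes (e.g. in the debugger).

	InterruptsSpinLocker locker(gThreadSpinlock);
	struct thread* thread = thread_get_current_thread();

	thread_prepare_to_block(thread, B_CAN_INTERRUPT,
		THREAD_BLOCK_TYPE_OTHER, someObject);
	status_t status = thread_block_with_timeout_locked(B_RELATIVE_TIMEOUT,
		1000000LL);

	// Some other thread (holding the thread spinlock) wakes the blocked
	// thread via thread_unblock()/thread_unblock_locked() with the desired
	// wait status.
*/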
2340 
2341 /*!	Thread spinlock must be held.
2342 */
2343 static status_t
2344 user_unblock_thread(thread_id threadID, status_t status)
2345 {
2346 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2347 	if (thread == NULL)
2348 		return B_BAD_THREAD_ID;
2349 	if (thread->user_thread == NULL)
2350 		return B_NOT_ALLOWED;
2351 
2352 	if (thread->user_thread->wait_status > 0) {
2353 		thread->user_thread->wait_status = status;
2354 		thread_unblock_locked(thread, status);
2355 	}
2356 
2357 	return B_OK;
2358 }
2359 
2360 
2361 //	#pragma mark - public kernel API
2362 
2363 
2364 void
2365 exit_thread(status_t returnValue)
2366 {
2367 	struct thread *thread = thread_get_current_thread();
2368 
2369 	thread->exit.status = returnValue;
2370 	thread->exit.reason = THREAD_RETURN_EXIT;
2371 
2372 	// If called from a kernel thread, we don't deliver the signal;
2373 	// we just exit directly to keep the user space behaviour of
2374 	// this function.
2375 	if (thread->team != team_get_kernel_team())
2376 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2377 	else
2378 		thread_exit();
2379 }
2380 
2381 
2382 status_t
2383 kill_thread(thread_id id)
2384 {
2385 	if (id <= 0)
2386 		return B_BAD_VALUE;
2387 
2388 	return send_signal(id, SIGKILLTHR);
2389 }
2390 
2391 
2392 status_t
2393 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2394 {
2395 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2396 }
2397 
2398 
2399 int32
2400 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2401 {
2402 	return receive_data_etc(sender, buffer, bufferSize, 0);
2403 }
2404 
2405 
2406 bool
2407 has_data(thread_id thread)
2408 {
2409 	int32 count;
2410 
2411 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
2412 			&count) != B_OK)
2413 		return false;
2414 
2415 	return count != 0;
2416 }
2417 
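/* Editorial note (illustrative sketch, not part of the original source):
   the thread messaging API as seen from both sides. "target" and the
   message code are hypothetical.

	// sender side
	int32 value = 42;
	send_data(target, 'exmp', &value, sizeof(value));

	// receiver side
	if (has_data(find_thread(NULL))) {
		thread_id sender;
		int32 received;
		int32 code = receive_data(&sender, &received, sizeof(received));
		dprintf("got code %ld from thread %ld\n", code, sender);
	}
*/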
2418 
2419 status_t
2420 _get_thread_info(thread_id id, thread_info *info, size_t size)
2421 {
2422 	status_t status = B_OK;
2423 	struct thread *thread;
2424 	cpu_status state;
2425 
2426 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2427 		return B_BAD_VALUE;
2428 
2429 	state = disable_interrupts();
2430 	GRAB_THREAD_LOCK();
2431 
2432 	thread = thread_get_thread_struct_locked(id);
2433 	if (thread == NULL) {
2434 		status = B_BAD_VALUE;
2435 		goto err;
2436 	}
2437 
2438 	fill_thread_info(thread, info, size);
2439 
2440 err:
2441 	RELEASE_THREAD_LOCK();
2442 	restore_interrupts(state);
2443 
2444 	return status;
2445 }
2446 
2447 
2448 status_t
2449 _get_next_thread_info(team_id teamID, int32 *_cookie, thread_info *info,
2450 	size_t size)
2451 {
2452 	if (info == NULL || size != sizeof(thread_info) || teamID < 0)
2453 		return B_BAD_VALUE;
2454 
2455 	int32 lastID = *_cookie;
2456 
2457 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
2458 
2459 	struct team* team;
2460 	if (teamID == B_CURRENT_TEAM)
2461 		team = thread_get_current_thread()->team;
2462 	else
2463 		team = team_get_team_struct_locked(teamID);
2464 
2465 	if (team == NULL)
2466 		return B_BAD_VALUE;
2467 
2468 	struct thread* thread = NULL;
2469 
2470 	if (lastID == 0) {
2471 		// We start with the main thread
2472 		thread = team->main_thread;
2473 	} else {
2474 		// Find the thread with the lowest ID that is still higher than
2475 		// the last one we returned (as long as thread IDs don't wrap
2476 		// around, the list is sorted from highest to lowest ID).
2477 		for (struct thread* next = team->thread_list; next != NULL;
2478 				next = next->team_next) {
2479 			if (next->id <= lastID)
2480 				break;
2481 
2482 			thread = next;
2483 		}
2484 	}
2485 
2486 	if (thread == NULL)
2487 		return B_BAD_VALUE;
2488 
2489 	lastID = thread->id;
2490 	*_cookie = lastID;
2491 
2492 	SpinLocker threadLocker(gThreadSpinlock);
2493 	fill_thread_info(thread, info, size);
2494 
2495 	return B_OK;
2496 }
2497 
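/* Editorial note (illustrative sketch, not part of the original source):
   iterating the threads of a team with the cookie-based interface above.
   "someTeam" is hypothetical; a cookie of 0 starts the iteration at the
   team's main thread.

	int32 cookie = 0;
	thread_info info;
	while (_get_next_thread_info(someTeam, &cookie, &info,
			sizeof(thread_info)) == B_OK) {
		dprintf("thread %ld: %s\n", info.thread, info.name);
	}
*/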
2498 
2499 thread_id
2500 find_thread(const char *name)
2501 {
2502 	struct hash_iterator iterator;
2503 	struct thread *thread;
2504 	cpu_status state;
2505 
2506 	if (name == NULL)
2507 		return thread_get_current_thread_id();
2508 
2509 	state = disable_interrupts();
2510 	GRAB_THREAD_LOCK();
2511 
2512 	// ToDo: this might not be in the same order as find_thread() in BeOS
2513 	//		which could be theoretically problematic.
2514 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2515 	//		cheap either - although this function is probably used very rarely.
2516 
2517 	hash_open(sThreadHash, &iterator);
2518 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
2519 			!= NULL) {
2520 		// Search through hash
2521 		if (thread->name != NULL && !strcmp(thread->name, name)) {
2522 			thread_id id = thread->id;
2523 
2524 			RELEASE_THREAD_LOCK();
2525 			restore_interrupts(state);
2526 			return id;
2527 		}
2528 	}
2529 
2530 	RELEASE_THREAD_LOCK();
2531 	restore_interrupts(state);
2532 
2533 	return B_NAME_NOT_FOUND;
2534 }
2535 
2536 
2537 status_t
2538 rename_thread(thread_id id, const char *name)
2539 {
2540 	struct thread *thread = thread_get_current_thread();
2541 	status_t status = B_BAD_THREAD_ID;
2542 	cpu_status state;
2543 
2544 	if (name == NULL)
2545 		return B_BAD_VALUE;
2546 
2547 	state = disable_interrupts();
2548 	GRAB_THREAD_LOCK();
2549 
2550 	if (thread->id != id)
2551 		thread = thread_get_thread_struct_locked(id);
2552 
2553 	if (thread != NULL) {
2554 		if (thread->team == thread_get_current_thread()->team) {
2555 			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2556 			status = B_OK;
2557 		} else
2558 			status = B_NOT_ALLOWED;
2559 	}
2560 
2561 	RELEASE_THREAD_LOCK();
2562 	restore_interrupts(state);
2563 
2564 	return status;
2565 }
2566 
2567 
2568 status_t
2569 set_thread_priority(thread_id id, int32 priority)
2570 {
2571 	struct thread *thread;
2572 	int32 oldPriority;
2573 
2574 	// make sure the passed in priority is within bounds
2575 	if (priority > THREAD_MAX_SET_PRIORITY)
2576 		priority = THREAD_MAX_SET_PRIORITY;
2577 	if (priority < THREAD_MIN_SET_PRIORITY)
2578 		priority = THREAD_MIN_SET_PRIORITY;
2579 
2580 	thread = thread_get_current_thread();
2581 	if (thread->id == id) {
2582 		if (thread_is_idle_thread(thread))
2583 			return B_NOT_ALLOWED;
2584 
2585 		// It's our own thread, so we know it isn't in the run queue, and
2586 		// we can manipulate its structure directly
2587 		oldPriority = thread->priority;
2588 			// Note that this might not return the correct value if we are
2589 			// preempted here, and another thread changes our priority before
2590 			// the next line is executed.
2591 		thread->priority = thread->next_priority = priority;
2592 	} else {
2593 		InterruptsSpinLocker _(gThreadSpinlock);
2594 
2595 		thread = thread_get_thread_struct_locked(id);
2596 		if (thread == NULL)
2597 			return B_BAD_THREAD_ID;
2598 
2599 		if (thread_is_idle_thread(thread))
2600 			return B_NOT_ALLOWED;
2601 
2602 		oldPriority = thread->priority;
2603 		scheduler_set_thread_priority(thread, priority);
2604 	}
2605 
2606 	return oldPriority;
2607 }
2608 
2609 
2610 status_t
2611 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2612 {
2613 	status_t status;
2614 
2615 	if (timebase != B_SYSTEM_TIMEBASE)
2616 		return B_BAD_VALUE;
2617 
2618 	InterruptsSpinLocker _(gThreadSpinlock);
2619 	struct thread* thread = thread_get_current_thread();
2620 
2621 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, NULL);
2622 	status = thread_block_with_timeout_locked(flags, timeout);
2623 
2624 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2625 		return B_OK;
2626 
2627 	return status;
2628 }
2629 
2630 
2631 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2632 status_t
2633 snooze(bigtime_t timeout)
2634 {
2635 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2636 }
2637 
2638 
2639 /*!	snooze_until() for internal kernel use only; doesn't interrupt on
2640 	signals.
2641 */
2642 status_t
2643 snooze_until(bigtime_t timeout, int timebase)
2644 {
2645 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2646 }
2647 
2648 
2649 status_t
2650 wait_for_thread(thread_id thread, status_t *_returnCode)
2651 {
2652 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2653 }
2654 
2655 
2656 status_t
2657 suspend_thread(thread_id id)
2658 {
2659 	if (id <= 0)
2660 		return B_BAD_VALUE;
2661 
2662 	return send_signal(id, SIGSTOP);
2663 }
2664 
2665 
2666 status_t
2667 resume_thread(thread_id id)
2668 {
2669 	if (id <= 0)
2670 		return B_BAD_VALUE;
2671 
2672 	return send_signal_etc(id, SIGCONT, SIGNAL_FLAG_DONT_RESTART_SYSCALL);
2673 		// This retains compatibility with BeOS, which documents the
2674 		// combination of suspend_thread() and resume_thread() to
2675 		// interrupt threads waiting on semaphores.
2676 }
2677 
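/* Editorial note (illustrative sketch, not part of the original source):
   as the comment in resume_thread() above describes, suspending and then
   resuming a thread that is blocked on a semaphore interrupts the wait, so
   the acquisition returns with a non-B_OK status instead of blocking
   further. "blockedThread" and "someSem" are hypothetical.

	// in the blocked thread
	status_t error = acquire_sem(someSem);
		// returns early with an error once interrupted below

	// in some other thread
	suspend_thread(blockedThread);
	resume_thread(blockedThread);
*/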
2678 
2679 thread_id
2680 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2681 	void *arg)
2682 {
2683 	thread_creation_attributes attributes;
2684 	attributes.entry = (thread_entry_func)function;
2685 	attributes.name = name;
2686 	attributes.priority = priority;
2687 	attributes.args1 = arg;
2688 	attributes.args2 = NULL;
2689 	attributes.stack_address = NULL;
2690 	attributes.stack_size = 0;
2691 	attributes.team = team_get_kernel_team()->id;
2692 	attributes.thread = -1;
2693 
2694 	return create_thread(attributes, true);
2695 }
2696 
2697 
2698 int
2699 getrlimit(int resource, struct rlimit * rlp)
2700 {
2701 	status_t error = common_getrlimit(resource, rlp);
2702 	if (error != B_OK) {
2703 		errno = error;
2704 		return -1;
2705 	}
2706 
2707 	return 0;
2708 }
2709 
2710 
2711 int
2712 setrlimit(int resource, const struct rlimit * rlp)
2713 {
2714 	status_t error = common_setrlimit(resource, rlp);
2715 	if (error != B_OK) {
2716 		errno = error;
2717 		return -1;
2718 	}
2719 
2720 	return 0;
2721 }
2722 
2723 
2724 //	#pragma mark - syscalls
2725 
2726 
2727 void
2728 _user_exit_thread(status_t returnValue)
2729 {
2730 	exit_thread(returnValue);
2731 }
2732 
2733 
2734 status_t
2735 _user_kill_thread(thread_id thread)
2736 {
2737 	return kill_thread(thread);
2738 }
2739 
2740 
2741 status_t
2742 _user_resume_thread(thread_id thread)
2743 {
2744 	return resume_thread(thread);
2745 }
2746 
2747 
2748 status_t
2749 _user_suspend_thread(thread_id thread)
2750 {
2751 	return suspend_thread(thread);
2752 }
2753 
2754 
2755 status_t
2756 _user_rename_thread(thread_id thread, const char *userName)
2757 {
2758 	char name[B_OS_NAME_LENGTH];
2759 
2760 	if (!IS_USER_ADDRESS(userName)
2761 		|| userName == NULL
2762 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2763 		return B_BAD_ADDRESS;
2764 
2765 	return rename_thread(thread, name);
2766 }
2767 
2768 
2769 int32
2770 _user_set_thread_priority(thread_id thread, int32 newPriority)
2771 {
2772 	return set_thread_priority(thread, newPriority);
2773 }
2774 
2775 
2776 thread_id
2777 _user_spawn_thread(thread_creation_attributes* userAttributes)
2778 {
2779 	thread_creation_attributes attributes;
2780 	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
2781 		|| user_memcpy(&attributes, userAttributes,
2782 				sizeof(attributes)) != B_OK) {
2783 		return B_BAD_ADDRESS;
2784 	}
2785 
2786 	if (attributes.stack_size != 0
2787 		&& (attributes.stack_size < MIN_USER_STACK_SIZE
2788 			|| attributes.stack_size > MAX_USER_STACK_SIZE)) {
2789 		return B_BAD_VALUE;
2790 	}
2791 
2792 	char name[B_OS_NAME_LENGTH];
2793 	thread_id threadID;
2794 
2795 	if (!IS_USER_ADDRESS(attributes.entry) || attributes.entry == NULL
2796 		|| (attributes.stack_address != NULL
2797 			&& !IS_USER_ADDRESS(attributes.stack_address))
2798 		|| (attributes.name != NULL && (!IS_USER_ADDRESS(attributes.name)
2799 			|| user_strlcpy(name, attributes.name, B_OS_NAME_LENGTH) < 0)))
2800 		return B_BAD_ADDRESS;
2801 
2802 	attributes.name = attributes.name != NULL ? name : "user thread";
2803 	attributes.team = thread_get_current_thread()->team->id;
2804 	attributes.thread = -1;
2805 
2806 	threadID = create_thread(attributes, false);
2807 
2808 	if (threadID >= 0)
2809 		user_debug_thread_created(threadID);
2810 
2811 	return threadID;
2812 }
2813 
2814 
2815 status_t
2816 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2817 {
2818 	// NOTE: We only know the system timebase at the moment.
2819 	syscall_restart_handle_timeout_pre(flags, timeout);
2820 
2821 	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2822 
2823 	return syscall_restart_handle_timeout_post(error, timeout);
2824 }
2825 
2826 
2827 void
2828 _user_thread_yield(void)
2829 {
2830 	thread_yield(true);
2831 }
2832 
2833 
2834 status_t
2835 _user_get_thread_info(thread_id id, thread_info *userInfo)
2836 {
2837 	thread_info info;
2838 	status_t status;
2839 
2840 	if (!IS_USER_ADDRESS(userInfo))
2841 		return B_BAD_ADDRESS;
2842 
2843 	status = _get_thread_info(id, &info, sizeof(thread_info));
2844 
2845 	if (status >= B_OK
2846 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2847 		return B_BAD_ADDRESS;
2848 
2849 	return status;
2850 }
2851 
2852 
2853 status_t
2854 _user_get_next_thread_info(team_id team, int32 *userCookie,
2855 	thread_info *userInfo)
2856 {
2857 	status_t status;
2858 	thread_info info;
2859 	int32 cookie;
2860 
2861 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2862 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2863 		return B_BAD_ADDRESS;
2864 
2865 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2866 	if (status < B_OK)
2867 		return status;
2868 
2869 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2870 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2871 		return B_BAD_ADDRESS;
2872 
2873 	return status;
2874 }
2875 
2876 
2877 thread_id
2878 _user_find_thread(const char *userName)
2879 {
2880 	char name[B_OS_NAME_LENGTH];
2881 
2882 	if (userName == NULL)
2883 		return find_thread(NULL);
2884 
2885 	if (!IS_USER_ADDRESS(userName)
2886 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2887 		return B_BAD_ADDRESS;
2888 
2889 	return find_thread(name);
2890 }
2891 
2892 
2893 status_t
2894 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2895 {
2896 	status_t returnCode;
2897 	status_t status;
2898 
2899 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2900 		return B_BAD_ADDRESS;
2901 
2902 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2903 
2904 	if (status == B_OK && userReturnCode != NULL
2905 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
2906 		return B_BAD_ADDRESS;
2907 	}
2908 
2909 	return syscall_restart_handle_post(status);
2910 }
2911 
2912 
2913 bool
2914 _user_has_data(thread_id thread)
2915 {
2916 	return has_data(thread);
2917 }
2918 
2919 
2920 status_t
2921 _user_send_data(thread_id thread, int32 code, const void *buffer,
2922 	size_t bufferSize)
2923 {
2924 	if (!IS_USER_ADDRESS(buffer))
2925 		return B_BAD_ADDRESS;
2926 
2927 	return send_data_etc(thread, code, buffer, bufferSize,
2928 		B_KILL_CAN_INTERRUPT);
2929 		// supports userland buffers
2930 }
2931 
2932 
2933 status_t
2934 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2935 {
2936 	thread_id sender;
2937 	status_t code;
2938 
2939 	if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
2940 		|| !IS_USER_ADDRESS(buffer))
2941 		return B_BAD_ADDRESS;
2942 
2943 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2944 		// supports userland buffers
2945 
2946 	if (_userSender != NULL)
2947 		if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
2948 			return B_BAD_ADDRESS;
2949 
2950 	return code;
2951 }
2952 
2953 
2954 status_t
2955 _user_block_thread(uint32 flags, bigtime_t timeout)
2956 {
2957 	syscall_restart_handle_timeout_pre(flags, timeout);
2958 	flags |= B_CAN_INTERRUPT;
2959 
2960 	struct thread* thread = thread_get_current_thread();
2961 
2962 	InterruptsSpinLocker locker(gThreadSpinlock);
2963 
2964 	// check if we are already done
2965 	if (thread->user_thread->wait_status <= 0)
2966 		return thread->user_thread->wait_status;
2967 
2968 	// nope, so wait
2969 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");
2970 	status_t status = thread_block_with_timeout_locked(flags, timeout);
2971 	thread->user_thread->wait_status = status;
2972 
2973 	return syscall_restart_handle_timeout_post(status, timeout);
2974 }
2975 
2976 
2977 status_t
2978 _user_unblock_thread(thread_id threadID, status_t status)
2979 {
2980 	InterruptsSpinLocker locker(gThreadSpinlock);
2981 	status_t error = user_unblock_thread(threadID, status);
2982 	scheduler_reschedule_if_necessary_locked();
2983 	return error;
2984 }
2985 
2986 
2987 status_t
2988 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
2989 {
2990 	enum {
2991 		MAX_USER_THREADS_TO_UNBLOCK	= 128
2992 	};
2993 
2994 	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
2995 		return B_BAD_ADDRESS;
2996 	if (count > MAX_USER_THREADS_TO_UNBLOCK)
2997 		return B_BAD_VALUE;
2998 
2999 	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
3000 	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
3001 		return B_BAD_ADDRESS;
3002 
3003 	InterruptsSpinLocker locker(gThreadSpinlock);
3004 	for (uint32 i = 0; i < count; i++)
3005 		user_unblock_thread(threads[i], status);
3006 
3007 	scheduler_reschedule_if_necessary_locked();
3008 
3009 	return B_OK;
3010 }
3011 
3012 
3013 // TODO: the following two functions don't belong here
3014 
3015 
3016 int
3017 _user_getrlimit(int resource, struct rlimit *urlp)
3018 {
3019 	struct rlimit rl;
3020 	int ret;
3021 
3022 	if (urlp == NULL)
3023 		return EINVAL;
3024 
3025 	if (!IS_USER_ADDRESS(urlp))
3026 		return B_BAD_ADDRESS;
3027 
3028 	ret = common_getrlimit(resource, &rl);
3029 
3030 	if (ret == 0) {
3031 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
3032 		if (ret < 0)
3033 			return ret;
3034 
3035 		return 0;
3036 	}
3037 
3038 	return ret;
3039 }
3040 
3041 
3042 int
3043 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
3044 {
3045 	struct rlimit resourceLimit;
3046 
3047 	if (userResourceLimit == NULL)
3048 		return EINVAL;
3049 
3050 	if (!IS_USER_ADDRESS(userResourceLimit)
3051 		|| user_memcpy(&resourceLimit, userResourceLimit,
3052 			sizeof(struct rlimit)) < B_OK)
3053 		return B_BAD_ADDRESS;
3054 
3055 	return common_setrlimit(resource, &resourceLimit);
3056 }
3057