xref: /haiku/src/system/kernel/thread.cpp (revision 5e96d7d537fbec23bad4ae9b4c8e7b02e769f0c6)
1 /*
2  * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*! Threading routines */
12 
13 
14 #include <thread.h>
15 
16 #include <errno.h>
17 #include <malloc.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/resource.h>
22 
23 #include <algorithm>
24 
25 #include <OS.h>
26 
27 #include <util/AutoLock.h>
28 
29 #include <arch/debug.h>
30 #include <boot/kernel_args.h>
31 #include <condition_variable.h>
32 #include <cpu.h>
33 #include <int.h>
34 #include <kimage.h>
35 #include <kscheduler.h>
36 #include <ksignal.h>
37 #include <Notifications.h>
38 #include <real_time_clock.h>
39 #include <slab/Slab.h>
40 #include <smp.h>
41 #include <syscalls.h>
42 #include <syscall_restart.h>
43 #include <team.h>
44 #include <tls.h>
45 #include <user_runtime.h>
46 #include <user_thread.h>
47 #include <vfs.h>
48 #include <vm/vm.h>
49 #include <vm/VMAddressSpace.h>
50 #include <wait_for_objects.h>
51 
52 #include "TeamThreadTables.h"
53 
54 
55 //#define TRACE_THREAD
56 #ifdef TRACE_THREAD
57 #	define TRACE(x) dprintf x
58 #else
59 #	define TRACE(x) ;
60 #endif
61 
62 
63 #define THREAD_MAX_MESSAGE_SIZE		65536
64 
65 
66 // #pragma mark - ThreadHashTable
67 
68 
69 typedef BKernel::TeamThreadTable<Thread> ThreadHashTable;
70 
71 
72 // thread list
73 static Thread sIdleThreads[SMP_MAX_CPUS];
74 static ThreadHashTable sThreadHash;
75 static spinlock sThreadHashLock = B_SPINLOCK_INITIALIZER;
76 static thread_id sNextThreadID = 2;
77 	// ID 1 is allocated for the kernel by Team::Team() behind our back
78 
79 // some arbitrarily chosen limits -- should probably depend on the available
80 // memory (the limit is not yet enforced)
81 static int32 sMaxThreads = 4096;
82 static int32 sUsedThreads = 0;
83 
84 spinlock gThreadCreationLock = B_SPINLOCK_INITIALIZER;
85 
86 
87 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
88 	Thread*	thread;
89 	team_id	teamID;
90 
91 	UndertakerEntry(Thread* thread, team_id teamID)
92 		:
93 		thread(thread),
94 		teamID(teamID)
95 	{
96 	}
97 };
98 
99 
100 struct ThreadEntryArguments {
101 	status_t	(*kernelFunction)(void* argument);
102 	void*		argument;
103 	bool		enterUserland;
104 };
105 
106 struct UserThreadEntryArguments : ThreadEntryArguments {
107 	addr_t			userlandEntry;
108 	void*			userlandArgument1;
109 	void*			userlandArgument2;
110 	pthread_t		pthread;
111 	arch_fork_arg*	forkArgs;
112 	uint32			flags;
113 };
114 
115 
116 class ThreadNotificationService : public DefaultNotificationService {
117 public:
118 	ThreadNotificationService()
119 		: DefaultNotificationService("threads")
120 	{
121 	}
122 
123 	void Notify(uint32 eventCode, team_id teamID, thread_id threadID,
124 		Thread* thread = NULL)
125 	{
126 		char eventBuffer[180];
127 		KMessage event;
128 		event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
129 		event.AddInt32("event", eventCode);
130 		event.AddInt32("team", teamID);
131 		event.AddInt32("thread", threadID);
132 		if (thread != NULL)
133 			event.AddPointer("threadStruct", thread);
134 
135 		DefaultNotificationService::Notify(event, eventCode);
136 	}
137 
138 	void Notify(uint32 eventCode, Thread* thread)
139 	{
140 		return Notify(eventCode, thread->id, thread->team->id, thread);
141 	}
142 };
143 
144 
145 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
146 static spinlock sUndertakerLock = B_SPINLOCK_INITIALIZER;
147 static ConditionVariable sUndertakerCondition;
148 static ThreadNotificationService sNotificationService;
149 
150 
151 // object cache to allocate thread structures from
152 static object_cache* sThreadCache;
153 
154 
155 // #pragma mark - Thread
156 
157 
158 /*!	Constructs a thread.
159 
160 	\param name The thread's name.
161 	\param threadID The ID to be assigned to the new thread. If
162 		  \code < 0 \endcode a fresh one is allocated.
163 	\param cpu The CPU the thread shall be assigned to.
164 */
165 Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)
166 	:
167 	flags(0),
168 	serial_number(-1),
169 	hash_next(NULL),
170 	team_next(NULL),
171 	priority(-1),
172 	io_priority(-1),
173 	cpu(cpu),
174 	previous_cpu(NULL),
175 	pinned_to_cpu(0),
176 	sig_block_mask(0),
177 	sigsuspend_original_unblocked_mask(0),
178 	user_signal_context(NULL),
179 	signal_stack_base(0),
180 	signal_stack_size(0),
181 	signal_stack_enabled(false),
182 	in_kernel(true),
183 	has_yielded(false),
184 	user_thread(NULL),
185 	fault_handler(0),
186 	page_faults_allowed(1),
187 	team(NULL),
188 	select_infos(NULL),
189 	kernel_stack_area(-1),
190 	kernel_stack_base(0),
191 	user_stack_area(-1),
192 	user_stack_base(0),
193 	user_local_storage(0),
194 	kernel_errno(0),
195 	user_time(0),
196 	kernel_time(0),
197 	last_time(0),
198 	cpu_clock_offset(0),
199 	post_interrupt_callback(NULL),
200 	post_interrupt_data(NULL)
201 {
202 	id = threadID >= 0 ? threadID : allocate_thread_id();
203 	visible = false;
204 
205 	// init locks
206 	char lockName[32];
207 	snprintf(lockName, sizeof(lockName), "Thread:%" B_PRId32, id);
208 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
209 
210 	B_INITIALIZE_SPINLOCK(&time_lock);
211 	B_INITIALIZE_SPINLOCK(&scheduler_lock);
212 	B_INITIALIZE_RW_SPINLOCK(&team_lock);
213 
214 	// init name
215 	if (name != NULL)
216 		strlcpy(this->name, name, B_OS_NAME_LENGTH);
217 	else
218 		strcpy(this->name, "unnamed thread");
219 
220 	exit.status = 0;
221 
222 	list_init(&exit.waiters);
223 
224 	exit.sem = -1;
225 	msg.write_sem = -1;
226 	msg.read_sem = -1;
227 
228 	// add to thread table -- yet invisible
229 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
230 	sThreadHash.Insert(this);
231 }
232 
233 
234 Thread::~Thread()
235 {
236 	// Delete resources that should actually be deleted by the thread itself
237 	// when it exits, but that might still exist if the thread was never run.
238 
239 	if (user_stack_area >= 0)
240 		delete_area(user_stack_area);
241 
242 	DeleteUserTimers(false);
243 
244 	// delete the resources that may remain in either case
245 
246 	if (kernel_stack_area >= 0)
247 		delete_area(kernel_stack_area);
248 
249 	fPendingSignals.Clear();
250 
251 	if (exit.sem >= 0)
252 		delete_sem(exit.sem);
253 	if (msg.write_sem >= 0)
254 		delete_sem(msg.write_sem);
255 	if (msg.read_sem >= 0)
256 		delete_sem(msg.read_sem);
257 
258 	scheduler_on_thread_destroy(this);
259 
260 	mutex_destroy(&fLock);
261 
262 	// remove from thread table
263 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
264 	sThreadHash.Remove(this);
265 }
266 
267 
268 /*static*/ status_t
269 Thread::Create(const char* name, Thread*& _thread)
270 {
271 	Thread* thread = new Thread(name, -1, NULL);
272 	if (thread == NULL)
273 		return B_NO_MEMORY;
274 
275 	status_t error = thread->Init(false);
276 	if (error != B_OK) {
277 		delete thread;
278 		return error;
279 	}
280 
281 	_thread = thread;
282 	return B_OK;
283 }
284 
285 
286 /*static*/ Thread*
287 Thread::Get(thread_id id)
288 {
289 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
290 	Thread* thread = sThreadHash.Lookup(id);
291 	if (thread != NULL)
292 		thread->AcquireReference();
293 	return thread;
294 }
295 
296 
297 /*static*/ Thread*
298 Thread::GetAndLock(thread_id id)
299 {
300 	// look it up and acquire a reference
301 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
302 	Thread* thread = sThreadHash.Lookup(id);
303 	if (thread == NULL)
304 		return NULL;
305 
306 	thread->AcquireReference();
307 	threadHashLocker.Unlock();
308 
309 	// lock and check whether it is still in the hash table
310 	thread->Lock();
311 	threadHashLocker.Lock();
312 
313 	if (sThreadHash.Lookup(id) == thread)
314 		return thread;
315 
316 	threadHashLocker.Unlock();
317 
318 	// nope, the thread is no longer in the hash table
319 	thread->UnlockAndReleaseReference();
320 
321 	return NULL;
322 }
323 
324 
325 /*static*/ Thread*
326 Thread::GetDebug(thread_id id)
327 {
328 	return sThreadHash.Lookup(id, false);
329 }
330 
331 
332 /*static*/ bool
333 Thread::IsAlive(thread_id id)
334 {
335 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
336 	return sThreadHash.Lookup(id) != NULL;
337 }
338 
339 
340 void*
341 Thread::operator new(size_t size)
342 {
343 	return object_cache_alloc(sThreadCache, 0);
344 }
345 
346 
347 void*
348 Thread::operator new(size_t, void* pointer)
349 {
350 	return pointer;
351 }
352 
353 
354 void
355 Thread::operator delete(void* pointer, size_t size)
356 {
357 	object_cache_free(sThreadCache, pointer, 0);
358 }
359 
360 
361 status_t
362 Thread::Init(bool idleThread)
363 {
364 	status_t error = scheduler_on_thread_create(this, idleThread);
365 	if (error != B_OK)
366 		return error;
367 
368 	char temp[64];
369 	snprintf(temp, sizeof(temp), "thread_%" B_PRId32 "_retcode_sem", id);
370 	exit.sem = create_sem(0, temp);
371 	if (exit.sem < 0)
372 		return exit.sem;
373 
374 	snprintf(temp, sizeof(temp), "%s send", name);
375 	msg.write_sem = create_sem(1, temp);
376 	if (msg.write_sem < 0)
377 		return msg.write_sem;
378 
379 	snprintf(temp, sizeof(temp), "%s receive", name);
380 	msg.read_sem = create_sem(0, temp);
381 	if (msg.read_sem < 0)
382 		return msg.read_sem;
383 
384 	error = arch_thread_init_thread_struct(this);
385 	if (error != B_OK)
386 		return error;
387 
388 	return B_OK;
389 }
390 
391 
392 /*!	Checks whether the thread is still in the thread hash table.
393 */
394 bool
395 Thread::IsAlive() const
396 {
397 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
398 
399 	return sThreadHash.Lookup(id) != NULL;
400 }
401 
402 
403 void
404 Thread::ResetSignalsOnExec()
405 {
406 	// We are supposed to keep the pending signals and the signal mask. Only
407 	// the signal stack, if set, shall be unset.
408 
409 	sigsuspend_original_unblocked_mask = 0;
410 	user_signal_context = NULL;
411 	signal_stack_base = 0;
412 	signal_stack_size = 0;
413 	signal_stack_enabled = false;
414 }
415 
416 
417 /*!	Adds the given user timer to the thread and, if user-defined, assigns it an
418 	ID.
419 
420 	The caller must hold the thread's lock.
421 
422 	\param timer The timer to be added. If it doesn't have an ID yet, it is
423 		considered user-defined and will be assigned an ID.
424 	\return \c B_OK, if the timer was added successfully, another error code
425 		otherwise.
426 */
427 status_t
428 Thread::AddUserTimer(UserTimer* timer)
429 {
430 	// If the timer is user-defined, check timer limit and increment
431 	// user-defined count.
432 	if (timer->ID() < 0 && !team->CheckAddUserDefinedTimer())
433 		return EAGAIN;
434 
435 	fUserTimers.AddTimer(timer);
436 
437 	return B_OK;
438 }
439 
440 
441 /*!	Removes the given user timer from the thread.
442 
443 	The caller must hold the thread's lock.
444 
445 	\param timer The timer to be removed.
446 
447 */
448 void
449 Thread::RemoveUserTimer(UserTimer* timer)
450 {
451 	fUserTimers.RemoveTimer(timer);
452 
453 	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
454 		team->UserDefinedTimersRemoved(1);
455 }
456 
457 
458 /*!	Deletes all (or all user-defined) user timers of the thread.
459 
460 	The caller must hold the thread's lock.
461 
462 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
463 		otherwise all timers are deleted.
464 */
465 void
466 Thread::DeleteUserTimers(bool userDefinedOnly)
467 {
468 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
469 	if (count > 0)
470 		team->UserDefinedTimersRemoved(count);
471 }
472 
473 
474 void
475 Thread::DeactivateCPUTimeUserTimers()
476 {
477 	while (ThreadTimeUserTimer* timer = fCPUTimeUserTimers.Head())
478 		timer->Deactivate();
479 }
480 
481 
482 // #pragma mark - ThreadListIterator
483 
484 
485 ThreadListIterator::ThreadListIterator()
486 {
487 	// queue the entry
488 	InterruptsSpinLocker locker(sThreadHashLock);
489 	sThreadHash.InsertIteratorEntry(&fEntry);
490 }
491 
492 
493 ThreadListIterator::~ThreadListIterator()
494 {
495 	// remove the entry
496 	InterruptsSpinLocker locker(sThreadHashLock);
497 	sThreadHash.RemoveIteratorEntry(&fEntry);
498 }
499 
500 
501 Thread*
502 ThreadListIterator::Next()
503 {
504 	// get the next thread -- if there is one, acquire a reference to it
505 	InterruptsSpinLocker locker(sThreadHashLock);
506 	Thread* thread = sThreadHash.NextElement(&fEntry);
507 	if (thread != NULL)
508 		thread->AcquireReference();
509 
510 	return thread;
511 }
512 
513 
514 // #pragma mark - ThreadCreationAttributes
515 
516 
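/*!	Initializes creation attributes for a kernel-spawned thread: only the
	kernel entry function, name, priority, argument, team, and (optional)
	pre-allocated Thread object are set; all userland related fields are
	cleared. If \a team is negative, the kernel team is used.
*/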
517 ThreadCreationAttributes::ThreadCreationAttributes(thread_func function,
518 	const char* name, int32 priority, void* arg, team_id team,
519 	Thread* thread)
520 {
521 	this->entry = NULL;
522 	this->name = name;
523 	this->priority = priority;
524 	this->args1 = NULL;
525 	this->args2 = NULL;
526 	this->stack_address = NULL;
527 	this->stack_size = 0;
528 	this->guard_size = 0;
529 	this->pthread = NULL;
530 	this->flags = 0;
531 	this->team = team >= 0 ? team : team_get_kernel_team()->id;
532 	this->thread = thread;
533 	this->signal_mask = 0;
534 	this->additional_stack_size = 0;
535 	this->kernelEntry = function;
536 	this->kernelArgument = arg;
537 	this->forkArgs = NULL;
538 }
539 
540 
541 /*!	Initializes the structure from a userland structure.
542 	\param userAttributes The userland structure (must be a userland address).
543 	\param nameBuffer A character array of at least size B_OS_NAME_LENGTH,
544 		which will be used for the \c name field, if the userland structure has
545 		a name. The buffer must remain valid as long as this structure is in
546 		use afterwards (or until it is reinitialized).
547 	\return \c B_OK, if the initialization went fine, another error code
548 		otherwise.
549 */
550 status_t
551 ThreadCreationAttributes::InitFromUserAttributes(
552 	const thread_creation_attributes* userAttributes, char* nameBuffer)
553 {
554 	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
555 		|| user_memcpy((thread_creation_attributes*)this, userAttributes,
556 				sizeof(thread_creation_attributes)) != B_OK) {
557 		return B_BAD_ADDRESS;
558 	}
559 
560 	if (stack_size != 0
561 		&& (stack_size < MIN_USER_STACK_SIZE
562 			|| stack_size > MAX_USER_STACK_SIZE)) {
563 		return B_BAD_VALUE;
564 	}
565 
566 	if (entry == NULL || !IS_USER_ADDRESS(entry)
567 		|| (stack_address != NULL && !IS_USER_ADDRESS(stack_address))
568 		|| (name != NULL && (!IS_USER_ADDRESS(name)
569 			|| user_strlcpy(nameBuffer, name, B_OS_NAME_LENGTH) < 0))) {
570 		return B_BAD_ADDRESS;
571 	}
572 
573 	name = name != NULL ? nameBuffer : "user thread";
574 
575 	// kernel only attributes (not in thread_creation_attributes):
576 	Thread* currentThread = thread_get_current_thread();
577 	team = currentThread->team->id;
578 	thread = NULL;
579 	signal_mask = currentThread->sig_block_mask;
580 		// inherit the current thread's signal mask
581 	additional_stack_size = 0;
582 	kernelEntry = NULL;
583 	kernelArgument = NULL;
584 	forkArgs = NULL;
585 
586 	return B_OK;
587 }
588 
589 
590 // #pragma mark - private functions
591 
592 
593 /*!	Inserts a thread into a team.
594 	The caller must hold the team's lock, the thread's lock, and the scheduler
595 	lock.
596 */
597 static void
598 insert_thread_into_team(Team *team, Thread *thread)
599 {
600 	thread->team_next = team->thread_list;
601 	team->thread_list = thread;
602 	team->num_threads++;
603 
604 	if (team->num_threads == 1) {
605 		// this was the first thread
606 		team->main_thread = thread;
607 	}
608 	thread->team = team;
609 }
610 
611 
612 /*!	Removes a thread from a team.
613 	The caller must hold the team's lock, the thread's lock, and the scheduler
614 	lock.
615 */
616 static void
617 remove_thread_from_team(Team *team, Thread *thread)
618 {
619 	Thread *temp, *last = NULL;
620 
621 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
622 		if (temp == thread) {
623 			if (last == NULL)
624 				team->thread_list = temp->team_next;
625 			else
626 				last->team_next = temp->team_next;
627 
628 			team->num_threads--;
629 			break;
630 		}
631 		last = temp;
632 	}
633 }
634 
635 
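/*!	Prepares the given thread for its first transition to userland: sets up
	TLS and the thread's user_thread structure, then either restores the fork
	frame (for fork()ed threads) or jumps to the userland entry point.
	Only returns in case of error.
*/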
636 static status_t
637 enter_userspace(Thread* thread, UserThreadEntryArguments* args)
638 {
639 	status_t error = arch_thread_init_tls(thread);
640 	if (error != B_OK) {
641 		dprintf("Failed to init TLS for new userland thread \"%s\" (%" B_PRId32
642 			")\n", thread->name, thread->id);
643 		free(args->forkArgs);
644 		return error;
645 	}
646 
647 	user_debug_update_new_thread_flags(thread);
648 
649 	// init the thread's user_thread
650 	user_thread* userThread = thread->user_thread;
651 	userThread->pthread = args->pthread;
652 	userThread->flags = 0;
653 	userThread->wait_status = B_OK;
654 	userThread->defer_signals
655 		= (args->flags & THREAD_CREATION_FLAG_DEFER_SIGNALS) != 0 ? 1 : 0;
656 	userThread->pending_signals = 0;
657 
658 	if (args->forkArgs != NULL) {
659 		// This is a fork()ed thread. Copy the fork args onto the stack and
660 		// free them.
661 		arch_fork_arg archArgs = *args->forkArgs;
662 		free(args->forkArgs);
663 
664 		arch_restore_fork_frame(&archArgs);
665 			// this one won't return here
666 		return B_ERROR;
667 	}
668 
669 	// Jump to the entry point in user space. Only returns if something fails.
670 	return arch_thread_enter_userspace(thread, args->userlandEntry,
671 		args->userlandArgument1, args->userlandArgument2);
672 }
673 
674 
675 status_t
676 thread_enter_userspace_new_team(Thread* thread, addr_t entryFunction,
677 	void* argument1, void* argument2)
678 {
679 	UserThreadEntryArguments entryArgs;
680 	entryArgs.kernelFunction = NULL;
681 	entryArgs.argument = NULL;
682 	entryArgs.enterUserland = true;
683 	entryArgs.userlandEntry = (addr_t)entryFunction;
684 	entryArgs.userlandArgument1 = argument1;
685 	entryArgs.userlandArgument2 = argument2;
686 	entryArgs.pthread = NULL;
687 	entryArgs.forkArgs = NULL;
688 	entryArgs.flags = 0;
689 
690 	return enter_userspace(thread, &entryArgs);
691 }
692 
693 
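/*!	First function a newly created thread executes (on its kernel stack).
	Completes the scheduler's set-up of the thread, re-enables interrupts,
	calls the kernel function (if any), enters userland if requested, and
	finally exits the thread.
*/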
694 static void
695 common_thread_entry(void* _args)
696 {
697 	Thread* thread = thread_get_current_thread();
698 
699 	// The thread is new and has been scheduled the first time.
700 
701 	scheduler_new_thread_entry(thread);
702 
703 	// unlock the scheduler lock and enable interrupts
704 	release_spinlock(&thread->scheduler_lock);
705 	enable_interrupts();
706 
707 	// call the kernel function, if any
708 	ThreadEntryArguments* args = (ThreadEntryArguments*)_args;
709 	if (args->kernelFunction != NULL)
710 		args->kernelFunction(args->argument);
711 
712 	// If requested, enter userland, now.
713 	if (args->enterUserland) {
714 		enter_userspace(thread, (UserThreadEntryArguments*)args);
715 			// only returns in case of error
716 
717 		// If that's the team's main thread, init the team exit info.
718 		if (thread == thread->team->main_thread)
719 			team_init_exit_info_on_error(thread->team);
720 	}
721 
722 	// we're done
723 	thread_exit();
724 }
725 
726 
727 /*!	Prepares the given thread's kernel stack for executing its entry function.
728 
729 	The data pointed to by \a data of size \a dataSize are copied to the
730 	thread's kernel stack. A pointer to the copy's data is passed to the entry
731 	function. The entry function is common_thread_entry().
732 
733 	\param thread The thread.
734 	\param data Pointer to data to be copied to the thread's stack and passed
735 		to the entry function.
736 	\param dataSize The size of \a data.
737  */
738 static void
739 init_thread_kernel_stack(Thread* thread, const void* data, size_t dataSize)
740 {
741 	uint8* stack = (uint8*)thread->kernel_stack_base;
742 	uint8* stackTop = (uint8*)thread->kernel_stack_top;
743 
744 	// clear (or rather invalidate) the kernel stack contents, if compiled with
745 	// debugging
746 #if KDEBUG > 0
747 #	if defined(DEBUG_KERNEL_STACKS) && defined(STACK_GROWS_DOWNWARDS)
748 	memset((void*)(stack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE), 0xcc,
749 		KERNEL_STACK_SIZE);
750 #	else
751 	memset(stack, 0xcc, KERNEL_STACK_SIZE);
752 #	endif
753 #endif
754 
755 	// copy the data onto the stack, with 16-byte alignment to be on the safe
756 	// side
757 	void* clonedData;
758 #ifdef STACK_GROWS_DOWNWARDS
759 	clonedData = (void*)ROUNDDOWN((addr_t)stackTop - dataSize, 16);
760 	stackTop = (uint8*)clonedData;
761 #else
762 	clonedData = (void*)ROUNDUP((addr_t)stack, 16);
763 	stack = (uint8*)clonedData + ROUNDUP(dataSize, 16);
764 #endif
765 
766 	memcpy(clonedData, data, dataSize);
767 
768 	arch_thread_init_kthread_stack(thread, stack, stackTop,
769 		&common_thread_entry, clonedData);
770 }
771 
772 
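/*!	Sets up the userland stack for \a thread. If \a _stackBase is given, the
	caller-supplied memory is used as-is (it must be at least
	MIN_USER_STACK_SIZE bytes); otherwise a stack area of the requested (or
	default) size plus guard, TLS, and \a additionalSize bytes is created in
	\a team's address space, using \a nameBuffer to compose the area name.
*/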
773 static status_t
774 create_thread_user_stack(Team* team, Thread* thread, void* _stackBase,
775 	size_t stackSize, size_t additionalSize, size_t guardSize,
776 	char* nameBuffer)
777 {
778 	area_id stackArea = -1;
779 	uint8* stackBase = (uint8*)_stackBase;
780 
781 	if (stackBase != NULL) {
782 		// A stack has been specified. It must be large enough to hold the
783 		// TLS space at least. Guard pages are ignored for existing stacks.
784 		STATIC_ASSERT(TLS_SIZE < MIN_USER_STACK_SIZE);
785 		if (stackSize < MIN_USER_STACK_SIZE)
786 			return B_BAD_VALUE;
787 
788 		stackSize -= TLS_SIZE;
789 	} else {
790 		// No user-defined stack -- allocate one. For non-main threads the stack
791 		// will be between USER_STACK_REGION and the main thread stack area. For
792 		// a main thread the position is fixed.
793 
794 		guardSize = PAGE_ALIGN(guardSize);
795 
796 		if (stackSize == 0) {
797 			// Use the default size (a different one for a main thread).
798 			stackSize = thread->id == team->id
799 				? USER_MAIN_THREAD_STACK_SIZE : USER_STACK_SIZE;
800 		} else {
801 			// Verify that the given stack size is large enough.
802 			if (stackSize < MIN_USER_STACK_SIZE)
803 				return B_BAD_VALUE;
804 
805 			stackSize = PAGE_ALIGN(stackSize);
806 		}
807 
808 		size_t areaSize = PAGE_ALIGN(guardSize + stackSize + TLS_SIZE
809 			+ additionalSize);
810 
811 		snprintf(nameBuffer, B_OS_NAME_LENGTH, "%s_%" B_PRId32 "_stack",
812 			thread->name, thread->id);
813 
814 		stackBase = (uint8*)USER_STACK_REGION;
815 
816 		virtual_address_restrictions virtualRestrictions = {};
817 		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
818 		virtualRestrictions.address = (void*)stackBase;
819 
820 		physical_address_restrictions physicalRestrictions = {};
821 
822 		stackArea = create_area_etc(team->id, nameBuffer,
823 			areaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA,
824 			0, guardSize, &virtualRestrictions, &physicalRestrictions,
825 			(void**)&stackBase);
826 		if (stackArea < 0)
827 			return stackArea;
828 	}
829 
830 	// set the stack
831 	ThreadLocker threadLocker(thread);
832 #ifdef STACK_GROWS_DOWNWARDS
833 	thread->user_stack_base = (addr_t)stackBase + guardSize;
834 #else
835 	thread->user_stack_base = (addr_t)stackBase;
836 #endif
837 	thread->user_stack_size = stackSize;
838 	thread->user_stack_area = stackArea;
839 
840 	return B_OK;
841 }
842 
843 
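/*!	Convenience wrapper around create_thread_user_stack() that uses the
	default stack guard size.
*/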
844 status_t
845 thread_create_user_stack(Team* team, Thread* thread, void* stackBase,
846 	size_t stackSize, size_t additionalSize)
847 {
848 	char nameBuffer[B_OS_NAME_LENGTH];
849 	return create_thread_user_stack(team, thread, stackBase, stackSize,
850 		additionalSize, USER_STACK_GUARD_SIZE, nameBuffer);
851 }
852 
853 
854 /*!	Creates a new thread.
855 
856 	\param attributes The thread creation attributes, specifying the team in
857 		which to create the thread, as well as a whole bunch of other arguments.
858 	\param kernel \c true, if a kernel-only thread shall be created, \c false,
859 		if the thread shall also be able to run in userland.
860 	\return The ID of the newly created thread (>= 0) or an error code on
861 		failure.
862 */
863 thread_id
864 thread_create_thread(const ThreadCreationAttributes& attributes, bool kernel)
865 {
866 	status_t status = B_OK;
867 
868 	TRACE(("thread_create_thread(%s, thread = %p, %s)\n", attributes.name,
869 		attributes.thread, kernel ? "kernel" : "user"));
870 
871 	// get the team
872 	Team* team = Team::Get(attributes.team);
873 	if (team == NULL)
874 		return B_BAD_TEAM_ID;
875 	BReference<Team> teamReference(team, true);
876 
877 	// If a thread object is given, acquire a reference to it, otherwise create
878 	// a new thread object with the given attributes.
879 	Thread* thread = attributes.thread;
880 	if (thread != NULL) {
881 		thread->AcquireReference();
882 	} else {
883 		status = Thread::Create(attributes.name, thread);
884 		if (status != B_OK)
885 			return status;
886 	}
887 	BReference<Thread> threadReference(thread, true);
888 
889 	thread->team = team;
890 		// set already, so, if something goes wrong, the team pointer is
891 		// available for deinitialization
892 	thread->priority = attributes.priority == -1
893 		? B_NORMAL_PRIORITY : attributes.priority;
894 	thread->state = B_THREAD_SUSPENDED;
895 
896 	thread->sig_block_mask = attributes.signal_mask;
897 
898 	// init debug structure
899 	init_thread_debug_info(&thread->debug_info);
900 
901 	// create the kernel stack
902 	char stackName[B_OS_NAME_LENGTH];
903 	snprintf(stackName, B_OS_NAME_LENGTH, "%s_%" B_PRId32 "_kstack",
904 		thread->name, thread->id);
905 	virtual_address_restrictions virtualRestrictions = {};
906 	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
907 	physical_address_restrictions physicalRestrictions = {};
908 
909 	thread->kernel_stack_area = create_area_etc(B_SYSTEM_TEAM, stackName,
910 		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
911 		B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA
912 			| B_KERNEL_STACK_AREA, 0, KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
913 		&virtualRestrictions, &physicalRestrictions,
914 		(void**)&thread->kernel_stack_base);
915 
916 	if (thread->kernel_stack_area < 0) {
917 		// we're not yet part of a team, so we can just bail out
918 		status = thread->kernel_stack_area;
919 
920 		dprintf("create_thread: error creating kernel stack: %s!\n",
921 			strerror(status));
922 
923 		return status;
924 	}
925 
926 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
927 		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
928 
929 	if (kernel) {
930 		// Init the thread's kernel stack. It will start executing
931 		// common_thread_entry() with the arguments we prepare here.
932 		ThreadEntryArguments entryArgs;
933 		entryArgs.kernelFunction = attributes.kernelEntry;
934 		entryArgs.argument = attributes.kernelArgument;
935 		entryArgs.enterUserland = false;
936 
937 		init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs));
938 	} else {
939 		// create the userland stack, if the thread doesn't have one yet
940 		if (thread->user_stack_base == 0) {
941 			status = create_thread_user_stack(team, thread,
942 				attributes.stack_address, attributes.stack_size,
943 				attributes.additional_stack_size, attributes.guard_size,
944 				stackName);
945 			if (status != B_OK)
946 				return status;
947 		}
948 
949 		// Init the thread's kernel stack. It will start executing
950 		// common_thread_entry() with the arguments we prepare here.
951 		UserThreadEntryArguments entryArgs;
952 		entryArgs.kernelFunction = attributes.kernelEntry;
953 		entryArgs.argument = attributes.kernelArgument;
954 		entryArgs.enterUserland = true;
955 		entryArgs.userlandEntry = (addr_t)attributes.entry;
956 		entryArgs.userlandArgument1 = attributes.args1;
957 		entryArgs.userlandArgument2 = attributes.args2;
958 		entryArgs.pthread = attributes.pthread;
959 		entryArgs.forkArgs = attributes.forkArgs;
960 		entryArgs.flags = attributes.flags;
961 
962 		init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs));
963 
964 		// create the pre-defined thread timers
965 		status = user_timer_create_thread_timers(team, thread);
966 		if (status != B_OK)
967 			return status;
968 	}
969 
970 	// lock the team and see whether it is still alive
971 	TeamLocker teamLocker(team);
972 	if (team->state >= TEAM_STATE_SHUTDOWN)
973 		return B_BAD_TEAM_ID;
974 
975 	bool debugNewThread = false;
976 	if (!kernel) {
977 		// allocate the user_thread structure, if not already allocated
978 		if (thread->user_thread == NULL) {
979 			thread->user_thread = team_allocate_user_thread(team);
980 			if (thread->user_thread == NULL)
981 				return B_NO_MEMORY;
982 		}
983 
984 		// If the new thread belongs to the same team as the current thread, it
985 		// may inherit some of the thread debug flags.
986 		Thread* currentThread = thread_get_current_thread();
987 		if (currentThread != NULL && currentThread->team == team) {
988 			// inherit all user flags...
989 			int32 debugFlags = atomic_get(&currentThread->debug_info.flags)
990 				& B_THREAD_DEBUG_USER_FLAG_MASK;
991 
992 			// ... except the syscall tracing flags, unless explicitly specified
993 			if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
994 				debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
995 					| B_THREAD_DEBUG_POST_SYSCALL);
996 			}
997 
998 			thread->debug_info.flags = debugFlags;
999 
1000 			// stop the new thread, if desired
1001 			debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
1002 		}
1003 	}
1004 
1005 	// We're going to make the thread live, now. The thread itself will take
1006 	// over a reference to its Thread object. We'll acquire another reference
1007 	// for our own use (and threadReference remains armed).
1008 
1009 	ThreadLocker threadLocker(thread);
1010 
1011 	InterruptsSpinLocker threadCreationLocker(gThreadCreationLock);
1012 	SpinLocker threadHashLocker(sThreadHashLock);
1013 
1014 	// check the thread limit
1015 	if (sUsedThreads >= sMaxThreads) {
1016 		// Clean up the user_thread structure. It's a bit unfortunate that the
1017 		// Thread destructor cannot do that, so we have to do that explicitly.
1018 		threadHashLocker.Unlock();
1019 		threadCreationLocker.Unlock();
1020 
1021 		user_thread* userThread = thread->user_thread;
1022 		thread->user_thread = NULL;
1023 
1024 		threadLocker.Unlock();
1025 
1026 		if (userThread != NULL)
1027 			team_free_user_thread(team, userThread);
1028 
1029 		return B_NO_MORE_THREADS;
1030 	}
1031 
1032 	// make thread visible in global hash/list
1033 	thread->visible = true;
1034 	sUsedThreads++;
1035 
1036 	scheduler_on_thread_init(thread);
1037 
1038 	thread->AcquireReference();
1039 
1040 	// Debug the new thread, if the parent thread required that (see above),
1041 	// or the respective global team debug flag is set. But only, if a
1042 	// debugger is installed for the team.
1043 	if (!kernel) {
1044 		int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
1045 		debugNewThread |= (teamDebugFlags & B_TEAM_DEBUG_STOP_NEW_THREADS) != 0;
1046 		if (debugNewThread
1047 			&& (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) != 0) {
1048 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
1049 		}
1050 	}
1051 
1052 	{
1053 		SpinLocker signalLocker(team->signal_lock);
1054 		SpinLocker timeLocker(team->time_lock);
1055 
1056 		// insert thread into team
1057 		insert_thread_into_team(team, thread);
1058 	}
1059 
1060 	threadHashLocker.Unlock();
1061 	threadCreationLocker.Unlock();
1062 	threadLocker.Unlock();
1063 	teamLocker.Unlock();
1064 
1065 	// notify listeners
1066 	sNotificationService.Notify(THREAD_ADDED, thread);
1067 
1068 	return thread->id;
1069 }
1070 
1071 
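/*!	Entry function of the undertaker thread, which buries dead threads: it
	waits for entries to be queued in sUndertakerEntries, makes sure each
	thread has stopped running, removes it from the kernel team, and releases
	the reference that still keeps its Thread object alive.
*/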
1072 static status_t
1073 undertaker(void* /*args*/)
1074 {
1075 	while (true) {
1076 		// wait for a thread to bury
1077 		InterruptsSpinLocker locker(sUndertakerLock);
1078 
1079 		while (sUndertakerEntries.IsEmpty()) {
1080 			ConditionVariableEntry conditionEntry;
1081 			sUndertakerCondition.Add(&conditionEntry);
1082 			locker.Unlock();
1083 
1084 			conditionEntry.Wait();
1085 
1086 			locker.Lock();
1087 		}
1088 
1089 		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
1090 		locker.Unlock();
1091 
1092 		UndertakerEntry entry = *_entry;
1093 			// we need a copy, since the original entry is on the thread's stack
1094 
1095 		// we've got an entry
1096 		Thread* thread = entry.thread;
1097 
1098 		// make sure the thread isn't running anymore
1099 		InterruptsSpinLocker schedulerLocker(thread->scheduler_lock);
1100 		ASSERT(thread->state == THREAD_STATE_FREE_ON_RESCHED);
1101 		schedulerLocker.Unlock();
1102 
1103 		// remove this thread from the kernel team -- this makes it
1104 		// inaccessible
1105 		Team* kernelTeam = team_get_kernel_team();
1106 		TeamLocker kernelTeamLocker(kernelTeam);
1107 		thread->Lock();
1108 
1109 		InterruptsSpinLocker threadCreationLocker(gThreadCreationLock);
1110 		SpinLocker signalLocker(kernelTeam->signal_lock);
1111 		SpinLocker timeLocker(kernelTeam->time_lock);
1112 
1113 		remove_thread_from_team(kernelTeam, thread);
1114 
1115 		timeLocker.Unlock();
1116 		signalLocker.Unlock();
1117 		threadCreationLocker.Unlock();
1118 
1119 		kernelTeamLocker.Unlock();
1120 
1121 		// free the thread structure
1122 		thread->UnlockAndReleaseReference();
1123 	}
1124 
1125 	// can never get here
1126 	return B_OK;
1127 }
1128 
1129 
1130 /*!	Returns the semaphore the thread is currently waiting on.
1131 
1132 	The return value is purely informative.
1133 	The caller must hold the scheduler lock.
1134 
1135 	\param thread The thread.
1136 	\return The ID of the semaphore the thread is currently waiting on or \c -1,
1137 		if it isn't waiting on a semaphore.
1138 */
1139 static sem_id
1140 get_thread_wait_sem(Thread* thread)
1141 {
1142 	if (thread->state == B_THREAD_WAITING
1143 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
1144 		return (sem_id)(addr_t)thread->wait.object;
1145 	}
1146 	return -1;
1147 }
1148 
1149 
1150 /*!	Fills the thread_info structure with information from the specified thread.
1151 	The caller must hold the thread's lock and the scheduler lock.
1152 */
1153 static void
1154 fill_thread_info(Thread *thread, thread_info *info, size_t size)
1155 {
1156 	info->thread = thread->id;
1157 	info->team = thread->team->id;
1158 
1159 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
1160 
1161 	info->sem = -1;
1162 
1163 	if (thread->state == B_THREAD_WAITING) {
1164 		info->state = B_THREAD_WAITING;
1165 
1166 		switch (thread->wait.type) {
1167 			case THREAD_BLOCK_TYPE_SNOOZE:
1168 				info->state = B_THREAD_ASLEEP;
1169 				break;
1170 
1171 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1172 			{
1173 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1174 				if (sem == thread->msg.read_sem)
1175 					info->state = B_THREAD_RECEIVING;
1176 				else
1177 					info->sem = sem;
1178 				break;
1179 			}
1180 
1181 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1182 			default:
1183 				break;
1184 		}
1185 	} else
1186 		info->state = (thread_state)thread->state;
1187 
1188 	info->priority = thread->priority;
1189 	info->stack_base = (void *)thread->user_stack_base;
1190 	info->stack_end = (void *)(thread->user_stack_base
1191 		+ thread->user_stack_size);
1192 
1193 	InterruptsSpinLocker threadTimeLocker(thread->time_lock);
1194 	info->user_time = thread->user_time;
1195 	info->kernel_time = thread->kernel_time;
1196 }
1197 
1198 
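/*!	Delivers a message (a code plus an optional buffer of at most
	THREAD_MAX_MESSAGE_SIZE bytes) to the given thread's single-slot message
	store. Blocks on the target's write semaphore until a previously sent
	message has been read, then copies the data and releases the target's
	read semaphore to wake the receiver.
*/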
1199 static status_t
1200 send_data_etc(thread_id id, int32 code, const void *buffer, size_t bufferSize,
1201 	int32 flags)
1202 {
1203 	// get the thread
1204 	Thread *target = Thread::Get(id);
1205 	if (target == NULL)
1206 		return B_BAD_THREAD_ID;
1207 	BReference<Thread> targetReference(target, true);
1208 
1209 	// get the write semaphore
1210 	ThreadLocker targetLocker(target);
1211 	sem_id cachedSem = target->msg.write_sem;
1212 	targetLocker.Unlock();
1213 
1214 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
1215 		return B_NO_MEMORY;
1216 
1217 	status_t status = acquire_sem_etc(cachedSem, 1, flags, 0);
1218 	if (status == B_INTERRUPTED) {
1219 		// we got interrupted by a signal
1220 		return status;
1221 	}
1222 	if (status != B_OK) {
1223 		// Any other acquisition problems may be due to thread deletion
1224 		return B_BAD_THREAD_ID;
1225 	}
1226 
1227 	void* data;
1228 	if (bufferSize > 0) {
1229 		data = malloc(bufferSize);
1230 		if (data == NULL)
1231 			return B_NO_MEMORY;
1232 		if (user_memcpy(data, buffer, bufferSize) != B_OK) {
1233 			free(data);
1234 			return B_BAD_DATA;
1235 		}
1236 	} else
1237 		data = NULL;
1238 
1239 	targetLocker.Lock();
1240 
1241 	// The target thread could have been deleted at this point.
1242 	if (!target->IsAlive()) {
1243 		targetLocker.Unlock();
1244 		free(data);
1245 		return B_BAD_THREAD_ID;
1246 	}
1247 
1248 	// Save the message information
1249 	target->msg.sender = thread_get_current_thread()->id;
1250 	target->msg.code = code;
1251 	target->msg.size = bufferSize;
1252 	target->msg.buffer = data;
1253 	cachedSem = target->msg.read_sem;
1254 
1255 	targetLocker.Unlock();
1256 
1257 	release_sem(cachedSem);
1258 	return B_OK;
1259 }
1260 
1261 
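/*!	Receives a message sent to the current thread via send_data_etc(): waits
	on the thread's read semaphore, copies at most \a bufferSize bytes of the
	message into \a buffer, stores the sender's ID in \a _sender, and returns
	the message code.
*/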
1262 static int32
1263 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
1264 	int32 flags)
1265 {
1266 	Thread *thread = thread_get_current_thread();
1267 	size_t size;
1268 	int32 code;
1269 
1270 	status_t status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
1271 	if (status != B_OK) {
1272 		// Actually, we're not supposed to return error codes
1273 		// but since the only reason this can fail is that we
1274 		// were killed, it's probably okay to do so (but also
1275 		// meaningless).
1276 		return status;
1277 	}
1278 
1279 	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
1280 		size = min_c(bufferSize, thread->msg.size);
1281 		status = user_memcpy(buffer, thread->msg.buffer, size);
1282 		if (status != B_OK) {
1283 			free(thread->msg.buffer);
1284 			release_sem(thread->msg.write_sem);
1285 			return status;
1286 		}
1287 	}
1288 
1289 	*_sender = thread->msg.sender;
1290 	code = thread->msg.code;
1291 
1292 	free(thread->msg.buffer);
1293 	release_sem(thread->msg.write_sem);
1294 
1295 	return code;
1296 }
1297 
1298 
1299 static status_t
1300 common_getrlimit(int resource, struct rlimit * rlp)
1301 {
1302 	if (!rlp)
1303 		return B_BAD_ADDRESS;
1304 
1305 	switch (resource) {
1306 		case RLIMIT_NOFILE:
1307 		case RLIMIT_NOVMON:
1308 			return vfs_getrlimit(resource, rlp);
1309 
1310 		case RLIMIT_CORE:
1311 			rlp->rlim_cur = 0;
1312 			rlp->rlim_max = 0;
1313 			return B_OK;
1314 
1315 		case RLIMIT_STACK:
1316 		{
1317 			rlp->rlim_cur = USER_MAIN_THREAD_STACK_SIZE;
1318 			rlp->rlim_max = USER_MAIN_THREAD_STACK_SIZE;
1319 			return B_OK;
1320 		}
1321 
1322 		default:
1323 			return EINVAL;
1324 	}
1325 
1326 	return B_OK;
1327 }
1328 
1329 
1330 static status_t
1331 common_setrlimit(int resource, const struct rlimit * rlp)
1332 {
1333 	if (!rlp)
1334 		return B_BAD_ADDRESS;
1335 
1336 	switch (resource) {
1337 		case RLIMIT_NOFILE:
1338 		case RLIMIT_NOVMON:
1339 			return vfs_setrlimit(resource, rlp);
1340 
1341 		case RLIMIT_CORE:
1342 			// We don't support core files, so only allow setting the limits to 0/0.
1343 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
1344 				return EINVAL;
1345 			return B_OK;
1346 
1347 		default:
1348 			return EINVAL;
1349 	}
1350 
1351 	return B_OK;
1352 }
1353 
1354 
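/*!	Blocks the calling thread until the given timeout on the specified clock
	has passed. If the wait is interrupted and \a _remainingTime is given,
	the time still left is returned there. Only CLOCK_MONOTONIC and
	CLOCK_REALTIME are supported.
*/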
1355 static status_t
1356 common_snooze_etc(bigtime_t timeout, clockid_t clockID, uint32 flags,
1357 	bigtime_t* _remainingTime)
1358 {
1359 	switch (clockID) {
1360 		case CLOCK_REALTIME:
1361 			// make sure the B_TIMEOUT_REAL_TIME_BASE flag is set and fall
1362 			// through
1363 			flags |= B_TIMEOUT_REAL_TIME_BASE;
1364 		case CLOCK_MONOTONIC:
1365 		{
1366 			// Store the start time, in case we get interrupted and
1367 			// need to return the remaining time. For absolute timeouts we can
1368 			// still get the time later, if needed.
1369 			bigtime_t startTime
1370 				= _remainingTime != NULL && (flags & B_RELATIVE_TIMEOUT) != 0
1371 					? system_time() : 0;
1372 
1373 			Thread* thread = thread_get_current_thread();
1374 
1375 			thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE,
1376 				NULL);
1377 			status_t status = thread_block_with_timeout(flags, timeout);
1378 
1379 			if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
1380 				return B_OK;
1381 
1382 			// If interrupted, compute the remaining time, if requested.
1383 			if (status == B_INTERRUPTED && _remainingTime != NULL) {
1384 				if ((flags & B_RELATIVE_TIMEOUT) != 0) {
1385 					*_remainingTime = std::max(
1386 						startTime + timeout - system_time(), (bigtime_t)0);
1387 				} else {
1388 					bigtime_t now = (flags & B_TIMEOUT_REAL_TIME_BASE) != 0
1389 						? real_time_clock_usecs() : system_time();
1390 					*_remainingTime = std::max(timeout - now, (bigtime_t)0);
1391 				}
1392 			}
1393 
1394 			return status;
1395 		}
1396 
1397 		case CLOCK_THREAD_CPUTIME_ID:
1398 			// Waiting for ourselves to do something isn't particularly
1399 			// productive.
1400 			return B_BAD_VALUE;
1401 
1402 		case CLOCK_PROCESS_CPUTIME_ID:
1403 		default:
1404 			// We don't have to support those, but we are allowed to. Could be
1405 			// done by creating a UserTimer on the fly with a custom UserEvent
1406 			// that would just wake us up.
1407 			return ENOTSUP;
1408 	}
1409 }
1410 
1411 
1412 //	#pragma mark - debugger calls
1413 
1414 
1415 static int
1416 make_thread_unreal(int argc, char **argv)
1417 {
1418 	int32 id = -1;
1419 
1420 	if (argc > 2) {
1421 		print_debugger_command_usage(argv[0]);
1422 		return 0;
1423 	}
1424 
1425 	if (argc > 1)
1426 		id = strtoul(argv[1], NULL, 0);
1427 
1428 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1429 			Thread* thread = it.Next();) {
1430 		if (id != -1 && thread->id != id)
1431 			continue;
1432 
1433 		if (thread->priority > B_DISPLAY_PRIORITY) {
1434 			scheduler_set_thread_priority(thread, B_NORMAL_PRIORITY);
1435 			kprintf("thread %" B_PRId32 " made unreal\n", thread->id);
1436 		}
1437 	}
1438 
1439 	return 0;
1440 }
1441 
1442 
1443 static int
1444 set_thread_prio(int argc, char **argv)
1445 {
1446 	int32 id;
1447 	int32 prio;
1448 
1449 	if (argc > 3 || argc < 2) {
1450 		print_debugger_command_usage(argv[0]);
1451 		return 0;
1452 	}
1453 
1454 	prio = strtoul(argv[1], NULL, 0);
1455 	if (prio > THREAD_MAX_SET_PRIORITY)
1456 		prio = THREAD_MAX_SET_PRIORITY;
1457 	if (prio < THREAD_MIN_SET_PRIORITY)
1458 		prio = THREAD_MIN_SET_PRIORITY;
1459 
1460 	if (argc > 2)
1461 		id = strtoul(argv[2], NULL, 0);
1462 	else
1463 		id = thread_get_current_thread()->id;
1464 
1465 	bool found = false;
1466 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1467 			Thread* thread = it.Next();) {
1468 		if (thread->id != id)
1469 			continue;
1470 		scheduler_set_thread_priority(thread, prio);
1471 		kprintf("thread %" B_PRId32 " set to priority %" B_PRId32 "\n", id, prio);
1472 		found = true;
1473 		break;
1474 	}
1475 	if (!found)
1476 		kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id);
1477 
1478 	return 0;
1479 }
1480 
1481 
1482 static int
1483 make_thread_suspended(int argc, char **argv)
1484 {
1485 	int32 id;
1486 
1487 	if (argc > 2) {
1488 		print_debugger_command_usage(argv[0]);
1489 		return 0;
1490 	}
1491 
1492 	if (argc == 1)
1493 		id = thread_get_current_thread()->id;
1494 	else
1495 		id = strtoul(argv[1], NULL, 0);
1496 
1497 	bool found = false;
1498 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1499 			Thread* thread = it.Next();) {
1500 		if (thread->id != id)
1501 			continue;
1502 
1503 		Signal signal(SIGSTOP, SI_USER, B_OK, team_get_kernel_team()->id);
1504 		send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
1505 
1506 		kprintf("thread %" B_PRId32 " suspended\n", id);
1507 		found = true;
1508 		break;
1509 	}
1510 	if (!found)
1511 		kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id);
1512 
1513 	return 0;
1514 }
1515 
1516 
1517 static int
1518 make_thread_resumed(int argc, char **argv)
1519 {
1520 	int32 id;
1521 
1522 	if (argc != 2) {
1523 		print_debugger_command_usage(argv[0]);
1524 		return 0;
1525 	}
1526 
1527 	// force user to enter a thread id, as using
1528 	// the current thread is usually not intended
1529 	id = strtoul(argv[1], NULL, 0);
1530 
1531 	bool found = false;
1532 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1533 			Thread* thread = it.Next();) {
1534 		if (thread->id != id)
1535 			continue;
1536 
1537 		if (thread->state == B_THREAD_SUSPENDED) {
1538 			scheduler_enqueue_in_run_queue(thread);
1539 			kprintf("thread %" B_PRId32 " resumed\n", thread->id);
1540 		}
1541 		found = true;
1542 		break;
1543 	}
1544 	if (!found)
1545 		kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id);
1546 
1547 	return 0;
1548 }
1549 
1550 
1551 static int
1552 drop_into_debugger(int argc, char **argv)
1553 {
1554 	status_t err;
1555 	int32 id;
1556 
1557 	if (argc > 2) {
1558 		print_debugger_command_usage(argv[0]);
1559 		return 0;
1560 	}
1561 
1562 	if (argc == 1)
1563 		id = thread_get_current_thread()->id;
1564 	else
1565 		id = strtoul(argv[1], NULL, 0);
1566 
1567 	err = _user_debug_thread(id);
1568 		// TODO: This is a non-trivial syscall doing some locking, so this is
1569 		// really nasty and may go seriously wrong.
1570 	if (err)
1571 		kprintf("drop failed\n");
1572 	else
1573 		kprintf("thread %" B_PRId32 " dropped into user debugger\n", id);
1574 
1575 	return 0;
1576 }
1577 
1578 
1579 /*!	Returns a user-readable string for a thread state.
1580 	Only for use in the kernel debugger.
1581 */
1582 static const char *
1583 state_to_text(Thread *thread, int32 state)
1584 {
1585 	switch (state) {
1586 		case B_THREAD_READY:
1587 			return "ready";
1588 
1589 		case B_THREAD_RUNNING:
1590 			return "running";
1591 
1592 		case B_THREAD_WAITING:
1593 		{
1594 			if (thread != NULL) {
1595 				switch (thread->wait.type) {
1596 					case THREAD_BLOCK_TYPE_SNOOZE:
1597 						return "zzz";
1598 
1599 					case THREAD_BLOCK_TYPE_SEMAPHORE:
1600 					{
1601 						sem_id sem = (sem_id)(addr_t)thread->wait.object;
1602 						if (sem == thread->msg.read_sem)
1603 							return "receive";
1604 						break;
1605 					}
1606 				}
1607 			}
1608 
1609 			return "waiting";
1610 		}
1611 
1612 		case B_THREAD_SUSPENDED:
1613 			return "suspended";
1614 
1615 		case THREAD_STATE_FREE_ON_RESCHED:
1616 			return "death";
1617 
1618 		default:
1619 			return "UNKNOWN";
1620 	}
1621 }
1622 
1623 
1624 static void
1625 print_thread_list_table_head()
1626 {
1627 	kprintf("%-*s       id  state     wait for  %-*s    cpu pri  %-*s   team  "
1628 		"name\n",
1629 		B_PRINTF_POINTER_WIDTH, "thread", B_PRINTF_POINTER_WIDTH, "object",
1630 		B_PRINTF_POINTER_WIDTH, "stack");
1631 }
1632 
1633 
1634 static void
1635 _dump_thread_info(Thread *thread, bool shortInfo)
1636 {
1637 	if (shortInfo) {
1638 		kprintf("%p %6" B_PRId32 "  %-10s", thread, thread->id,
1639 			state_to_text(thread, thread->state));
1640 
1641 		// does it block on a semaphore or a condition variable?
1642 		if (thread->state == B_THREAD_WAITING) {
1643 			switch (thread->wait.type) {
1644 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1645 				{
1646 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1647 					if (sem == thread->msg.read_sem)
1648 						kprintf("%*s", B_PRINTF_POINTER_WIDTH + 15, "");
1649 					else {
1650 						kprintf("sem       %-*" B_PRId32,
1651 							B_PRINTF_POINTER_WIDTH + 5, sem);
1652 					}
1653 					break;
1654 				}
1655 
1656 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1657 					kprintf("cvar      %p   ", thread->wait.object);
1658 					break;
1659 
1660 				case THREAD_BLOCK_TYPE_SNOOZE:
1661 					kprintf("%*s", B_PRINTF_POINTER_WIDTH + 15, "");
1662 					break;
1663 
1664 				case THREAD_BLOCK_TYPE_SIGNAL:
1665 					kprintf("signal%*s", B_PRINTF_POINTER_WIDTH + 9, "");
1666 					break;
1667 
1668 				case THREAD_BLOCK_TYPE_MUTEX:
1669 					kprintf("mutex     %p   ", thread->wait.object);
1670 					break;
1671 
1672 				case THREAD_BLOCK_TYPE_RW_LOCK:
1673 					kprintf("rwlock    %p   ", thread->wait.object);
1674 					break;
1675 
1676 				case THREAD_BLOCK_TYPE_OTHER:
1677 					kprintf("other%*s", B_PRINTF_POINTER_WIDTH + 10, "");
1678 					break;
1679 
1680 				default:
1681 					kprintf("???       %p   ", thread->wait.object);
1682 					break;
1683 			}
1684 		} else
1685 			kprintf("-%*s", B_PRINTF_POINTER_WIDTH + 14, "");
1686 
1687 		// on which CPU does it run?
1688 		if (thread->cpu)
1689 			kprintf("%2d", thread->cpu->cpu_num);
1690 		else
1691 			kprintf(" -");
1692 
1693 		kprintf("%4" B_PRId32 "  %p%5" B_PRId32 "  %s\n", thread->priority,
1694 			(void *)thread->kernel_stack_base, thread->team->id,
1695 			thread->name != NULL ? thread->name : "<NULL>");
1696 
1697 		return;
1698 	}
1699 
1700 	// print the long info
1701 
1702 	struct thread_death_entry *death = NULL;
1703 
1704 	kprintf("THREAD: %p\n", thread);
1705 	kprintf("id:                 %" B_PRId32 " (%#" B_PRIx32 ")\n", thread->id,
1706 		thread->id);
1707 	kprintf("serial_number:      %" B_PRId64 "\n", thread->serial_number);
1708 	kprintf("name:               \"%s\"\n", thread->name);
1709 	kprintf("hash_next:          %p\nteam_next:          %p\n",
1710 		thread->hash_next, thread->team_next);
1711 	kprintf("priority:           %" B_PRId32 " (I/O: %" B_PRId32 ")\n",
1712 		thread->priority, thread->io_priority);
1713 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1714 	kprintf("cpu:                %p ", thread->cpu);
1715 	if (thread->cpu)
1716 		kprintf("(%d)\n", thread->cpu->cpu_num);
1717 	else
1718 		kprintf("\n");
1719 	kprintf("sig_pending:        %#" B_PRIx64 " (blocked: %#" B_PRIx64
1720 		", before sigsuspend(): %#" B_PRIx64 ")\n",
1721 		(int64)thread->ThreadPendingSignals(),
1722 		(int64)thread->sig_block_mask,
1723 		(int64)thread->sigsuspend_original_unblocked_mask);
1724 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1725 
1726 	if (thread->state == B_THREAD_WAITING) {
1727 		kprintf("waiting for:        ");
1728 
1729 		switch (thread->wait.type) {
1730 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1731 			{
1732 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1733 				if (sem == thread->msg.read_sem)
1734 					kprintf("data\n");
1735 				else
1736 					kprintf("semaphore %" B_PRId32 "\n", sem);
1737 				break;
1738 			}
1739 
1740 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1741 				kprintf("condition variable %p\n", thread->wait.object);
1742 				break;
1743 
1744 			case THREAD_BLOCK_TYPE_SNOOZE:
1745 				kprintf("snooze()\n");
1746 				break;
1747 
1748 			case THREAD_BLOCK_TYPE_SIGNAL:
1749 				kprintf("signal\n");
1750 				break;
1751 
1752 			case THREAD_BLOCK_TYPE_MUTEX:
1753 				kprintf("mutex %p\n", thread->wait.object);
1754 				break;
1755 
1756 			case THREAD_BLOCK_TYPE_RW_LOCK:
1757 				kprintf("rwlock %p\n", thread->wait.object);
1758 				break;
1759 
1760 			case THREAD_BLOCK_TYPE_OTHER:
1761 				kprintf("other (%s)\n", (char*)thread->wait.object);
1762 				break;
1763 
1764 			default:
1765 				kprintf("unknown (%p)\n", thread->wait.object);
1766 				break;
1767 		}
1768 	}
1769 
1770 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1771 	kprintf("team:               %p, \"%s\"\n", thread->team,
1772 		thread->team->Name());
1773 	kprintf("  exit.sem:         %" B_PRId32 "\n", thread->exit.sem);
1774 	kprintf("  exit.status:      %#" B_PRIx32 " (%s)\n", thread->exit.status,
1775 		strerror(thread->exit.status));
1776 	kprintf("  exit.waiters:\n");
1777 	while ((death = (struct thread_death_entry*)list_get_next_item(
1778 			&thread->exit.waiters, death)) != NULL) {
1779 		kprintf("\t%p (thread %" B_PRId32 ")\n", death, death->thread);
1780 	}
1781 
1782 	kprintf("kernel_stack_area:  %" B_PRId32 "\n", thread->kernel_stack_area);
1783 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1784 	kprintf("user_stack_area:    %" B_PRId32 "\n", thread->user_stack_area);
1785 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1786 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1787 	kprintf("user_thread:        %p\n", (void *)thread->user_thread);
1788 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1789 		strerror(thread->kernel_errno));
1790 	kprintf("kernel_time:        %" B_PRId64 "\n", thread->kernel_time);
1791 	kprintf("user_time:          %" B_PRId64 "\n", thread->user_time);
1792 	kprintf("flags:              0x%" B_PRIx32 "\n", thread->flags);
1793 	kprintf("architecture dependent section:\n");
1794 	arch_thread_dump_info(&thread->arch_info);
1795 	kprintf("scheduler data:\n");
1796 	scheduler_dump_thread_data(thread);
1797 }
1798 
1799 
1800 static int
1801 dump_thread_info(int argc, char **argv)
1802 {
1803 	bool shortInfo = false;
1804 	int argi = 1;
1805 	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
1806 		shortInfo = true;
1807 		print_thread_list_table_head();
1808 		argi++;
1809 	}
1810 
1811 	if (argi == argc) {
1812 		_dump_thread_info(thread_get_current_thread(), shortInfo);
1813 		return 0;
1814 	}
1815 
1816 	for (; argi < argc; argi++) {
1817 		const char *name = argv[argi];
1818 		ulong arg = strtoul(name, NULL, 0);
1819 
1820 		if (IS_KERNEL_ADDRESS(arg)) {
1821 			// semi-hack
1822 			_dump_thread_info((Thread *)arg, shortInfo);
1823 			continue;
1824 		}
1825 
1826 		// walk through the thread list, trying to match name or id
1827 		bool found = false;
1828 		for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1829 				Thread* thread = it.Next();) {
1830 			if (!strcmp(name, thread->name) || thread->id == (thread_id)arg) {
1831 				_dump_thread_info(thread, shortInfo);
1832 				found = true;
1833 				break;
1834 			}
1835 		}
1836 
1837 		if (!found)
1838 			kprintf("thread \"%s\" (%" B_PRId32 ") doesn't exist!\n", name, (thread_id)arg);
1839 	}
1840 
1841 	return 0;
1842 }
1843 
1844 
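/*!	Kernel debugger command that lists all threads, optionally filtered by
	state (depending on the name the command was invoked under), semaphore,
	call site, or team.
*/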
1845 static int
1846 dump_thread_list(int argc, char **argv)
1847 {
1848 	bool realTimeOnly = false;
1849 	bool calling = false;
1850 	const char *callSymbol = NULL;
1851 	addr_t callStart = 0;
1852 	addr_t callEnd = 0;
1853 	int32 requiredState = 0;
1854 	team_id team = -1;
1855 	sem_id sem = -1;
1856 
1857 	if (!strcmp(argv[0], "realtime"))
1858 		realTimeOnly = true;
1859 	else if (!strcmp(argv[0], "ready"))
1860 		requiredState = B_THREAD_READY;
1861 	else if (!strcmp(argv[0], "running"))
1862 		requiredState = B_THREAD_RUNNING;
1863 	else if (!strcmp(argv[0], "waiting")) {
1864 		requiredState = B_THREAD_WAITING;
1865 
1866 		if (argc > 1) {
1867 			sem = strtoul(argv[1], NULL, 0);
1868 			if (sem == 0)
1869 				kprintf("ignoring invalid semaphore argument.\n");
1870 		}
1871 	} else if (!strcmp(argv[0], "calling")) {
1872 		if (argc < 2) {
1873 			kprintf("Need to give a symbol name or start and end arguments.\n");
1874 			return 0;
1875 		} else if (argc == 3) {
1876 			callStart = parse_expression(argv[1]);
1877 			callEnd = parse_expression(argv[2]);
1878 		} else
1879 			callSymbol = argv[1];
1880 
1881 		calling = true;
1882 	} else if (argc > 1) {
1883 		team = strtoul(argv[1], NULL, 0);
1884 		if (team == 0)
1885 			kprintf("ignoring invalid team argument.\n");
1886 	}
1887 
1888 	print_thread_list_table_head();
1889 
1890 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1891 			Thread* thread = it.Next();) {
1892 		// filter out threads not matching the search criteria
1893 		if ((requiredState && thread->state != requiredState)
1894 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1895 					callStart, callEnd))
1896 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1897 			|| (team > 0 && thread->team->id != team)
1898 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1899 			continue;
1900 
1901 		_dump_thread_info(thread, true);
1902 	}
1903 	return 0;
1904 }
1905 
1906 
1907 //	#pragma mark - private kernel API
1908 
1909 
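/*!	Exits the calling thread: releases its userland resources (user timers,
	user_thread structure, user stack), shuts the team down if this is its
	main thread, charges the thread's CPU time to the team, and parks the
	thread in the kernel team until it can finally be destroyed. Does not
	return.
*/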
1910 void
1911 thread_exit(void)
1912 {
1913 	cpu_status state;
1914 	Thread* thread = thread_get_current_thread();
1915 	Team* team = thread->team;
1916 	Team* kernelTeam = team_get_kernel_team();
1917 	status_t status;
1918 	struct thread_debug_info debugInfo;
1919 	team_id teamID = team->id;
1920 
1921 	TRACE(("thread %" B_PRId32 " exiting w/return code %#" B_PRIx32 "\n",
1922 		thread->id, thread->exit.status));
1923 
1924 	if (!are_interrupts_enabled())
1925 		panic("thread_exit() called with interrupts disabled!\n");
1926 
1927 	// boost our priority to get this over with
1928 	scheduler_set_thread_priority(thread, B_URGENT_DISPLAY_PRIORITY);
1929 
1930 	if (team != kernelTeam) {
1931 		// Delete all user timers associated with the thread.
1932 		ThreadLocker threadLocker(thread);
1933 		thread->DeleteUserTimers(false);
1934 
1935 		// detach the thread's user thread
1936 		user_thread* userThread = thread->user_thread;
1937 		thread->user_thread = NULL;
1938 
1939 		threadLocker.Unlock();
1940 
1941 		// Delete the thread's user thread, if it's not the main thread. If it
1942 		// is, we can save the work, since it will be deleted with the team's
1943 		// address space.
1944 		if (thread != team->main_thread)
1945 			team_free_user_thread(team, userThread);
1946 	}
1947 
1948 	// remember the user stack area -- we will delete it below
1949 	area_id userStackArea = -1;
1950 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1951 		userStackArea = thread->user_stack_area;
1952 		thread->user_stack_area = -1;
1953 	}
1954 
1955 	struct job_control_entry *death = NULL;
1956 	struct thread_death_entry* threadDeathEntry = NULL;
1957 	bool deleteTeam = false;
1958 	port_id debuggerPort = -1;
1959 
1960 	if (team != kernelTeam) {
1961 		user_debug_thread_exiting(thread);
1962 
1963 		if (team->main_thread == thread) {
1964 			// The main thread is exiting. Shut down the whole team.
1965 			deleteTeam = true;
1966 
1967 			// kill off all other threads and the user debugger facilities
1968 			debuggerPort = team_shutdown_team(team);
1969 
1970 			// acquire necessary locks, which are: process group lock, kernel
1971 			// team lock, parent team lock, and the team lock
1972 			team->LockProcessGroup();
1973 			kernelTeam->Lock();
1974 			team->LockTeamAndParent(true);
1975 		} else {
1976 			threadDeathEntry
1977 				= (thread_death_entry*)malloc(sizeof(thread_death_entry));
1978 
1979 			// acquire necessary locks, which are: kernel team lock and the team
1980 			// lock
1981 			kernelTeam->Lock();
1982 			team->Lock();
1983 		}
1984 
1985 		ThreadLocker threadLocker(thread);
1986 
1987 		state = disable_interrupts();
1988 
1989 		// swap address spaces, to make sure we're running on the kernel's pgdir
1990 		vm_swap_address_space(team->address_space, VMAddressSpace::Kernel());
1991 
1992 		WriteSpinLocker teamLocker(thread->team_lock);
1993 		SpinLocker threadCreationLocker(gThreadCreationLock);
1994 			// removing the thread and putting its death entry to the parent
1995 			// removing the thread and putting its death entry into the parent
1996 			// team needs to be an atomic operation
1997 		// remember how long this thread lasted
1998 		bigtime_t now = system_time();
1999 
2000 		InterruptsSpinLocker signalLocker(kernelTeam->signal_lock);
2001 		SpinLocker teamTimeLocker(kernelTeam->time_lock);
2002 		SpinLocker threadTimeLocker(thread->time_lock);
2003 
2004 		thread->kernel_time += now - thread->last_time;
2005 		thread->last_time = now;
2006 
2007 		team->dead_threads_kernel_time += thread->kernel_time;
2008 		team->dead_threads_user_time += thread->user_time;
2009 
2010 		// stop/update thread/team CPU time user timers
2011 		if (thread->HasActiveCPUTimeUserTimers()
2012 			|| team->HasActiveCPUTimeUserTimers()) {
2013 			user_timer_stop_cpu_timers(thread, NULL);
2014 		}
2015 
2016 		// deactivate CPU time user timers for the thread
2017 		if (thread->HasActiveCPUTimeUserTimers())
2018 			thread->DeactivateCPUTimeUserTimers();
2019 
2020 		threadTimeLocker.Unlock();
2021 
2022 		// put the thread into the kernel team until it dies
2023 		remove_thread_from_team(team, thread);
2024 		insert_thread_into_team(kernelTeam, thread);
2025 
2026 		teamTimeLocker.Unlock();
2027 		signalLocker.Unlock();
2028 
2029 		teamLocker.Unlock();
2030 
2031 		if (team->death_entry != NULL) {
2032 			if (--team->death_entry->remaining_threads == 0)
2033 				team->death_entry->condition.NotifyOne();
2034 		}
2035 
2036 		if (deleteTeam) {
2037 			Team* parent = team->parent;
2038 
2039 			// Set the team job control state to "dead" and detach the job
2040 			// control entry from our team struct.
2041 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, NULL);
2042 			death = team->job_control_entry;
2043 			team->job_control_entry = NULL;
2044 
2045 			if (death != NULL) {
2046 				death->InitDeadState();
2047 
2048 				// team_set_job_control_state() already moved our entry
2049 				// into the parent's list. We just check the soft limit of
2050 				// death entries.
2051 				if (parent->dead_children.count > MAX_DEAD_CHILDREN) {
2052 					death = parent->dead_children.entries.RemoveHead();
2053 					parent->dead_children.count--;
2054 				} else
2055 					death = NULL;
2056 			}
2057 
2058 			threadCreationLocker.Unlock();
2059 			restore_interrupts(state);
2060 
2061 			threadLocker.Unlock();
2062 
2063 			// Get a temporary reference to the team's process group
2064 			// -- team_remove_team() removes the team from the group, which
2065 			// might destroy it otherwise and we wouldn't be able to unlock it.
2066 			ProcessGroup* group = team->group;
2067 			group->AcquireReference();
2068 
2069 			pid_t foregroundGroupToSignal;
2070 			team_remove_team(team, foregroundGroupToSignal);
2071 
2072 			// unlock everything but the parent team
2073 			team->Unlock();
2074 			if (parent != kernelTeam)
2075 				kernelTeam->Unlock();
2076 			group->Unlock();
2077 			group->ReleaseReference();
2078 
2079 			// Send SIGCHLD to the parent as long as we still have its lock.
2080 			// This makes job control state change + signalling atomic.
2081 			Signal childSignal(SIGCHLD, team->exit.reason, B_OK, team->id);
2082 			if (team->exit.reason == CLD_EXITED) {
2083 				childSignal.SetStatus(team->exit.status);
2084 			} else {
2085 				childSignal.SetStatus(team->exit.signal);
2086 				childSignal.SetSendingUser(team->exit.signaling_user);
2087 			}
2088 			send_signal_to_team(parent, childSignal, B_DO_NOT_RESCHEDULE);
2089 
2090 			// also unlock the parent
2091 			parent->Unlock();
2092 
2093 			// If the team was a session leader with controlling TTY, we have
2094 			// to send SIGHUP to the foreground process group.
2095 			if (foregroundGroupToSignal >= 0) {
2096 				Signal groupSignal(SIGHUP, SI_USER, B_OK, team->id);
2097 				send_signal_to_process_group(foregroundGroupToSignal,
2098 					groupSignal, B_DO_NOT_RESCHEDULE);
2099 			}
2100 		} else {
2101 			// The thread is not the main thread. We store a thread death entry
2102 			// for it, unless someone is already waiting for it.
2103 			if (threadDeathEntry != NULL
2104 				&& list_is_empty(&thread->exit.waiters)) {
2105 				threadDeathEntry->thread = thread->id;
2106 				threadDeathEntry->status = thread->exit.status;
2107 
2108 				// add entry -- remove an old one, if we hit the limit
2109 				list_add_item(&team->dead_threads, threadDeathEntry);
2110 				team->dead_threads_count++;
2111 				threadDeathEntry = NULL;
2112 
2113 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
2114 					threadDeathEntry
2115 						= (thread_death_entry*)list_remove_head_item(
2116 							&team->dead_threads);
2117 					team->dead_threads_count--;
2118 				}
2119 			}
2120 
2121 			threadCreationLocker.Unlock();
2122 			restore_interrupts(state);
2123 
2124 			threadLocker.Unlock();
2125 			team->Unlock();
2126 			kernelTeam->Unlock();
2127 		}
2128 
2129 		TRACE(("thread_exit: thread %" B_PRId32 " now a kernel thread!\n",
2130 			thread->id));
2131 	}
2132 
2133 	free(threadDeathEntry);
2134 
2135 	// delete the team if we're its main thread
2136 	if (deleteTeam) {
2137 		team_delete_team(team, debuggerPort);
2138 
2139 		// we need to delete any death entry that made it to here
2140 		delete death;
2141 	}
2142 
2143 	ThreadLocker threadLocker(thread);
2144 
2145 	state = disable_interrupts();
2146 	SpinLocker threadCreationLocker(gThreadCreationLock);
2147 
2148 	// mark invisible in global hash/list, so it's no longer accessible
2149 	SpinLocker threadHashLocker(sThreadHashLock);
2150 	thread->visible = false;
2151 	sUsedThreads--;
2152 	threadHashLocker.Unlock();
2153 
2154 	// Stop debugging for this thread
2155 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
2156 	debugInfo = thread->debug_info;
2157 	clear_thread_debug_info(&thread->debug_info, true);
2158 	threadDebugInfoLocker.Unlock();
2159 
2160 	// Remove the select infos. We notify them a little later.
2161 	select_info* selectInfos = thread->select_infos;
2162 	thread->select_infos = NULL;
2163 
2164 	threadCreationLocker.Unlock();
2165 	restore_interrupts(state);
2166 
2167 	threadLocker.Unlock();
2168 
2169 	destroy_thread_debug_info(&debugInfo);
2170 
2171 	// notify select infos
2172 	select_info* info = selectInfos;
2173 	while (info != NULL) {
2174 		select_sync* sync = info->sync;
2175 
2176 		notify_select_events(info, B_EVENT_INVALID);
2177 		info = info->next;
2178 		put_select_sync(sync);
2179 	}
2180 
2181 	// notify listeners
2182 	sNotificationService.Notify(THREAD_REMOVED, thread);
2183 
2184 	// shutdown the thread messaging
2185 
2186 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
2187 	if (status == B_WOULD_BLOCK) {
2188 		// there is data waiting for us, so let us eat it
2189 		thread_id sender;
2190 
2191 		delete_sem(thread->msg.write_sem);
2192 			// first, let's remove all possibly waiting writers
2193 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
2194 	} else {
2195 		// we probably own the semaphore here, and we're the last to do so
2196 		delete_sem(thread->msg.write_sem);
2197 	}
2198 	// now we can safely remove the msg.read_sem
2199 	delete_sem(thread->msg.read_sem);
2200 
2201 	// fill all death entries and delete the sem that others will use to wait
2202 	// for us
2203 	{
2204 		sem_id cachedExitSem = thread->exit.sem;
2205 
2206 		ThreadLocker threadLocker(thread);
2207 
2208 		// make sure no one will grab this semaphore again
2209 		thread->exit.sem = -1;
2210 
2211 		// fill all death entries
2212 		thread_death_entry* entry = NULL;
2213 		while ((entry = (thread_death_entry*)list_get_next_item(
2214 				&thread->exit.waiters, entry)) != NULL) {
2215 			entry->status = thread->exit.status;
2216 		}
2217 
2218 		threadLocker.Unlock();
2219 
2220 		delete_sem(cachedExitSem);
2221 	}
2222 
2223 	// delete the user stack, if this was a user thread
2224 	if (!deleteTeam && userStackArea >= 0) {
2225 		// We postponed deleting the user stack until now, since this way all
2226 		// notifications for the thread's death are out already and all other
2227 		// threads waiting for this thread's death and some object on its stack
2228 		// will wake up before we (try to) delete the stack area. Of most
2229 		// relevance is probably the case where this is the main thread and
2230 		// other threads use objects on its stack -- so we want them terminated
2231 		// first.
2232 		// When the team is deleted, all areas are deleted anyway, so we don't
2233 		// need to do that explicitly in that case.
2234 		vm_delete_area(teamID, userStackArea, true);
2235 	}
2236 
2237 	// notify the debugger
2238 	if (teamID != kernelTeam->id)
2239 		user_debug_thread_deleted(teamID, thread->id);
2240 
2241 	// enqueue in the undertaker list and reschedule for the last time
2242 	UndertakerEntry undertakerEntry(thread, teamID);
2243 
2244 	disable_interrupts();
2245 
2246 	SpinLocker schedulerLocker(thread->scheduler_lock);
2247 
2248 	SpinLocker undertakerLocker(sUndertakerLock);
2249 	sUndertakerEntries.Add(&undertakerEntry);
2250 	sUndertakerCondition.NotifyOne();
2251 	undertakerLocker.Unlock();
2252 
2253 	scheduler_reschedule(THREAD_STATE_FREE_ON_RESCHED);
2254 
2255 	panic("should never get here\n");
2256 }
2257 
2258 
2259 /*!	Called in the interrupt handler code when a thread enters
2260 	the kernel for any reason.
2261 	Only tracks time for now.
2262 	Interrupts are disabled.
2263 */
2264 void
2265 thread_at_kernel_entry(bigtime_t now)
2266 {
2267 	Thread *thread = thread_get_current_thread();
2268 
2269 	TRACE(("thread_at_kernel_entry: entry thread %" B_PRId32 "\n", thread->id));
2270 
2271 	// track user time
2272 	SpinLocker threadTimeLocker(thread->time_lock);
2273 	thread->user_time += now - thread->last_time;
2274 	thread->last_time = now;
2275 	thread->in_kernel = true;
2276 	threadTimeLocker.Unlock();
2277 }
2278 
2279 
2280 /*!	Called whenever a thread exits kernel space to user space.
2281 	Tracks time, handles signals, ...
2282 	Interrupts must be enabled. When the function returns, interrupts will be
2283 	disabled.
2284 	The function may not return. This e.g. happens when the thread has received
2285 	a deadly signal.
2286 */
2287 void
2288 thread_at_kernel_exit(void)
2289 {
2290 	Thread *thread = thread_get_current_thread();
2291 
2292 	TRACE(("thread_at_kernel_exit: exit thread %" B_PRId32 "\n", thread->id));
2293 
2294 	handle_signals(thread);
2295 
2296 	disable_interrupts();
2297 
2298 	// track kernel time
2299 	bigtime_t now = system_time();
2300 	SpinLocker threadTimeLocker(thread->time_lock);
2301 	thread->in_kernel = false;
2302 	thread->kernel_time += now - thread->last_time;
2303 	thread->last_time = now;
2304 }
2305 
2306 
2307 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
2308 	and no debugging shall be done.
2309 	Interrupts must be disabled.
2310 */
2311 void
2312 thread_at_kernel_exit_no_signals(void)
2313 {
2314 	Thread *thread = thread_get_current_thread();
2315 
2316 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %" B_PRId32 "\n",
2317 		thread->id));
2318 
2319 	// track kernel time
2320 	bigtime_t now = system_time();
2321 	SpinLocker threadTimeLocker(thread->time_lock);
2322 	thread->in_kernel = false;
2323 	thread->kernel_time += now - thread->last_time;
2324 	thread->last_time = now;
2325 }
2326 
2327 
2328 void
2329 thread_reset_for_exec(void)
2330 {
2331 	Thread* thread = thread_get_current_thread();
2332 
2333 	ThreadLocker threadLocker(thread);
2334 
2335 	// delete user-defined timers
2336 	thread->DeleteUserTimers(true);
2337 
2338 	// cancel pre-defined timer
2339 	if (UserTimer* timer = thread->UserTimerFor(USER_TIMER_REAL_TIME_ID))
2340 		timer->Cancel();
2341 
2342 	// reset user_thread and user stack
2343 	thread->user_thread = NULL;
2344 	thread->user_stack_area = -1;
2345 	thread->user_stack_base = 0;
2346 	thread->user_stack_size = 0;
2347 
2348 	// reset signals
2349 	thread->ResetSignalsOnExec();
2350 
2351 	// reset thread CPU time clock
2352 	InterruptsSpinLocker timeLocker(thread->time_lock);
2353 	thread->cpu_clock_offset = -thread->CPUTime(false);
2354 }
2355 
2356 
2357 thread_id
2358 allocate_thread_id()
2359 {
2360 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
2361 
2362 	// find the next unused ID
2363 	thread_id id;
2364 	do {
2365 		id = sNextThreadID++;
2366 
2367 		// deal with integer overflow
2368 		if (sNextThreadID < 0)
2369 			sNextThreadID = 2;
2370 
2371 		// check whether the ID is already in use
2372 	} while (sThreadHash.Lookup(id, false) != NULL);
2373 
2374 	return id;
2375 }
2376 
2377 
2378 thread_id
2379 peek_next_thread_id()
2380 {
2381 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
2382 	return sNextThreadID;
2383 }
2384 
2385 
2386 /*!	Yield the CPU to other threads.
2387 	The thread will continue to run if there is no other thread in the ready
2388 	state; if it has a higher priority than the other ready threads, it still
2389 	has a good chance to continue.
2390 */
2391 void
2392 thread_yield(void)
2393 {
2394 	Thread *thread = thread_get_current_thread();
2395 	if (thread == NULL)
2396 		return;
2397 
2398 	InterruptsSpinLocker _(thread->scheduler_lock);
2399 
2400 	thread->has_yielded = true;
2401 	scheduler_reschedule(B_THREAD_READY);
2402 }
2403 
2404 
2405 void
2406 thread_map(void (*function)(Thread* thread, void* data), void* data)
2407 {
2408 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
2409 
2410 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
2411 		Thread* thread = it.Next();) {
2412 		function(thread, data);
2413 	}
2414 }
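

/*	Usage sketch (illustrative only, not from the original source):
	thread_map() calls the given function for every thread in the hash table
	while holding the thread hash lock, so the callback must not block. The
	counting callback below is hypothetical.

		static void
		example_count_thread(Thread* thread, void* data)
		{
			(*(int32*)data)++;
		}

		static int32
		example_count_threads()
		{
			int32 count = 0;
			thread_map(&example_count_thread, &count);
			return count;
		}
*/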
2415 
2416 
2417 /*!	Kernel private thread creation function.
2418 */
2419 thread_id
2420 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
2421 	void *arg, team_id team)
2422 {
2423 	return thread_create_thread(
2424 		ThreadCreationAttributes(function, name, priority, arg, team),
2425 		true);
2426 }
2427 
2428 
2429 status_t
2430 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
2431 	status_t *_returnCode)
2432 {
2433 	if (id < 0)
2434 		return B_BAD_THREAD_ID;
2435 
2436 	// get the thread, queue our death entry, and fetch the semaphore we have to
2437 	// wait on
2438 	sem_id exitSem = B_BAD_THREAD_ID;
2439 	struct thread_death_entry death;
2440 
2441 	Thread* thread = Thread::GetAndLock(id);
2442 	if (thread != NULL) {
2443 		// remember the semaphore we have to wait on and place our death entry
2444 		exitSem = thread->exit.sem;
2445 		if (exitSem >= 0)
2446 			list_add_link_to_head(&thread->exit.waiters, &death);
2447 
2448 		thread->UnlockAndReleaseReference();
2449 
2450 		if (exitSem < 0)
2451 			return B_BAD_THREAD_ID;
2452 	} else {
2453 		// we couldn't find this thread -- maybe it's already gone, and we'll
2454 		// find its death entry in our team
2455 		Team* team = thread_get_current_thread()->team;
2456 		TeamLocker teamLocker(team);
2457 
2458 		// check the child death entries first (i.e. main threads of child
2459 		// teams)
2460 		bool deleteEntry;
2461 		job_control_entry* freeDeath
2462 			= team_get_death_entry(team, id, &deleteEntry);
2463 		if (freeDeath != NULL) {
2464 			death.status = freeDeath->status;
2465 			if (deleteEntry)
2466 				delete freeDeath;
2467 		} else {
2468 			// check the thread death entries of the team (non-main threads)
2469 			thread_death_entry* threadDeathEntry = NULL;
2470 			while ((threadDeathEntry = (thread_death_entry*)list_get_next_item(
2471 					&team->dead_threads, threadDeathEntry)) != NULL) {
2472 				if (threadDeathEntry->thread == id) {
2473 					list_remove_item(&team->dead_threads, threadDeathEntry);
2474 					team->dead_threads_count--;
2475 					death.status = threadDeathEntry->status;
2476 					free(threadDeathEntry);
2477 					break;
2478 				}
2479 			}
2480 
2481 			if (threadDeathEntry == NULL)
2482 				return B_BAD_THREAD_ID;
2483 		}
2484 
2485 		// we found the thread's death entry in our team
2486 		if (_returnCode)
2487 			*_returnCode = death.status;
2488 
2489 		return B_OK;
2490 	}
2491 
2492 	// we need to wait for the death of the thread
2493 
2494 	resume_thread(id);
2495 		// make sure we don't wait forever on a suspended thread
2496 
2497 	status_t status = acquire_sem_etc(exitSem, 1, flags, timeout);
2498 
2499 	if (status == B_OK) {
2500 		// this should never happen as the thread deletes the semaphore on exit
2501 		panic("could acquire exit_sem for thread %" B_PRId32 "\n", id);
2502 	} else if (status == B_BAD_SEM_ID) {
2503 		// this is the way the thread normally exits
2504 		status = B_OK;
2505 	} else {
2506 		// We were probably interrupted or the timeout occurred; we need to
2507 		// remove our death entry now.
2508 		thread = Thread::GetAndLock(id);
2509 		if (thread != NULL) {
2510 			list_remove_link(&death);
2511 			thread->UnlockAndReleaseReference();
2512 		} else {
2513 			// The thread is already gone, so we need to wait uninterruptibly
2514 			// for its exit semaphore to make sure our death entry stays valid.
2515 			// It won't take long, since the thread is apparently already in the
2516 			// middle of the cleanup.
2517 			acquire_sem(exitSem);
2518 			status = B_OK;
2519 		}
2520 	}
2521 
2522 	if (status == B_OK && _returnCode != NULL)
2523 		*_returnCode = death.status;
2524 
2525 	return status;
2526 }
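

/*	Usage sketch (illustrative only, not from the original source): how kernel
	code typically combines spawn_kernel_thread(), resume_thread(), and
	wait_for_thread(). The worker function is hypothetical; threads are
	created in the suspended state, hence the resume_thread() call.

		static status_t
		example_worker(void* data)
		{
			// ... do the actual work ...
			return B_OK;
		}

		static status_t
		example_spawn_and_wait()
		{
			thread_id worker = spawn_kernel_thread(&example_worker,
				"example worker", B_NORMAL_PRIORITY, NULL);
			if (worker < 0)
				return worker;

			resume_thread(worker);

			status_t returnCode;
			status_t error = wait_for_thread(worker, &returnCode);
				// same as wait_for_thread_etc(worker, 0, 0, &returnCode)
			return error == B_OK ? returnCode : error;
		}
*/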
2527 
2528 
2529 status_t
2530 select_thread(int32 id, struct select_info* info, bool kernel)
2531 {
2532 	// get and lock the thread
2533 	Thread* thread = Thread::GetAndLock(id);
2534 	if (thread == NULL)
2535 		return B_BAD_THREAD_ID;
2536 	BReference<Thread> threadReference(thread, true);
2537 	ThreadLocker threadLocker(thread, true);
2538 
2539 	// We support only B_EVENT_INVALID at the moment.
2540 	info->selected_events &= B_EVENT_INVALID;
2541 
2542 	// add info to list
2543 	if (info->selected_events != 0) {
2544 		info->next = thread->select_infos;
2545 		thread->select_infos = info;
2546 
2547 		// we need a sync reference
2548 		atomic_add(&info->sync->ref_count, 1);
2549 	}
2550 
2551 	return B_OK;
2552 }
2553 
2554 
2555 status_t
2556 deselect_thread(int32 id, struct select_info* info, bool kernel)
2557 {
2558 	// get and lock the thread
2559 	Thread* thread = Thread::GetAndLock(id);
2560 	if (thread == NULL)
2561 		return B_BAD_THREAD_ID;
2562 	BReference<Thread> threadReference(thread, true);
2563 	ThreadLocker threadLocker(thread, true);
2564 
2565 	// remove info from list
2566 	select_info** infoLocation = &thread->select_infos;
2567 	while (*infoLocation != NULL && *infoLocation != info)
2568 		infoLocation = &(*infoLocation)->next;
2569 
2570 	if (*infoLocation != info)
2571 		return B_OK;
2572 
2573 	*infoLocation = info->next;
2574 
2575 	threadLocker.Unlock();
2576 
2577 	// surrender sync reference
2578 	put_select_sync(info->sync);
2579 
2580 	return B_OK;
2581 }
2582 
2583 
2584 int32
2585 thread_max_threads(void)
2586 {
2587 	return sMaxThreads;
2588 }
2589 
2590 
2591 int32
2592 thread_used_threads(void)
2593 {
2594 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
2595 	return sUsedThreads;
2596 }
2597 
2598 
2599 /*!	Returns a user-readable string for a thread state.
2600 	Only for use in the kernel debugger.
2601 */
2602 const char*
2603 thread_state_to_text(Thread* thread, int32 state)
2604 {
2605 	return state_to_text(thread, state);
2606 }
2607 
2608 
2609 int32
2610 thread_get_io_priority(thread_id id)
2611 {
2612 	Thread* thread = Thread::GetAndLock(id);
2613 	if (thread == NULL)
2614 		return B_BAD_THREAD_ID;
2615 	BReference<Thread> threadReference(thread, true);
2616 	ThreadLocker threadLocker(thread, true);
2617 
2618 	int32 priority = thread->io_priority;
2619 	if (priority < 0) {
2620 		// negative I/O priority means using the (CPU) priority
2621 		priority = thread->priority;
2622 	}
2623 
2624 	return priority;
2625 }
2626 
2627 
2628 void
2629 thread_set_io_priority(int32 priority)
2630 {
2631 	Thread* thread = thread_get_current_thread();
2632 	ThreadLocker threadLocker(thread);
2633 
2634 	thread->io_priority = priority;
2635 }
2636 
2637 
2638 status_t
2639 thread_init(kernel_args *args)
2640 {
2641 	TRACE(("thread_init: entry\n"));
2642 
2643 	// create the thread hash table
2644 	new(&sThreadHash) ThreadHashTable();
2645 	if (sThreadHash.Init(128) != B_OK)
2646 		panic("thread_init(): failed to init thread hash table!");
2647 
2648 	// create the thread structure object cache
2649 	sThreadCache = create_object_cache("threads", sizeof(Thread), 16, NULL,
2650 		NULL, NULL);
2651 		// Note: The x86 port requires 16 byte alignment of thread structures.
2652 	if (sThreadCache == NULL)
2653 		panic("thread_init(): failed to allocate thread object cache!");
2654 
2655 	if (arch_thread_init(args) < B_OK)
2656 		panic("arch_thread_init() failed!\n");
2657 
2658 	// skip all thread IDs including B_SYSTEM_TEAM, which is reserved
2659 	sNextThreadID = B_SYSTEM_TEAM + 1;
2660 
2661 	// create an idle thread for each cpu
2662 	for (uint32 i = 0; i < args->num_cpus; i++) {
2663 		Thread *thread;
2664 		area_info info;
2665 		char name[64];
2666 
2667 		sprintf(name, "idle thread %" B_PRIu32, i + 1);
2668 		thread = new(&sIdleThreads[i]) Thread(name,
2669 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
2670 		if (thread == NULL || thread->Init(true) != B_OK) {
2671 			panic("error creating idle thread struct\n");
2672 			return B_NO_MEMORY;
2673 		}
2674 
2675 		gCPU[i].running_thread = thread;
2676 
2677 		thread->team = team_get_kernel_team();
2678 		thread->priority = B_IDLE_PRIORITY;
2679 		thread->state = B_THREAD_RUNNING;
2680 		sprintf(name, "idle thread %" B_PRIu32 " kstack", i + 1);
2681 		thread->kernel_stack_area = find_area(name);
2682 
2683 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2684 			panic("error finding idle kstack area\n");
2685 
2686 		thread->kernel_stack_base = (addr_t)info.address;
2687 		thread->kernel_stack_top = thread->kernel_stack_base + info.size;
2688 
2689 		thread->visible = true;
2690 		insert_thread_into_team(thread->team, thread);
2691 
2692 		scheduler_on_thread_init(thread);
2693 	}
2694 	sUsedThreads = args->num_cpus;
2695 
2696 	// init the notification service
2697 	new(&sNotificationService) ThreadNotificationService();
2698 
2699 	sNotificationService.Register();
2700 
2701 	// start the undertaker thread
2702 	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2703 	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2704 
2705 	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2706 		B_DISPLAY_PRIORITY, NULL);
2707 	if (undertakerThread < 0)
2708 		panic("Failed to create undertaker thread!");
2709 	resume_thread(undertakerThread);
2710 
2711 	// set up some debugger commands
2712 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2713 		"[ <team> ]\n"
2714 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2715 		"all threads of the specified team.\n"
2716 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2717 	add_debugger_command_etc("ready", &dump_thread_list,
2718 		"List all ready threads",
2719 		"\n"
2720 		"Prints a list of all threads in ready state.\n", 0);
2721 	add_debugger_command_etc("running", &dump_thread_list,
2722 		"List all running threads",
2723 		"\n"
2724 		"Prints a list of all threads in running state.\n", 0);
2725 	add_debugger_command_etc("waiting", &dump_thread_list,
2726 		"List all waiting threads (optionally for a specific semaphore)",
2727 		"[ <sem> ]\n"
2728 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2729 		"specified, only the threads waiting on that semaphore are listed.\n"
2730 		"  <sem>  - ID of the semaphore.\n", 0);
2731 	add_debugger_command_etc("realtime", &dump_thread_list,
2732 		"List all realtime threads",
2733 		"\n"
2734 		"Prints a list of all threads with realtime priority.\n", 0);
2735 	add_debugger_command_etc("thread", &dump_thread_info,
2736 		"Dump info about a particular thread",
2737 		"[ -s ] ( <id> | <address> | <name> )*\n"
2738 		"Prints information about the specified thread. If no argument is\n"
2739 		"given the current thread is selected.\n"
2740 		"  -s         - Print info in compact table form (like \"threads\").\n"
2741 		"  <id>       - The ID of the thread.\n"
2742 		"  <address>  - The address of the thread structure.\n"
2743 		"  <name>     - The thread's name.\n", 0);
2744 	add_debugger_command_etc("calling", &dump_thread_list,
2745 		"Show all threads that have a specific address in their call chain",
2746 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2747 	add_debugger_command_etc("unreal", &make_thread_unreal,
2748 		"Set realtime priority threads to normal priority",
2749 		"[ <id> ]\n"
2750 		"Sets the priority of all realtime threads or, if given, the one\n"
2751 		"with the specified ID to \"normal\" priority.\n"
2752 		"  <id>  - The ID of the thread.\n", 0);
2753 	add_debugger_command_etc("suspend", &make_thread_suspended,
2754 		"Suspend a thread",
2755 		"[ <id> ]\n"
2756 		"Suspends the thread with the given ID. If no ID argument is given\n"
2757 		"the current thread is selected.\n"
2758 		"  <id>  - The ID of the thread.\n", 0);
2759 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2760 		"<id>\n"
2761 		"Resumes the specified thread, if it is currently suspended.\n"
2762 		"  <id>  - The ID of the thread.\n", 0);
2763 	add_debugger_command_etc("drop", &drop_into_debugger,
2764 		"Drop a thread into the userland debugger",
2765 		"<id>\n"
2766 		"Drops the specified (userland) thread into the userland debugger\n"
2767 		"after leaving the kernel debugger.\n"
2768 		"  <id>  - The ID of the thread.\n", 0);
2769 	add_debugger_command_etc("priority", &set_thread_prio,
2770 		"Set a thread's priority",
2771 		"<priority> [ <id> ]\n"
2772 		"Sets the priority of the thread with the specified ID to the given\n"
2773 		"priority. If no thread ID is given, the current thread is selected.\n"
2774 		"  <priority>  - The thread's new priority (0 - 120)\n"
2775 		"  <id>        - The ID of the thread.\n", 0);
2776 
2777 	return B_OK;
2778 }
2779 
2780 
2781 status_t
2782 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2783 {
2784 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2785 	// so that get_current_cpu and friends will work, which is crucial for
2786 	// a lot of low level routines
2787 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2788 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2789 	return B_OK;
2790 }
2791 
2792 
2793 //	#pragma mark - thread blocking API
2794 
2795 
2796 static status_t
2797 thread_block_timeout(timer* timer)
2798 {
2799 	Thread* thread = (Thread*)timer->user_data;
2800 	thread_unblock(thread, B_TIMED_OUT);
2801 
2802 	return B_HANDLED_INTERRUPT;
2803 }
2804 
2805 
2806 /*!	Blocks the current thread.
2807 
2808 	The thread is blocked until someone else unblocks it. Must be called after a
2809 	call to thread_prepare_to_block(). If the thread has already been unblocked
2810 	after the previous call to thread_prepare_to_block(), this function will
2811 	return immediately. Cf. the documentation of thread_prepare_to_block() for
2812 	more details.
2813 
2814 	The caller must hold the scheduler lock.
2815 
2816 	\param thread The current thread.
2817 	\return The error code passed to the unblocking function. thread_interrupt()
2818 		uses \c B_INTERRUPTED. By convention \c B_OK means that the wait was
2819 		successful while another error code indicates a failure (what that means
2820 		depends on the client code).
2821 */
2822 static inline status_t
2823 thread_block_locked(Thread* thread)
2824 {
2825 	if (thread->wait.status == 1) {
2826 		// check for signals, if interruptible
2827 		if (thread_is_interrupted(thread, thread->wait.flags)) {
2828 			thread->wait.status = B_INTERRUPTED;
2829 		} else
2830 			scheduler_reschedule(B_THREAD_WAITING);
2831 	}
2832 
2833 	return thread->wait.status;
2834 }
2835 
2836 
2837 /*!	Blocks the current thread.
2838 
2839 	The function acquires the scheduler lock and calls thread_block_locked().
2840 	See there for more information.
2841 */
2842 status_t
2843 thread_block()
2844 {
2845 	InterruptsSpinLocker _(thread_get_current_thread()->scheduler_lock);
2846 	return thread_block_locked(thread_get_current_thread());
2847 }
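

/*	Usage sketch (illustrative only, not from the original source): the
	typical prepare/block/unblock protocol implemented by the functions in
	this section. The wait object below is hypothetical; the pointer passed to
	thread_prepare_to_block() only serves debugging purposes. If the waker
	runs between thread_prepare_to_block() and thread_block(), the latter
	returns immediately with the status set by thread_unblock().

		struct example_wait_object {
			spinlock	lock;
			Thread*		waiter;
		};

		static status_t
		example_wait(example_wait_object* object)
		{
			Thread* thread = thread_get_current_thread();
			thread_prepare_to_block(thread, B_CAN_INTERRUPT,
				THREAD_BLOCK_TYPE_OTHER, object);

			InterruptsSpinLocker locker(object->lock);
			object->waiter = thread;
			locker.Unlock();

			return thread_block();
		}

		static void
		example_notify(example_wait_object* object)
		{
			InterruptsSpinLocker locker(object->lock);
			Thread* waiter = object->waiter;
			object->waiter = NULL;
			locker.Unlock();

			if (waiter != NULL)
				thread_unblock(waiter, B_OK);
		}
*/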
2848 
2849 
2850 /*!	Blocks the current thread with a timeout.
2851 
2852 	The thread is blocked until someone else unblocks it or the specified timeout
2853 	occurs. Must be called after a call to thread_prepare_to_block(). If the
2854 	thread has already been unblocked after the previous call to
2855 	thread_prepare_to_block(), this function will return immediately. See
2856 	thread_prepare_to_block() for more details.
2857 
2858 	The caller must not hold the scheduler lock.
2859 
2860 	\param thread The current thread.
2861 	\param timeoutFlags The standard timeout flags:
2862 		- \c B_RELATIVE_TIMEOUT: \a timeout specifies the time to wait.
2863 		- \c B_ABSOLUTE_TIMEOUT: \a timeout specifies the absolute end time when
2864 			the timeout shall occur.
2865 		- \c B_TIMEOUT_REAL_TIME_BASE: Only relevant when \c B_ABSOLUTE_TIMEOUT
2866 			is specified, too. Specifies that \a timeout is a real time, not a
2867 			system time.
2868 		If neither \c B_RELATIVE_TIMEOUT nor \c B_ABSOLUTE_TIMEOUT are
2869 		specified, an infinite timeout is implied and the function behaves like
2870 		thread_block_locked().
2871 	\return The error code passed to the unblocking function. thread_interrupt()
2872 		uses \c B_INTERRUPTED. When the timeout occurred, \c B_TIMED_OUT is
2873 		returned. By convention \c B_OK means that the wait was successful while
2874 		another error code indicates a failure (what that means depends on the
2875 		client code).
2876 */
2877 status_t
2878 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2879 {
2880 	Thread* thread = thread_get_current_thread();
2881 
2882 	InterruptsSpinLocker locker(thread->scheduler_lock);
2883 
2884 	if (thread->wait.status != 1)
2885 		return thread->wait.status;
2886 
2887 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2888 		&& timeout != B_INFINITE_TIMEOUT;
2889 
2890 	if (useTimer) {
2891 		// Timer flags: absolute/relative.
2892 		uint32 timerFlags;
2893 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2894 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2895 		} else {
2896 			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2897 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2898 				timerFlags |= B_TIMER_REAL_TIME_BASE;
2899 		}
2900 
2901 		// install the timer
2902 		thread->wait.unblock_timer.user_data = thread;
2903 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2904 			timerFlags);
2905 	}
2906 
2907 	// block
2908 	status_t error = thread_block_locked(thread);
2909 
2910 	locker.Unlock();
2911 
2912 	// cancel timer, if it didn't fire
2913 	if (error != B_TIMED_OUT && useTimer)
2914 		cancel_timer(&thread->wait.unblock_timer);
2915 
2916 	return error;
2917 }
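

/*	Usage sketch (illustrative only, not from the original source): blocking
	with a relative timeout. The caller is assumed to have made itself known
	to its waker as in the sketch after thread_block() above.

		thread_prepare_to_block(thread_get_current_thread(), B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "example wait");

		// wait at most 100 ms; B_TIMED_OUT if no one unblocks us in time
		status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT, 100000);
*/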
2918 
2919 
2920 /*!	Unblocks a thread.
2921 
2922 	Acquires the scheduler lock and calls thread_unblock_locked().
2923 	See there for more information.
2924 */
2925 void
2926 thread_unblock(Thread* thread, status_t status)
2927 {
2928 	InterruptsSpinLocker locker(thread->scheduler_lock);
2929 	thread_unblock_locked(thread, status);
2930 }
2931 
2932 
2933 /*!	Unblocks a userland-blocked thread.
2934 	The caller must not hold any locks.
2935 */
2936 static status_t
2937 user_unblock_thread(thread_id threadID, status_t status)
2938 {
2939 	// get the thread
2940 	Thread* thread = Thread::GetAndLock(threadID);
2941 	if (thread == NULL)
2942 		return B_BAD_THREAD_ID;
2943 	BReference<Thread> threadReference(thread, true);
2944 	ThreadLocker threadLocker(thread, true);
2945 
2946 	if (thread->user_thread == NULL)
2947 		return B_NOT_ALLOWED;
2948 
2949 	InterruptsSpinLocker locker(thread->scheduler_lock);
2950 
2951 	if (thread->user_thread->wait_status > 0) {
2952 		thread->user_thread->wait_status = status;
2953 		thread_unblock_locked(thread, status);
2954 	}
2955 
2956 	return B_OK;
2957 }
2958 
2959 
2960 //	#pragma mark - public kernel API
2961 
2962 
2963 void
2964 exit_thread(status_t returnValue)
2965 {
2966 	Thread *thread = thread_get_current_thread();
2967 	Team* team = thread->team;
2968 
2969 	thread->exit.status = returnValue;
2970 
2971 	// if called from a kernel thread, we don't deliver the signal,
2972 	// we just exit directly to keep the user space behaviour of
2973 	// this function
2974 	if (team != team_get_kernel_team()) {
2975 		// If this is its main thread, set the team's exit status.
2976 		if (thread == team->main_thread) {
2977 			TeamLocker teamLocker(team);
2978 
2979 			if (!team->exit.initialized) {
2980 				team->exit.reason = CLD_EXITED;
2981 				team->exit.signal = 0;
2982 				team->exit.signaling_user = 0;
2983 				team->exit.status = returnValue;
2984 				team->exit.initialized = true;
2985 			}
2986 
2987 			teamLocker.Unlock();
2988 		}
2989 
2990 		Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
2991 		send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
2992 	} else
2993 		thread_exit();
2994 }
2995 
2996 
2997 status_t
2998 kill_thread(thread_id id)
2999 {
3000 	if (id <= 0)
3001 		return B_BAD_VALUE;
3002 
3003 	Thread* currentThread = thread_get_current_thread();
3004 
3005 	Signal signal(SIGKILLTHR, SI_USER, B_OK, currentThread->team->id);
3006 	return send_signal_to_thread_id(id, signal, 0);
3007 }
3008 
3009 
3010 status_t
3011 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
3012 {
3013 	return send_data_etc(thread, code, buffer, bufferSize, 0);
3014 }
3015 
3016 
3017 int32
3018 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
3019 {
3020 	return receive_data_etc(sender, buffer, bufferSize, 0);
3021 }
3022 
3023 
3024 bool
3025 has_data(thread_id thread)
3026 {
3027 	// TODO: The thread argument is ignored.
3028 	int32 count;
3029 
3030 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
3031 			&count) != B_OK)
3032 		return false;
3033 
3034 	return count != 0;
3035 }
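

/*	Usage sketch (illustrative only, not from the original source): the
	classic thread messaging API wrapped by the three functions above. The
	receiver thread ID and the message code are hypothetical; receive_data()
	blocks until a message arrives.

		// sender side
		int32 value = 42;
		send_data(receiverThread, 'exam', &value, sizeof(value));

		// receiver side
		thread_id sender;
		int32 value;
		int32 code = receive_data(&sender, &value, sizeof(value));
			// code == 'exam', value == 42
*/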
3036 
3037 
3038 status_t
3039 _get_thread_info(thread_id id, thread_info *info, size_t size)
3040 {
3041 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
3042 		return B_BAD_VALUE;
3043 
3044 	// get the thread
3045 	Thread* thread = Thread::GetAndLock(id);
3046 	if (thread == NULL)
3047 		return B_BAD_THREAD_ID;
3048 	BReference<Thread> threadReference(thread, true);
3049 	ThreadLocker threadLocker(thread, true);
3050 
3051 	// fill the info -- also requires the scheduler lock to be held
3052 	InterruptsSpinLocker locker(thread->scheduler_lock);
3053 
3054 	fill_thread_info(thread, info, size);
3055 
3056 	return B_OK;
3057 }
3058 
3059 
3060 status_t
3061 _get_next_thread_info(team_id teamID, int32 *_cookie, thread_info *info,
3062 	size_t size)
3063 {
3064 	if (info == NULL || size != sizeof(thread_info) || teamID < 0)
3065 		return B_BAD_VALUE;
3066 
3067 	int32 lastID = *_cookie;
3068 
3069 	// get the team
3070 	Team* team = Team::GetAndLock(teamID);
3071 	if (team == NULL)
3072 		return B_BAD_VALUE;
3073 	BReference<Team> teamReference(team, true);
3074 	TeamLocker teamLocker(team, true);
3075 
3076 	Thread* thread = NULL;
3077 
3078 	if (lastID == 0) {
3079 		// We start with the main thread
3080 		thread = team->main_thread;
3081 	} else {
3082 		// Find the thread with the smallest ID greater than lastID (as long as
3083 		// the IDs don't wrap, the list is sorted from highest to lowest).
3084 		// TODO: That is broken not only when the IDs wrap, but also for the
3085 		// kernel team, to which threads are added when they are dying.
3086 		for (Thread* next = team->thread_list; next != NULL;
3087 				next = next->team_next) {
3088 			if (next->id <= lastID)
3089 				break;
3090 
3091 			thread = next;
3092 		}
3093 	}
3094 
3095 	if (thread == NULL)
3096 		return B_BAD_VALUE;
3097 
3098 	lastID = thread->id;
3099 	*_cookie = lastID;
3100 
3101 	ThreadLocker threadLocker(thread);
3102 	InterruptsSpinLocker locker(thread->scheduler_lock);
3103 
3104 	fill_thread_info(thread, info, size);
3105 
3106 	return B_OK;
3107 }
3108 
3109 
3110 thread_id
3111 find_thread(const char* name)
3112 {
3113 	if (name == NULL)
3114 		return thread_get_current_thread_id();
3115 
3116 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
3117 
3118 	// TODO: Scanning the whole hash with the thread hash lock held isn't
3119 	// exactly cheap -- although this function is probably used very rarely.
3120 
3121 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
3122 			Thread* thread = it.Next();) {
3123 		if (!thread->visible)
3124 			continue;
3125 
3126 		if (strcmp(thread->name, name) == 0)
3127 			return thread->id;
3128 	}
3129 
3130 	return B_NAME_NOT_FOUND;
3131 }
3132 
3133 
3134 status_t
3135 rename_thread(thread_id id, const char* name)
3136 {
3137 	if (name == NULL)
3138 		return B_BAD_VALUE;
3139 
3140 	// get the thread
3141 	Thread* thread = Thread::GetAndLock(id);
3142 	if (thread == NULL)
3143 		return B_BAD_THREAD_ID;
3144 	BReference<Thread> threadReference(thread, true);
3145 	ThreadLocker threadLocker(thread, true);
3146 
3147 	// check whether the operation is allowed
3148 	if (thread->team != thread_get_current_thread()->team)
3149 		return B_NOT_ALLOWED;
3150 
3151 	strlcpy(thread->name, name, B_OS_NAME_LENGTH);
3152 
3153 	team_id teamID = thread->team->id;
3154 
3155 	threadLocker.Unlock();
3156 
3157 	// notify listeners
3158 	sNotificationService.Notify(THREAD_NAME_CHANGED, teamID, id);
3159 		// don't pass the thread structure, as it's unsafe if it isn't ours
3160 
3161 	return B_OK;
3162 }
3163 
3164 
3165 status_t
3166 set_thread_priority(thread_id id, int32 priority)
3167 {
3168 	// make sure the passed in priority is within bounds
3169 	if (priority > THREAD_MAX_SET_PRIORITY)
3170 		priority = THREAD_MAX_SET_PRIORITY;
3171 	if (priority < THREAD_MIN_SET_PRIORITY)
3172 		priority = THREAD_MIN_SET_PRIORITY;
3173 
3174 	// get the thread
3175 	Thread* thread = Thread::GetAndLock(id);
3176 	if (thread == NULL)
3177 		return B_BAD_THREAD_ID;
3178 	BReference<Thread> threadReference(thread, true);
3179 	ThreadLocker threadLocker(thread, true);
3180 
3181 	// check whether the change is allowed
3182 	if (thread_is_idle_thread(thread))
3183 		return B_NOT_ALLOWED;
3184 
3185 	return scheduler_set_thread_priority(thread, priority);
3186 }
3187 
3188 
3189 status_t
3190 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
3191 {
3192 	return common_snooze_etc(timeout, timebase, flags, NULL);
3193 }
3194 
3195 
3196 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
3197 status_t
3198 snooze(bigtime_t timeout)
3199 {
3200 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
3201 }
3202 
3203 
3204 /*!	snooze_until() for internal kernel use only; doesn't interrupt on
3205 	signals.
3206 */
3207 status_t
3208 snooze_until(bigtime_t timeout, int timebase)
3209 {
3210 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
3211 }
3212 
3213 
3214 status_t
3215 wait_for_thread(thread_id thread, status_t *_returnCode)
3216 {
3217 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
3218 }
3219 
3220 
3221 status_t
3222 suspend_thread(thread_id id)
3223 {
3224 	if (id <= 0)
3225 		return B_BAD_VALUE;
3226 
3227 	Thread* currentThread = thread_get_current_thread();
3228 
3229 	Signal signal(SIGSTOP, SI_USER, B_OK, currentThread->team->id);
3230 	return send_signal_to_thread_id(id, signal, 0);
3231 }
3232 
3233 
3234 status_t
3235 resume_thread(thread_id id)
3236 {
3237 	if (id <= 0)
3238 		return B_BAD_VALUE;
3239 
3240 	Thread* currentThread = thread_get_current_thread();
3241 
3242 	// Using the kernel internal SIGNAL_CONTINUE_THREAD signal retains
3243 	// compatibility with BeOS, which documents the combination of suspend_thread()
3244 	// and resume_thread() to interrupt threads waiting on semaphores.
3245 	Signal signal(SIGNAL_CONTINUE_THREAD, SI_USER, B_OK,
3246 		currentThread->team->id);
3247 	return send_signal_to_thread_id(id, signal, 0);
3248 }
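

/*	Usage sketch (illustrative only, not from the original source) for the
	BeOS-documented behaviour referenced above: suspending and then resuming a
	thread that is blocked on a semaphore is expected to interrupt the wait,
	i.e. the blocked acquire_sem*() call returns B_INTERRUPTED. The target
	thread is hypothetical.

		suspend_thread(blockedThread);
		snooze(1000);
			// give the suspension a moment to take effect
		resume_thread(blockedThread);
*/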
3249 
3250 
3251 thread_id
3252 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
3253 	void *arg)
3254 {
3255 	return thread_create_thread(
3256 		ThreadCreationAttributes(function, name, priority, arg),
3257 		true);
3258 }
3259 
3260 
3261 int
3262 getrlimit(int resource, struct rlimit * rlp)
3263 {
3264 	status_t error = common_getrlimit(resource, rlp);
3265 	if (error != B_OK) {
3266 		errno = error;
3267 		return -1;
3268 	}
3269 
3270 	return 0;
3271 }
3272 
3273 
3274 int
3275 setrlimit(int resource, const struct rlimit * rlp)
3276 {
3277 	status_t error = common_setrlimit(resource, rlp);
3278 	if (error != B_OK) {
3279 		errno = error;
3280 		return -1;
3281 	}
3282 
3283 	return 0;
3284 }
3285 
3286 
3287 //	#pragma mark - syscalls
3288 
3289 
3290 void
3291 _user_exit_thread(status_t returnValue)
3292 {
3293 	exit_thread(returnValue);
3294 }
3295 
3296 
3297 status_t
3298 _user_kill_thread(thread_id thread)
3299 {
3300 	// TODO: Don't allow kernel threads to be killed!
3301 	return kill_thread(thread);
3302 }
3303 
3304 
3305 status_t
3306 _user_cancel_thread(thread_id threadID, void (*cancelFunction)(int))
3307 {
3308 	// check the cancel function
3309 	if (cancelFunction == NULL || !IS_USER_ADDRESS(cancelFunction))
3310 		return B_BAD_VALUE;
3311 
3312 	// get and lock the thread
3313 	Thread* thread = Thread::GetAndLock(threadID);
3314 	if (thread == NULL)
3315 		return B_BAD_THREAD_ID;
3316 	BReference<Thread> threadReference(thread, true);
3317 	ThreadLocker threadLocker(thread, true);
3318 
3319 	// only threads of the same team can be canceled
3320 	if (thread->team != thread_get_current_thread()->team)
3321 		return B_NOT_ALLOWED;
3322 
3323 	// set the cancel function
3324 	thread->cancel_function = cancelFunction;
3325 
3326 	// send the cancellation signal to the thread
3327 	InterruptsReadSpinLocker teamLocker(thread->team_lock);
3328 	SpinLocker locker(thread->team->signal_lock);
3329 	return send_signal_to_thread_locked(thread, SIGNAL_CANCEL_THREAD, NULL, 0);
3330 }
3331 
3332 
3333 status_t
3334 _user_resume_thread(thread_id thread)
3335 {
3336 	// TODO: Don't allow kernel threads to be resumed!
3337 	return resume_thread(thread);
3338 }
3339 
3340 
3341 status_t
3342 _user_suspend_thread(thread_id thread)
3343 {
3344 	// TODO: Don't allow kernel threads to be suspended!
3345 	return suspend_thread(thread);
3346 }
3347 
3348 
3349 status_t
3350 _user_rename_thread(thread_id thread, const char *userName)
3351 {
3352 	char name[B_OS_NAME_LENGTH];
3353 
3354 	if (!IS_USER_ADDRESS(userName)
3355 		|| userName == NULL
3356 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
3357 		return B_BAD_ADDRESS;
3358 
3359 	// TODO: Don't allow kernel threads to be renamed!
3360 	return rename_thread(thread, name);
3361 }
3362 
3363 
3364 int32
3365 _user_set_thread_priority(thread_id thread, int32 newPriority)
3366 {
3367 	// TODO: Don't allow setting priority of kernel threads!
3368 	return set_thread_priority(thread, newPriority);
3369 }
3370 
3371 
3372 thread_id
3373 _user_spawn_thread(thread_creation_attributes* userAttributes)
3374 {
3375 	// copy the userland structure to the kernel
3376 	char nameBuffer[B_OS_NAME_LENGTH];
3377 	ThreadCreationAttributes attributes;
3378 	status_t error = attributes.InitFromUserAttributes(userAttributes,
3379 		nameBuffer);
3380 	if (error != B_OK)
3381 		return error;
3382 
3383 	// create the thread
3384 	thread_id threadID = thread_create_thread(attributes, false);
3385 
3386 	if (threadID >= 0)
3387 		user_debug_thread_created(threadID);
3388 
3389 	return threadID;
3390 }
3391 
3392 
3393 status_t
3394 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags,
3395 	bigtime_t* userRemainingTime)
3396 {
3397 	// We need to store more syscall restart parameters than usual and need
3398 	// somewhat different handling. Hence we can't use
3399 	// syscall_restart_handle_timeout_pre() but do the job ourselves.
3400 	struct restart_parameters {
3401 		bigtime_t	timeout;
3402 		clockid_t	timebase;
3403 		uint32		flags;
3404 	};
3405 
3406 	Thread* thread = thread_get_current_thread();
3407 
3408 	if ((thread->flags & THREAD_FLAGS_SYSCALL_RESTARTED) != 0) {
3409 		// The syscall was restarted. Fetch the parameters from the stored
3410 		// restart parameters.
3411 		restart_parameters* restartParameters
3412 			= (restart_parameters*)thread->syscall_restart.parameters;
3413 		timeout = restartParameters->timeout;
3414 		timebase = restartParameters->timebase;
3415 		flags = restartParameters->flags;
3416 	} else {
3417 		// convert relative timeouts to absolute ones
3418 		if ((flags & B_RELATIVE_TIMEOUT) != 0) {
3419 			// not restarted yet and the flags indicate a relative timeout
3420 
3421 			// Make sure we use the system time base, so real-time clock changes
3422 			// won't affect our wait.
3423 			flags &= ~(uint32)B_TIMEOUT_REAL_TIME_BASE;
3424 			if (timebase == CLOCK_REALTIME)
3425 				timebase = CLOCK_MONOTONIC;
3426 
3427 			// get the current time and make the timeout absolute
3428 			bigtime_t now;
3429 			status_t error = user_timer_get_clock(timebase, now);
3430 			if (error != B_OK)
3431 				return error;
3432 
3433 			timeout += now;
3434 
3435 			// deal with overflow
3436 			if (timeout < 0)
3437 				timeout = B_INFINITE_TIMEOUT;
3438 
3439 			flags = (flags & ~B_RELATIVE_TIMEOUT) | B_ABSOLUTE_TIMEOUT;
3440 		} else
3441 			flags |= B_ABSOLUTE_TIMEOUT;
3442 	}
3443 
3444 	// snooze
3445 	bigtime_t remainingTime;
3446 	status_t error = common_snooze_etc(timeout, timebase,
3447 		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION,
3448 		userRemainingTime != NULL ? &remainingTime : NULL);
3449 
3450 	// If interrupted, copy the remaining time back to userland and prepare the
3451 	// syscall restart.
3452 	if (error == B_INTERRUPTED) {
3453 		if (userRemainingTime != NULL
3454 			&& (!IS_USER_ADDRESS(userRemainingTime)
3455 				|| user_memcpy(userRemainingTime, &remainingTime,
3456 					sizeof(remainingTime)) != B_OK)) {
3457 			return B_BAD_ADDRESS;
3458 		}
3459 
3460 		// store the normalized values in the restart parameters
3461 		restart_parameters* restartParameters
3462 			= (restart_parameters*)thread->syscall_restart.parameters;
3463 		restartParameters->timeout = timeout;
3464 		restartParameters->timebase = timebase;
3465 		restartParameters->flags = flags;
3466 
3467 		// restart the syscall, if possible
3468 		atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
3469 	}
3470 
3471 	return error;
3472 }
3473 
3474 
3475 void
3476 _user_thread_yield(void)
3477 {
3478 	thread_yield();
3479 }
3480 
3481 
3482 status_t
3483 _user_get_thread_info(thread_id id, thread_info *userInfo)
3484 {
3485 	thread_info info;
3486 	status_t status;
3487 
3488 	if (!IS_USER_ADDRESS(userInfo))
3489 		return B_BAD_ADDRESS;
3490 
3491 	status = _get_thread_info(id, &info, sizeof(thread_info));
3492 
3493 	if (status >= B_OK
3494 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
3495 		return B_BAD_ADDRESS;
3496 
3497 	return status;
3498 }
3499 
3500 
3501 status_t
3502 _user_get_next_thread_info(team_id team, int32 *userCookie,
3503 	thread_info *userInfo)
3504 {
3505 	status_t status;
3506 	thread_info info;
3507 	int32 cookie;
3508 
3509 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
3510 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3511 		return B_BAD_ADDRESS;
3512 
3513 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
3514 	if (status < B_OK)
3515 		return status;
3516 
3517 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3518 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
3519 		return B_BAD_ADDRESS;
3520 
3521 	return status;
3522 }
3523 
3524 
3525 thread_id
3526 _user_find_thread(const char *userName)
3527 {
3528 	char name[B_OS_NAME_LENGTH];
3529 
3530 	if (userName == NULL)
3531 		return find_thread(NULL);
3532 
3533 	if (!IS_USER_ADDRESS(userName)
3534 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
3535 		return B_BAD_ADDRESS;
3536 
3537 	return find_thread(name);
3538 }
3539 
3540 
3541 status_t
3542 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
3543 {
3544 	status_t returnCode;
3545 	status_t status;
3546 
3547 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
3548 		return B_BAD_ADDRESS;
3549 
3550 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
3551 
3552 	if (status == B_OK && userReturnCode != NULL
3553 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
3554 		return B_BAD_ADDRESS;
3555 	}
3556 
3557 	return syscall_restart_handle_post(status);
3558 }
3559 
3560 
3561 bool
3562 _user_has_data(thread_id thread)
3563 {
3564 	return has_data(thread);
3565 }
3566 
3567 
3568 status_t
3569 _user_send_data(thread_id thread, int32 code, const void *buffer,
3570 	size_t bufferSize)
3571 {
3572 	if (!IS_USER_ADDRESS(buffer))
3573 		return B_BAD_ADDRESS;
3574 
3575 	return send_data_etc(thread, code, buffer, bufferSize,
3576 		B_KILL_CAN_INTERRUPT);
3577 		// supports userland buffers
3578 }
3579 
3580 
3581 status_t
3582 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
3583 {
3584 	thread_id sender;
3585 	status_t code;
3586 
3587 	if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
3588 		|| !IS_USER_ADDRESS(buffer))
3589 		return B_BAD_ADDRESS;
3590 
3591 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
3592 		// supports userland buffers
3593 
3594 	if (_userSender != NULL)
3595 		if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
3596 			return B_BAD_ADDRESS;
3597 
3598 	return code;
3599 }
3600 
3601 
3602 status_t
3603 _user_block_thread(uint32 flags, bigtime_t timeout)
3604 {
3605 	syscall_restart_handle_timeout_pre(flags, timeout);
3606 	flags |= B_CAN_INTERRUPT;
3607 
3608 	Thread* thread = thread_get_current_thread();
3609 	ThreadLocker threadLocker(thread);
3610 
3611 	// check if already done
3612 	if (thread->user_thread->wait_status <= 0)
3613 		return thread->user_thread->wait_status;
3614 
3615 	// nope, so wait
3616 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");
3617 
3618 	threadLocker.Unlock();
3619 
3620 	status_t status = thread_block_with_timeout(flags, timeout);
3621 
3622 	threadLocker.Lock();
3623 
3624 	// Interruptions or timeouts can race with other threads unblocking us.
3625 	// Favor a wake-up by another thread, i.e. if someone changed the wait
3626 	// status, use that.
3627 	status_t oldStatus = thread->user_thread->wait_status;
3628 	if (oldStatus > 0)
3629 		thread->user_thread->wait_status = status;
3630 	else
3631 		status = oldStatus;
3632 
3633 	threadLocker.Unlock();
3634 
3635 	return syscall_restart_handle_timeout_post(status, timeout);
3636 }
3637 
3638 
3639 status_t
3640 _user_unblock_thread(thread_id threadID, status_t status)
3641 {
3642 	status_t error = user_unblock_thread(threadID, status);
3643 
3644 	if (error == B_OK)
3645 		scheduler_reschedule_if_necessary();
3646 
3647 	return error;
3648 }
3649 
3650 
3651 status_t
3652 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
3653 {
3654 	enum {
3655 		MAX_USER_THREADS_TO_UNBLOCK	= 128
3656 	};
3657 
3658 	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
3659 		return B_BAD_ADDRESS;
3660 	if (count > MAX_USER_THREADS_TO_UNBLOCK)
3661 		return B_BAD_VALUE;
3662 
3663 	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
3664 	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
3665 		return B_BAD_ADDRESS;
3666 
3667 	for (uint32 i = 0; i < count; i++)
3668 		user_unblock_thread(threads[i], status);
3669 
3670 	scheduler_reschedule_if_necessary();
3671 
3672 	return B_OK;
3673 }
3674 
3675 
3676 // TODO: the following two functions don't belong here
3677 
3678 
3679 int
3680 _user_getrlimit(int resource, struct rlimit *urlp)
3681 {
3682 	struct rlimit rl;
3683 	int ret;
3684 
3685 	if (urlp == NULL)
3686 		return EINVAL;
3687 
3688 	if (!IS_USER_ADDRESS(urlp))
3689 		return B_BAD_ADDRESS;
3690 
3691 	ret = common_getrlimit(resource, &rl);
3692 
3693 	if (ret == 0) {
3694 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
3695 		if (ret < 0)
3696 			return ret;
3697 
3698 		return 0;
3699 	}
3700 
3701 	return ret;
3702 }
3703 
3704 
3705 int
3706 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
3707 {
3708 	struct rlimit resourceLimit;
3709 
3710 	if (userResourceLimit == NULL)
3711 		return EINVAL;
3712 
3713 	if (!IS_USER_ADDRESS(userResourceLimit)
3714 		|| user_memcpy(&resourceLimit, userResourceLimit,
3715 			sizeof(struct rlimit)) < B_OK)
3716 		return B_BAD_ADDRESS;
3717 
3718 	return common_setrlimit(resource, &resourceLimit);
3719 }
3720