xref: /haiku/src/system/kernel/thread.cpp (revision aa29a30426c9ab7511c7c88e7223645264209a4c)
1 /*
2  * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*! Threading routines */
12 
13 
14 #include <thread.h>
15 
16 #include <errno.h>
17 #include <malloc.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/resource.h>
22 
23 #include <algorithm>
24 
25 #include <OS.h>
26 
27 #include <util/AutoLock.h>
28 
29 #include <arch/debug.h>
30 #include <boot/kernel_args.h>
31 #include <condition_variable.h>
32 #include <cpu.h>
33 #include <int.h>
34 #include <kimage.h>
35 #include <kscheduler.h>
36 #include <ksignal.h>
37 #include <Notifications.h>
38 #include <real_time_clock.h>
39 #include <slab/Slab.h>
40 #include <smp.h>
41 #include <syscalls.h>
42 #include <syscall_restart.h>
43 #include <team.h>
44 #include <tls.h>
45 #include <user_runtime.h>
46 #include <user_thread.h>
47 #include <vfs.h>
48 #include <vm/vm.h>
49 #include <vm/VMAddressSpace.h>
50 #include <wait_for_objects.h>
51 
52 #include "TeamThreadTables.h"
53 
54 
55 //#define TRACE_THREAD
56 #ifdef TRACE_THREAD
57 #	define TRACE(x) dprintf x
58 #else
59 #	define TRACE(x) ;
60 #endif
61 
62 
63 #define THREAD_MAX_MESSAGE_SIZE		65536
64 
65 
66 // #pragma mark - ThreadHashTable
67 
68 
69 typedef BKernel::TeamThreadTable<Thread> ThreadHashTable;
70 
71 
72 // thread list
73 static Thread sIdleThreads[B_MAX_CPU_COUNT];
74 static ThreadHashTable sThreadHash;
75 static spinlock sThreadHashLock = B_SPINLOCK_INITIALIZER;
76 static thread_id sNextThreadID = 2;
77 	// ID 1 is allocated for the kernel by Team::Team() behind our back
78 
79 // some arbitrarily chosen limits -- should probably depend on the available
80 // memory (the limit is not yet enforced)
81 static int32 sMaxThreads = 4096;
82 static int32 sUsedThreads = 0;
83 
84 
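// An UndertakerEntry describes a dead thread that still needs to be cleaned
// up by the undertaker thread. It lives on the dying thread's kernel stack,
// so the undertaker copies it before use.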
85 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
86 	Thread*	thread;
87 	team_id	teamID;
88 
89 	UndertakerEntry(Thread* thread, team_id teamID)
90 		:
91 		thread(thread),
92 		teamID(teamID)
93 	{
94 	}
95 };
96 
97 
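// Arguments for a new thread's entry function; init_thread_kernel_stack()
// copies them onto the thread's kernel stack.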
98 struct ThreadEntryArguments {
99 	status_t	(*kernelFunction)(void* argument);
100 	void*		argument;
101 	bool		enterUserland;
102 };
103 
104 struct UserThreadEntryArguments : ThreadEntryArguments {
105 	addr_t			userlandEntry;
106 	void*			userlandArgument1;
107 	void*			userlandArgument2;
108 	pthread_t		pthread;
109 	arch_fork_arg*	forkArgs;
110 	uint32			flags;
111 };
112 
113 
114 class ThreadNotificationService : public DefaultNotificationService {
115 public:
116 	ThreadNotificationService()
117 		: DefaultNotificationService("threads")
118 	{
119 	}
120 
121 	void Notify(uint32 eventCode, team_id teamID, thread_id threadID,
122 		Thread* thread = NULL)
123 	{
124 		char eventBuffer[180];
125 		KMessage event;
126 		event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
127 		event.AddInt32("event", eventCode);
128 		event.AddInt32("team", teamID);
129 		event.AddInt32("thread", threadID);
130 		if (thread != NULL)
131 			event.AddPointer("threadStruct", thread);
132 
133 		DefaultNotificationService::Notify(event, eventCode);
134 	}
135 
136 	void Notify(uint32 eventCode, Thread* thread)
137 	{
138 		return Notify(eventCode, thread->team->id, thread->id, thread);
139 	}
140 };
141 
142 
143 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
144 static ConditionVariable sUndertakerCondition;
145 static ThreadNotificationService sNotificationService;
146 
147 
148 // object cache to allocate thread structures from
149 static object_cache* sThreadCache;
150 
151 
152 // #pragma mark - Thread
153 
154 
155 /*!	Constructs a thread.
156 
157 	\param name The thread's name.
158 	\param threadID The ID to be assigned to the new thread. If
159 		  \code < 0 \endcode a fresh one is allocated.
160 	\param cpu The CPU the thread shall be assigned to.
161 */
162 Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)
163 	:
164 	flags(0),
165 	serial_number(-1),
166 	hash_next(NULL),
167 	team_next(NULL),
168 	queue_next(NULL),
169 	priority(-1),
170 	next_priority(-1),
171 	io_priority(-1),
172 	cpu(cpu),
173 	previous_cpu(NULL),
174 	pinned_to_cpu(0),
175 	sig_block_mask(0),
176 	sigsuspend_original_unblocked_mask(0),
177 	user_signal_context(NULL),
178 	signal_stack_base(0),
179 	signal_stack_size(0),
180 	signal_stack_enabled(false),
181 	in_kernel(true),
182 	was_yielded(false),
183 	user_thread(NULL),
184 	fault_handler(0),
185 	page_faults_allowed(1),
186 	team(NULL),
187 	select_infos(NULL),
188 	kernel_stack_area(-1),
189 	kernel_stack_base(0),
190 	user_stack_area(-1),
191 	user_stack_base(0),
192 	user_local_storage(0),
193 	kernel_errno(0),
194 	user_time(0),
195 	kernel_time(0),
196 	last_time(0),
197 	cpu_clock_offset(0),
198 	post_interrupt_callback(NULL),
199 	post_interrupt_data(NULL)
200 {
201 	id = threadID >= 0 ? threadID : allocate_thread_id();
202 	visible = false;
203 
204 	// init locks
205 	char lockName[32];
206 	snprintf(lockName, sizeof(lockName), "Thread:%" B_PRId32, id);
207 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
208 
209 	B_INITIALIZE_SPINLOCK(&time_lock);
210 
211 	// init name
212 	if (name != NULL)
213 		strlcpy(this->name, name, B_OS_NAME_LENGTH);
214 	else
215 		strcpy(this->name, "unnamed thread");
216 
217 	alarm.period = 0;
218 
219 	exit.status = 0;
220 
221 	list_init(&exit.waiters);
222 
223 	exit.sem = -1;
224 	msg.write_sem = -1;
225 	msg.read_sem = -1;
226 
227 	// add to thread table -- yet invisible
228 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
229 	sThreadHash.Insert(this);
230 }
231 
232 
233 Thread::~Thread()
234 {
235 	// Delete the resources that the thread itself would normally have deleted
236 	// on exit, but that might still exist if the thread never ran.
237 
238 	if (user_stack_area >= 0)
239 		delete_area(user_stack_area);
240 
241 	DeleteUserTimers(false);
242 
243 	// delete the resources that remain in either case
244 
245 	if (kernel_stack_area >= 0)
246 		delete_area(kernel_stack_area);
247 
248 	fPendingSignals.Clear();
249 
250 	if (exit.sem >= 0)
251 		delete_sem(exit.sem);
252 	if (msg.write_sem >= 0)
253 		delete_sem(msg.write_sem);
254 	if (msg.read_sem >= 0)
255 		delete_sem(msg.read_sem);
256 
257 	scheduler_on_thread_destroy(this);
258 
259 	mutex_destroy(&fLock);
260 
261 	// remove from thread table
262 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
263 	sThreadHash.Remove(this);
264 }
265 
266 
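/*!	Creates a new Thread object and initializes it as a non-idle thread
	(allocating its ID, exit and message semaphores, and architecture-specific
	data). On success the caller receives the sole reference to the object.
*/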
267 /*static*/ status_t
268 Thread::Create(const char* name, Thread*& _thread)
269 {
270 	Thread* thread = new Thread(name, -1, NULL);
271 	if (thread == NULL)
272 		return B_NO_MEMORY;
273 
274 	status_t error = thread->Init(false);
275 	if (error != B_OK) {
276 		delete thread;
277 		return error;
278 	}
279 
280 	_thread = thread;
281 	return B_OK;
282 }
283 
284 
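/*!	Looks up the thread with the given ID in the global hash table and
	acquires a reference to it. Returns \c NULL, if no visible thread with
	that ID exists. The caller is responsible for releasing the reference.
*/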
285 /*static*/ Thread*
286 Thread::Get(thread_id id)
287 {
288 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
289 	Thread* thread = sThreadHash.Lookup(id);
290 	if (thread != NULL)
291 		thread->AcquireReference();
292 	return thread;
293 }
294 
295 
296 /*static*/ Thread*
297 Thread::GetAndLock(thread_id id)
298 {
299 	// look it up and acquire a reference
300 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
301 	Thread* thread = sThreadHash.Lookup(id);
302 	if (thread == NULL)
303 		return NULL;
304 
305 	thread->AcquireReference();
306 	threadHashLocker.Unlock();
307 
308 	// lock and check whether it is still in the hash table
309 	thread->Lock();
310 	threadHashLocker.Lock();
311 
312 	if (sThreadHash.Lookup(id) == thread)
313 		return thread;
314 
315 	threadHashLocker.Unlock();
316 
317 	// nope, the thread is no longer in the hash table
318 	thread->UnlockAndReleaseReference();
319 
320 	return NULL;
321 }
322 
323 
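/*!	Kernel debugger only: looks up the thread without locking or acquiring a
	reference; the \c false argument also makes threads findable that are not
	yet visible.
*/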
324 /*static*/ Thread*
325 Thread::GetDebug(thread_id id)
326 {
327 	return sThreadHash.Lookup(id, false);
328 }
329 
330 
331 /*static*/ bool
332 Thread::IsAlive(thread_id id)
333 {
334 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
335 	return sThreadHash.Lookup(id) != NULL;
336 }
337 
338 
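// Thread objects are allocated from a dedicated object cache; the placement
// variant below is used to construct threads in preallocated storage (e.g.
// the idle threads).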
339 void*
340 Thread::operator new(size_t size)
341 {
342 	return object_cache_alloc(sThreadCache, 0);
343 }
344 
345 
346 void*
347 Thread::operator new(size_t, void* pointer)
348 {
349 	return pointer;
350 }
351 
352 
353 void
354 Thread::operator delete(void* pointer, size_t size)
355 {
356 	object_cache_free(sThreadCache, pointer, 0);
357 }
358 
359 
360 status_t
361 Thread::Init(bool idleThread)
362 {
363 	status_t error = scheduler_on_thread_create(this, idleThread);
364 	if (error != B_OK)
365 		return error;
366 
367 	char temp[64];
368 	snprintf(temp, sizeof(temp), "thread_%ld_retcode_sem", id);
369 	exit.sem = create_sem(0, temp);
370 	if (exit.sem < 0)
371 		return exit.sem;
372 
373 	snprintf(temp, sizeof(temp), "%s send", name);
374 	msg.write_sem = create_sem(1, temp);
375 	if (msg.write_sem < 0)
376 		return msg.write_sem;
377 
378 	snprintf(temp, sizeof(temp), "%s receive", name);
379 	msg.read_sem = create_sem(0, temp);
380 	if (msg.read_sem < 0)
381 		return msg.read_sem;
382 
383 	error = arch_thread_init_thread_struct(this);
384 	if (error != B_OK)
385 		return error;
386 
387 	return B_OK;
388 }
389 
390 
391 /*!	Checks whether the thread is still in the thread hash table.
392 */
393 bool
394 Thread::IsAlive() const
395 {
396 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
397 
398 	return sThreadHash.Lookup(id) != NULL;
399 }
400 
401 
402 void
403 Thread::ResetSignalsOnExec()
404 {
405 	// We are supposed to keep the pending signals and the signal mask. Only the
406 	// signal stack, if set, shall be unset.
407 
408 	sigsuspend_original_unblocked_mask = 0;
409 	user_signal_context = NULL;
410 	signal_stack_base = 0;
411 	signal_stack_size = 0;
412 	signal_stack_enabled = false;
413 }
414 
415 
416 /*!	Adds the given user timer to the thread and, if user-defined, assigns it an
417 	ID.
418 
419 	The caller must hold the thread's lock.
420 
421 	\param timer The timer to be added. If it doesn't have an ID yet, it is
422 		considered user-defined and will be assigned an ID.
423 	\return \c B_OK, if the timer was added successfully, another error code
424 		otherwise.
425 */
426 status_t
427 Thread::AddUserTimer(UserTimer* timer)
428 {
429 	// If the timer is user-defined, check timer limit and increment
430 	// user-defined count.
431 	if (timer->ID() < 0 && !team->CheckAddUserDefinedTimer())
432 		return EAGAIN;
433 
434 	fUserTimers.AddTimer(timer);
435 
436 	return B_OK;
437 }
438 
439 
440 /*!	Removes the given user timer from the thread.
441 
442 	The caller must hold the thread's lock.
443 
444 	\param timer The timer to be removed.
445 
446 */
447 void
448 Thread::RemoveUserTimer(UserTimer* timer)
449 {
450 	fUserTimers.RemoveTimer(timer);
451 
452 	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
453 		team->UserDefinedTimersRemoved(1);
454 }
455 
456 
457 /*!	Deletes all (or all user-defined) user timers of the thread.
458 
459 	The caller must hold the thread's lock.
460 
461 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
462 		otherwise all timers are deleted.
463 */
464 void
465 Thread::DeleteUserTimers(bool userDefinedOnly)
466 {
467 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
468 	if (count > 0)
469 		team->UserDefinedTimersRemoved(count);
470 }
471 
472 
473 void
474 Thread::DeactivateCPUTimeUserTimers()
475 {
476 	while (ThreadTimeUserTimer* timer = fCPUTimeUserTimers.Head())
477 		timer->Deactivate();
478 }
479 
480 
481 // #pragma mark - ThreadListIterator
482 
483 
484 ThreadListIterator::ThreadListIterator()
485 {
486 	// queue the entry
487 	InterruptsSpinLocker locker(sThreadHashLock);
488 	sThreadHash.InsertIteratorEntry(&fEntry);
489 }
490 
491 
492 ThreadListIterator::~ThreadListIterator()
493 {
494 	// remove the entry
495 	InterruptsSpinLocker locker(sThreadHashLock);
496 	sThreadHash.RemoveIteratorEntry(&fEntry);
497 }
498 
499 
500 Thread*
501 ThreadListIterator::Next()
502 {
503 	// get the next thread -- if there is one, acquire a reference to it
504 	InterruptsSpinLocker locker(sThreadHashLock);
505 	Thread* thread = sThreadHash.NextElement(&fEntry);
506 	if (thread != NULL)
507 		thread->AcquireReference();
508 
509 	return thread;
510 }
511 
512 
513 // #pragma mark - ThreadCreationAttributes
514 
515 
516 ThreadCreationAttributes::ThreadCreationAttributes(thread_func function,
517 	const char* name, int32 priority, void* arg, team_id team,
518 	Thread* thread)
519 {
520 	this->entry = NULL;
521 	this->name = name;
522 	this->priority = priority;
523 	this->args1 = NULL;
524 	this->args2 = NULL;
525 	this->stack_address = NULL;
526 	this->stack_size = 0;
527 	this->pthread = NULL;
528 	this->flags = 0;
529 	this->team = team >= 0 ? team : team_get_kernel_team()->id;
530 	this->thread = thread;
531 	this->signal_mask = 0;
532 	this->additional_stack_size = 0;
533 	this->kernelEntry = function;
534 	this->kernelArgument = arg;
535 	this->forkArgs = NULL;
536 }
537 
538 
539 /*!	Initializes the structure from a userland structure.
540 	\param userAttributes The userland structure (must be a userland address).
541 	\param nameBuffer A character array of at least size B_OS_NAME_LENGTH,
542 		which will be used for the \c name field, if the userland structure has
543 		a name. The buffer must remain valid as long as this structure is in
544 		use afterwards (or until it is reinitialized).
545 	\return \c B_OK, if the initialization went fine, another error code
546 		otherwise.
547 */
548 status_t
549 ThreadCreationAttributes::InitFromUserAttributes(
550 	const thread_creation_attributes* userAttributes, char* nameBuffer)
551 {
552 	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
553 		|| user_memcpy((thread_creation_attributes*)this, userAttributes,
554 				sizeof(thread_creation_attributes)) != B_OK) {
555 		return B_BAD_ADDRESS;
556 	}
557 
558 	if (stack_size != 0
559 		&& (stack_size < MIN_USER_STACK_SIZE
560 			|| stack_size > MAX_USER_STACK_SIZE)) {
561 		return B_BAD_VALUE;
562 	}
563 
564 	if (entry == NULL || !IS_USER_ADDRESS(entry)
565 		|| (stack_address != NULL && !IS_USER_ADDRESS(stack_address))
566 		|| (name != NULL && (!IS_USER_ADDRESS(name)
567 			|| user_strlcpy(nameBuffer, name, B_OS_NAME_LENGTH) < 0))) {
568 		return B_BAD_ADDRESS;
569 	}
570 
571 	name = name != NULL ? nameBuffer : "user thread";
572 
573 	// kernel only attributes (not in thread_creation_attributes):
574 	Thread* currentThread = thread_get_current_thread();
575 	team = currentThread->team->id;
576 	thread = NULL;
577 	signal_mask = currentThread->sig_block_mask;
578 		// inherit the current thread's signal mask
579 	additional_stack_size = 0;
580 	kernelEntry = NULL;
581 	kernelArgument = NULL;
582 	forkArgs = NULL;
583 
584 	return B_OK;
585 }
586 
587 
588 // #pragma mark - private functions
589 
590 
591 /*!	Inserts a thread into a team.
592 	The caller must hold the team's lock, the thread's lock, and the scheduler
593 	lock.
594 */
595 static void
596 insert_thread_into_team(Team *team, Thread *thread)
597 {
598 	thread->team_next = team->thread_list;
599 	team->thread_list = thread;
600 	team->num_threads++;
601 
602 	if (team->num_threads == 1) {
603 		// this was the first thread
604 		team->main_thread = thread;
605 	}
606 	thread->team = team;
607 }
608 
609 
610 /*!	Removes a thread from a team.
611 	The caller must hold the team's lock, the thread's lock, and the scheduler
612 	lock.
613 */
614 static void
615 remove_thread_from_team(Team *team, Thread *thread)
616 {
617 	Thread *temp, *last = NULL;
618 
619 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
620 		if (temp == thread) {
621 			if (last == NULL)
622 				team->thread_list = temp->team_next;
623 			else
624 				last->team_next = temp->team_next;
625 
626 			team->num_threads--;
627 			break;
628 		}
629 		last = temp;
630 	}
631 }
632 
633 
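/*!	Performs the userland preparations for \a thread (TLS, \c user_thread
	fields) and enters userland, either by restoring the fork() frame from
	\a args or by jumping to the given entry point. Returns only on failure.
*/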
634 static status_t
635 enter_userspace(Thread* thread, UserThreadEntryArguments* args)
636 {
637 	status_t error = arch_thread_init_tls(thread);
638 	if (error != B_OK) {
639 		dprintf("Failed to init TLS for new userland thread \"%s\" (%" B_PRId32
640 			")\n", thread->name, thread->id);
641 		free(args->forkArgs);
642 		return error;
643 	}
644 
645 	user_debug_update_new_thread_flags(thread);
646 
647 	// init the thread's user_thread
648 	user_thread* userThread = thread->user_thread;
649 	userThread->pthread = args->pthread;
650 	userThread->flags = 0;
651 	userThread->wait_status = B_OK;
652 	userThread->defer_signals
653 		= (args->flags & THREAD_CREATION_FLAG_DEFER_SIGNALS) != 0 ? 1 : 0;
654 	userThread->pending_signals = 0;
655 
656 	if (args->forkArgs != NULL) {
657 		// This is a fork()ed thread. Copy the fork args onto the stack and
658 		// free them.
659 		arch_fork_arg archArgs = *args->forkArgs;
660 		free(args->forkArgs);
661 
662 		arch_restore_fork_frame(&archArgs);
663 			// this one won't return here
664 		return B_ERROR;
665 	}
666 
667 	// Jump to the entry point in user space. Only returns if something fails.
668 	return arch_thread_enter_userspace(thread, args->userlandEntry,
669 		args->userlandArgument1, args->userlandArgument2);
670 }
671 
672 
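/*!	Prepares UserThreadEntryArguments without a kernel function and enters
	userland at \a entryFunction with the two given arguments. Like
	enter_userspace() it returns only on failure.
*/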
673 status_t
674 thread_enter_userspace_new_team(Thread* thread, addr_t entryFunction,
675 	void* argument1, void* argument2)
676 {
677 	UserThreadEntryArguments entryArgs;
678 	entryArgs.kernelFunction = NULL;
679 	entryArgs.argument = NULL;
680 	entryArgs.enterUserland = true;
681 	entryArgs.userlandEntry = (addr_t)entryFunction;
682 	entryArgs.userlandArgument1 = argument1;
683 	entryArgs.userlandArgument2 = argument2;
684 	entryArgs.pthread = NULL;
685 	entryArgs.forkArgs = NULL;
686 	entryArgs.flags = 0;
687 
688 	return enter_userspace(thread, &entryArgs);
689 }
690 
691 
692 static void
693 common_thread_entry(void* _args)
694 {
695 	Thread* thread = thread_get_current_thread();
696 
697 	// The thread is new and has been scheduled for the first time.
698 
699 	// start CPU time based user timers
700 	if (thread->HasActiveCPUTimeUserTimers()
701 		|| thread->team->HasActiveCPUTimeUserTimers()) {
702 		user_timer_continue_cpu_timers(thread, thread->cpu->previous_thread);
703 	}
704 
705 	// notify the user debugger code
706 	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
707 		user_debug_thread_scheduled(thread);
708 
709 	// start tracking time
710 	thread->last_time = system_time();
711 
712 	// unlock the scheduler lock and enable interrupts
713 	release_spinlock(&gSchedulerLock);
714 	enable_interrupts();
715 
716 	// call the kernel function, if any
717 	ThreadEntryArguments* args = (ThreadEntryArguments*)_args;
718 	if (args->kernelFunction != NULL)
719 		args->kernelFunction(args->argument);
720 
721 	// If requested, enter userland now.
722 	if (args->enterUserland) {
723 		enter_userspace(thread, (UserThreadEntryArguments*)args);
724 			// only returns on error
725 
726 		// If that's the team's main thread, init the team exit info.
727 		if (thread == thread->team->main_thread)
728 			team_init_exit_info_on_error(thread->team);
729 	}
730 
731 	// we're done
732 	thread_exit();
733 }
734 
735 
736 /*!	Prepares the given thread's kernel stack for executing its entry function.
737 
738 	The data pointed to by \a data of size \a dataSize are copied to the
739 	thread's kernel stack. A pointer to the copy's data is passed to the entry
740 	function. The entry function is common_thread_entry().
741 
742 	\param thread The thread.
743 	\param data Pointer to data to be copied to the thread's stack and passed
744 		to the entry function.
745 	\param dataSize The size of \a data.
746  */
747 static void
748 init_thread_kernel_stack(Thread* thread, const void* data, size_t dataSize)
749 {
750 	uint8* stack = (uint8*)thread->kernel_stack_base;
751 	uint8* stackTop = (uint8*)thread->kernel_stack_top;
752 
753 	// clear (or rather invalidate) the kernel stack contents, if compiled with
754 	// debugging
755 #if KDEBUG > 0
756 #	if defined(DEBUG_KERNEL_STACKS) && defined(STACK_GROWS_DOWNWARDS)
757 	memset((void*)(stack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE), 0xcc,
758 		KERNEL_STACK_SIZE);
759 #	else
760 	memset(stack, 0xcc, KERNEL_STACK_SIZE);
761 #	endif
762 #endif
763 
764 	// copy the data onto the stack, with 16-byte alignment to be on the safe
765 	// side
766 	void* clonedData;
767 #ifdef STACK_GROWS_DOWNWARDS
768 	clonedData = (void*)ROUNDDOWN((addr_t)stackTop - dataSize, 16);
769 	stackTop = (uint8*)clonedData;
770 #else
771 	clonedData = (void*)ROUNDUP((addr_t)stack, 16);
772 	stack = (uint8*)clonedData + ROUNDUP(dataSize, 16);
773 #endif
774 
775 	memcpy(clonedData, data, dataSize);
776 
777 	arch_thread_init_kthread_stack(thread, stack, stackTop,
778 		&common_thread_entry, clonedData);
779 }
780 
781 
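/*!	Sets up the userland stack for \a thread: either adopts the caller
	supplied stack at \a _stackBase or creates a stack area in the team's
	address space -- at a fixed position for the team's main thread, anywhere
	in the user stack region otherwise.
*/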
782 static status_t
783 create_thread_user_stack(Team* team, Thread* thread, void* _stackBase,
784 	size_t stackSize, size_t additionalSize, char* nameBuffer)
785 {
786 	area_id stackArea = -1;
787 	uint8* stackBase = (uint8*)_stackBase;
788 
789 	if (stackBase != NULL) {
790 		// A stack has been specified. It must at least be large enough to
791 		// hold the TLS space.
792 		STATIC_ASSERT(TLS_SIZE < MIN_USER_STACK_SIZE);
793 		if (stackSize < MIN_USER_STACK_SIZE)
794 			return B_BAD_VALUE;
795 
796 		stackSize -= TLS_SIZE;
797 	} else {
798 		// No user-defined stack -- allocate one. For non-main threads the stack
799 		// will be between USER_STACK_REGION and the main thread stack area. For
800 		// a main thread the position is fixed.
801 
802 		if (stackSize == 0) {
803 			// Use the default size (a different one for a main thread).
804 			stackSize = thread->id == team->id
805 				? USER_MAIN_THREAD_STACK_SIZE : USER_STACK_SIZE;
806 		} else {
807 			// Verify that the given stack size is large enough.
808 			if (stackSize < MIN_USER_STACK_SIZE - TLS_SIZE)
809 				return B_BAD_VALUE;
810 
811 			stackSize = PAGE_ALIGN(stackSize);
812 		}
813 		stackSize += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
814 
815 		size_t areaSize = PAGE_ALIGN(stackSize + TLS_SIZE + additionalSize);
816 
817 		snprintf(nameBuffer, B_OS_NAME_LENGTH, "%s_%ld_stack", thread->name,
818 			thread->id);
819 
820 		virtual_address_restrictions virtualRestrictions = {};
821 		if (thread->id == team->id) {
822 			// The main thread gets a fixed position at the top of the stack
823 			// address range.
824 			stackBase = (uint8*)(USER_STACK_REGION + USER_STACK_REGION_SIZE
825 				- areaSize);
826 			virtualRestrictions.address_specification = B_EXACT_ADDRESS;
827 
828 		} else {
829 			// not a main thread
830 			stackBase = (uint8*)(addr_t)USER_STACK_REGION;
831 			virtualRestrictions.address_specification = B_BASE_ADDRESS;
832 		}
833 		virtualRestrictions.address = (void*)stackBase;
834 
835 		physical_address_restrictions physicalRestrictions = {};
836 
837 		stackArea = create_area_etc(team->id, nameBuffer,
838 			areaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA,
839 			0, &virtualRestrictions, &physicalRestrictions,
840 			(void**)&stackBase);
841 		if (stackArea < 0)
842 			return stackArea;
843 	}
844 
845 	// set the stack
846 	ThreadLocker threadLocker(thread);
847 	thread->user_stack_base = (addr_t)stackBase;
848 	thread->user_stack_size = stackSize;
849 	thread->user_stack_area = stackArea;
850 
851 	return B_OK;
852 }
853 
854 
855 status_t
856 thread_create_user_stack(Team* team, Thread* thread, void* stackBase,
857 	size_t stackSize, size_t additionalSize)
858 {
859 	char nameBuffer[B_OS_NAME_LENGTH];
860 	return create_thread_user_stack(team, thread, stackBase, stackSize,
861 		additionalSize, nameBuffer);
862 }
863 
864 
865 /*!	Creates a new thread.
866 
867 	\param attributes The thread creation attributes, specifying the team in
868 		which to create the thread, as well as a whole bunch of other arguments.
869 	\param kernel \c true, if a kernel-only thread shall be created, \c false,
870 		if the thread shall also be able to run in userland.
871 	\return The ID of the newly created thread (>= 0) or an error code on
872 		failure.
873 */
874 thread_id
875 thread_create_thread(const ThreadCreationAttributes& attributes, bool kernel)
876 {
877 	status_t status = B_OK;
878 
879 	TRACE(("thread_create_thread(%s, thread = %p, %s)\n", attributes.name,
880 		attributes.thread, kernel ? "kernel" : "user"));
881 
882 	// get the team
883 	Team* team = Team::Get(attributes.team);
884 	if (team == NULL)
885 		return B_BAD_TEAM_ID;
886 	BReference<Team> teamReference(team, true);
887 
888 	// If a thread object is given, acquire a reference to it, otherwise create
889 	// a new thread object with the given attributes.
890 	Thread* thread = attributes.thread;
891 	if (thread != NULL) {
892 		thread->AcquireReference();
893 	} else {
894 		status = Thread::Create(attributes.name, thread);
895 		if (status != B_OK)
896 			return status;
897 	}
898 	BReference<Thread> threadReference(thread, true);
899 
900 	thread->team = team;
901 		// set already so that, if something goes wrong, the team pointer is
902 		// available for deinitialization
903 	thread->priority = attributes.priority == -1
904 		? B_NORMAL_PRIORITY : attributes.priority;
905 	thread->next_priority = thread->priority;
906 	thread->state = B_THREAD_SUSPENDED;
907 	thread->next_state = B_THREAD_SUSPENDED;
908 
909 	thread->sig_block_mask = attributes.signal_mask;
910 
911 	// init debug structure
912 	init_thread_debug_info(&thread->debug_info);
913 
914 	// create the kernel stack
915 	char stackName[B_OS_NAME_LENGTH];
916 	snprintf(stackName, B_OS_NAME_LENGTH, "%s_%ld_kstack", thread->name,
917 		thread->id);
918 	thread->kernel_stack_area = create_area(stackName,
919 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
920 		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
921 		B_FULL_LOCK,
922 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
923 
924 	if (thread->kernel_stack_area < 0) {
925 		// we're not yet part of a team, so we can just bail out
926 		status = thread->kernel_stack_area;
927 
928 		dprintf("create_thread: error creating kernel stack: %s!\n",
929 			strerror(status));
930 
931 		return status;
932 	}
933 
934 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
935 		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
936 
937 	if (kernel) {
938 		// Init the thread's kernel stack. It will start executing
939 		// common_thread_entry() with the arguments we prepare here.
940 		ThreadEntryArguments entryArgs;
941 		entryArgs.kernelFunction = attributes.kernelEntry;
942 		entryArgs.argument = attributes.kernelArgument;
943 		entryArgs.enterUserland = false;
944 
945 		init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs));
946 	} else {
947 		// create the userland stack, if the thread doesn't have one yet
948 		if (thread->user_stack_base == 0) {
949 			status = create_thread_user_stack(team, thread,
950 				attributes.stack_address, attributes.stack_size,
951 				attributes.additional_stack_size, stackName);
952 			if (status != B_OK)
953 				return status;
954 		}
955 
956 		// Init the thread's kernel stack. It will start executing
957 		// common_thread_entry() with the arguments we prepare here.
958 		UserThreadEntryArguments entryArgs;
959 		entryArgs.kernelFunction = attributes.kernelEntry;
960 		entryArgs.argument = attributes.kernelArgument;
961 		entryArgs.enterUserland = true;
962 		entryArgs.userlandEntry = (addr_t)attributes.entry;
963 		entryArgs.userlandArgument1 = attributes.args1;
964 		entryArgs.userlandArgument2 = attributes.args2;
965 		entryArgs.pthread = attributes.pthread;
966 		entryArgs.forkArgs = attributes.forkArgs;
967 		entryArgs.flags = attributes.flags;
968 
969 		init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs));
970 
971 		// create the pre-defined thread timers
972 		status = user_timer_create_thread_timers(team, thread);
973 		if (status != B_OK)
974 			return status;
975 	}
976 
977 	// lock the team and see whether it is still alive
978 	TeamLocker teamLocker(team);
979 	if (team->state >= TEAM_STATE_SHUTDOWN)
980 		return B_BAD_TEAM_ID;
981 
982 	bool debugNewThread = false;
983 	if (!kernel) {
984 		// allocate the user_thread structure, if not already allocated
985 		if (thread->user_thread == NULL) {
986 			thread->user_thread = team_allocate_user_thread(team);
987 			if (thread->user_thread == NULL)
988 				return B_NO_MEMORY;
989 		}
990 
991 		// If the new thread belongs to the same team as the current thread, it
992 		// may inherit some of the thread debug flags.
993 		Thread* currentThread = thread_get_current_thread();
994 		if (currentThread != NULL && currentThread->team == team) {
995 			// inherit all user flags...
996 			int32 debugFlags = atomic_get(&currentThread->debug_info.flags)
997 				& B_THREAD_DEBUG_USER_FLAG_MASK;
998 
999 			// ... except the syscall tracing flags, unless explicitly specified
1000 			if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
1001 				debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
1002 					| B_THREAD_DEBUG_POST_SYSCALL);
1003 			}
1004 
1005 			thread->debug_info.flags = debugFlags;
1006 
1007 			// stop the new thread, if desired
1008 			debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
1009 		}
1010 	}
1011 
1012 	// We're going to make the thread live now. The thread itself will take
1013 	// over a reference to its Thread object. We acquire another reference for
1014 	// our own use (and threadReference remains armed).
1015 	thread->AcquireReference();
1016 
1017 	ThreadLocker threadLocker(thread);
1018 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1019 	SpinLocker threadHashLocker(sThreadHashLock);
1020 
1021 	// make thread visible in global hash/list
1022 	thread->visible = true;
1023 	sUsedThreads++;
1024 	scheduler_on_thread_init(thread);
1025 
1026 	// Debug the new thread, if the parent thread required that (see above),
1027 	// or the respective global team debug flag is set. But only if a
1028 	// debugger is installed for the team.
1029 	if (!kernel) {
1030 		int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
1031 		debugNewThread |= (teamDebugFlags & B_TEAM_DEBUG_STOP_NEW_THREADS) != 0;
1032 		if (debugNewThread
1033 			&& (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) != 0) {
1034 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
1035 		}
1036 	}
1037 
1038 	// insert thread into team
1039 	insert_thread_into_team(team, thread);
1040 
1041 	threadHashLocker.Unlock();
1042 	schedulerLocker.Unlock();
1043 	threadLocker.Unlock();
1044 	teamLocker.Unlock();
1045 
1046 	// notify listeners
1047 	sNotificationService.Notify(THREAD_ADDED, thread);
1048 
1049 	return thread->id;
1050 }
1051 
1052 
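/*!	The undertaker thread's main loop: waits for entries to appear in
	\c sUndertakerEntries, removes each dead thread from the kernel team, and
	releases the thread's reference to its Thread object.
*/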
1053 static status_t
1054 undertaker(void* /*args*/)
1055 {
1056 	while (true) {
1057 		// wait for a thread to bury
1058 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1059 
1060 		while (sUndertakerEntries.IsEmpty()) {
1061 			ConditionVariableEntry conditionEntry;
1062 			sUndertakerCondition.Add(&conditionEntry);
1063 			schedulerLocker.Unlock();
1064 
1065 			conditionEntry.Wait();
1066 
1067 			schedulerLocker.Lock();
1068 		}
1069 
1070 		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
1071 		schedulerLocker.Unlock();
1072 
1073 		UndertakerEntry entry = *_entry;
1074 			// we need a copy, since the original entry is on the thread's stack
1075 
1076 		// we've got an entry
1077 		Thread* thread = entry.thread;
1078 
1079 		// remove this thread from the kernel team -- this makes it
1080 		// inaccessible
1081 		Team* kernelTeam = team_get_kernel_team();
1082 		TeamLocker kernelTeamLocker(kernelTeam);
1083 		thread->Lock();
1084 		schedulerLocker.Lock();
1085 
1086 		remove_thread_from_team(kernelTeam, thread);
1087 
1088 		schedulerLocker.Unlock();
1089 		kernelTeamLocker.Unlock();
1090 
1091 		// free the thread structure
1092 		thread->UnlockAndReleaseReference();
1093 	}
1094 
1095 	// can never get here
1096 	return B_OK;
1097 }
1098 
1099 
1100 /*!	Returns the semaphore the thread is currently waiting on.
1101 
1102 	The return value is purely informative.
1103 	The caller must hold the scheduler lock.
1104 
1105 	\param thread The thread.
1106 	\return The ID of the semaphore the thread is currently waiting on or \c -1,
1107 		if it isn't waiting on a semaphore.
1108 */
1109 static sem_id
1110 get_thread_wait_sem(Thread* thread)
1111 {
1112 	if (thread->state == B_THREAD_WAITING
1113 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
1114 		return (sem_id)(addr_t)thread->wait.object;
1115 	}
1116 	return -1;
1117 }
1118 
1119 
1120 /*!	Fills the thread_info structure with information from the specified thread.
1121 	The caller must hold the thread's lock and the scheduler lock.
1122 */
1123 static void
1124 fill_thread_info(Thread *thread, thread_info *info, size_t size)
1125 {
1126 	info->thread = thread->id;
1127 	info->team = thread->team->id;
1128 
1129 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
1130 
1131 	info->sem = -1;
1132 
1133 	if (thread->state == B_THREAD_WAITING) {
1134 		info->state = B_THREAD_WAITING;
1135 
1136 		switch (thread->wait.type) {
1137 			case THREAD_BLOCK_TYPE_SNOOZE:
1138 				info->state = B_THREAD_ASLEEP;
1139 				break;
1140 
1141 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1142 			{
1143 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1144 				if (sem == thread->msg.read_sem)
1145 					info->state = B_THREAD_RECEIVING;
1146 				else
1147 					info->sem = sem;
1148 				break;
1149 			}
1150 
1151 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1152 			default:
1153 				break;
1154 		}
1155 	} else
1156 		info->state = (thread_state)thread->state;
1157 
1158 	info->priority = thread->priority;
1159 	info->stack_base = (void *)thread->user_stack_base;
1160 	info->stack_end = (void *)(thread->user_stack_base
1161 		+ thread->user_stack_size);
1162 
1163 	InterruptsSpinLocker threadTimeLocker(thread->time_lock);
1164 	info->user_time = thread->user_time;
1165 	info->kernel_time = thread->kernel_time;
1166 }
1167 
1168 
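/*!	Delivers a message to the target thread's single-slot message queue:
	acquires the write semaphore (i.e. waits until a previous message has
	been fetched), copies \a buffer into a kernel buffer, and releases the
	read semaphore to wake up the receiver.
*/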
1169 static status_t
1170 send_data_etc(thread_id id, int32 code, const void *buffer, size_t bufferSize,
1171 	int32 flags)
1172 {
1173 	// get the thread
1174 	Thread *target = Thread::Get(id);
1175 	if (target == NULL)
1176 		return B_BAD_THREAD_ID;
1177 	BReference<Thread> targetReference(target, true);
1178 
1179 	// get the write semaphore
1180 	ThreadLocker targetLocker(target);
1181 	sem_id cachedSem = target->msg.write_sem;
1182 	targetLocker.Unlock();
1183 
1184 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
1185 		return B_NO_MEMORY;
1186 
1187 	status_t status = acquire_sem_etc(cachedSem, 1, flags, 0);
1188 	if (status == B_INTERRUPTED) {
1189 		// we got interrupted by a signal
1190 		return status;
1191 	}
1192 	if (status != B_OK) {
1193 		// Any other acquisition problems may be due to thread deletion
1194 		return B_BAD_THREAD_ID;
1195 	}
1196 
1197 	void* data;
1198 	if (bufferSize > 0) {
1199 		data = malloc(bufferSize);
1200 		if (data == NULL) {
			// release the write slot again, so future senders don't
			// block forever
			release_sem(cachedSem);
1201 			return B_NO_MEMORY;
		}
1202 		if (user_memcpy(data, buffer, bufferSize) != B_OK) {
1203 			free(data);
			release_sem(cachedSem);
1204 			return B_BAD_DATA;
1205 		}
1206 	} else
1207 		data = NULL;
1208 
1209 	targetLocker.Lock();
1210 
1211 	// The target thread could have been deleted at this point.
1212 	if (!target->IsAlive()) {
1213 		targetLocker.Unlock();
1214 		free(data);
1215 		return B_BAD_THREAD_ID;
1216 	}
1217 
1218 	// Save the message information
1219 	target->msg.sender = thread_get_current_thread()->id;
1220 	target->msg.code = code;
1221 	target->msg.size = bufferSize;
1222 	target->msg.buffer = data;
1223 	cachedSem = target->msg.read_sem;
1224 
1225 	targetLocker.Unlock();
1226 
1227 	release_sem(cachedSem);
1228 	return B_OK;
1229 }
1230 
1231 
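/*!	Counterpart to send_data_etc(): waits on the calling thread's read
	semaphore, copies the message payload into \a buffer, sets \a _sender,
	and releases the write semaphore so the next sender can proceed. Returns
	the message code.
*/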
1232 static int32
1233 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
1234 	int32 flags)
1235 {
1236 	Thread *thread = thread_get_current_thread();
1237 	size_t size;
1238 	int32 code;
1239 
1240 	status_t status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
1241 	if (status != B_OK) {
1242 		// Actually, we're not supposed to return error codes
1243 		// but since the only reason this can fail is that we
1244 		// were killed, it's probably okay to do so (but also
1245 		// meaningless).
1246 		return status;
1247 	}
1248 
1249 	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
1250 		size = min_c(bufferSize, thread->msg.size);
1251 		status = user_memcpy(buffer, thread->msg.buffer, size);
1252 		if (status != B_OK) {
1253 			free(thread->msg.buffer);
1254 			release_sem(thread->msg.write_sem);
1255 			return status;
1256 		}
1257 	}
1258 
1259 	*_sender = thread->msg.sender;
1260 	code = thread->msg.code;
1261 
1262 	free(thread->msg.buffer);
1263 	release_sem(thread->msg.write_sem);
1264 
1265 	return code;
1266 }
1267 
1268 
1269 static status_t
1270 common_getrlimit(int resource, struct rlimit * rlp)
1271 {
1272 	if (!rlp)
1273 		return B_BAD_ADDRESS;
1274 
1275 	switch (resource) {
1276 		case RLIMIT_NOFILE:
1277 		case RLIMIT_NOVMON:
1278 			return vfs_getrlimit(resource, rlp);
1279 
1280 		case RLIMIT_CORE:
1281 			rlp->rlim_cur = 0;
1282 			rlp->rlim_max = 0;
1283 			return B_OK;
1284 
1285 		case RLIMIT_STACK:
1286 		{
1287 			Thread *thread = thread_get_current_thread();
1288 			rlp->rlim_cur = thread->user_stack_size;
1289 			rlp->rlim_max = thread->user_stack_size;
1290 			return B_OK;
1291 		}
1292 
1293 		default:
1294 			return EINVAL;
1295 	}
1296 
1297 	return B_OK;
1298 }
1299 
1300 
1301 static status_t
1302 common_setrlimit(int resource, const struct rlimit * rlp)
1303 {
1304 	if (!rlp)
1305 		return B_BAD_ADDRESS;
1306 
1307 	switch (resource) {
1308 		case RLIMIT_NOFILE:
1309 		case RLIMIT_NOVMON:
1310 			return vfs_setrlimit(resource, rlp);
1311 
1312 		case RLIMIT_CORE:
1313 			// We don't support core files, so only allow setting the limits to 0/0.
1314 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
1315 				return EINVAL;
1316 			return B_OK;
1317 
1318 		default:
1319 			return EINVAL;
1320 	}
1321 
1322 	return B_OK;
1323 }
1324 
1325 
1326 static status_t
1327 common_snooze_etc(bigtime_t timeout, clockid_t clockID, uint32 flags,
1328 	bigtime_t* _remainingTime)
1329 {
1330 	switch (clockID) {
1331 		case CLOCK_REALTIME:
1332 			// make sure the B_TIMEOUT_REAL_TIME_BASE flag is set and fall
1333 			// through
1334 			flags |= B_TIMEOUT_REAL_TIME_BASE;
1335 		case CLOCK_MONOTONIC:
1336 		{
1337 			// Store the start time, for the case that we get interrupted and
1338 			// need to return the remaining time. For absolute timeouts we can
1339 			// still get the time later, if needed.
1340 			bigtime_t startTime
1341 				= _remainingTime != NULL && (flags & B_RELATIVE_TIMEOUT) != 0
1342 					? system_time() : 0;
1343 
1344 			Thread* thread = thread_get_current_thread();
1345 
1346 			InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1347 
1348 			thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE,
1349 				NULL);
1350 			status_t status = thread_block_with_timeout_locked(flags, timeout);
1351 
1352 			if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
1353 				return B_OK;
1354 
1355 			// If interrupted, compute the remaining time, if requested.
1356 			if (status == B_INTERRUPTED && _remainingTime != NULL) {
1357 				if ((flags & B_RELATIVE_TIMEOUT) != 0) {
1358 					*_remainingTime = std::max(
1359 						startTime + timeout - system_time(), (bigtime_t)0);
1360 				} else {
1361 					bigtime_t now = (flags & B_TIMEOUT_REAL_TIME_BASE) != 0
1362 						? real_time_clock_usecs() : system_time();
1363 					*_remainingTime = std::max(timeout - now, (bigtime_t)0);
1364 				}
1365 			}
1366 
1367 			return status;
1368 		}
1369 
1370 		case CLOCK_THREAD_CPUTIME_ID:
1371 			// Waiting for ourselves to do something isn't particularly
1372 			// productive.
1373 			return B_BAD_VALUE;
1374 
1375 		case CLOCK_PROCESS_CPUTIME_ID:
1376 		default:
1377 			// We don't have to support those, but we are allowed to. Could be
1378 			// done by creating a UserTimer on the fly with a custom UserEvent
1379 			// that would just wake us up.
1380 			return ENOTSUP;
1381 	}
1382 }
1383 
1384 
1385 //	#pragma mark - debugger calls
1386 
1387 
1388 static int
1389 make_thread_unreal(int argc, char **argv)
1390 {
1391 	int32 id = -1;
1392 
1393 	if (argc > 2) {
1394 		print_debugger_command_usage(argv[0]);
1395 		return 0;
1396 	}
1397 
1398 	if (argc > 1)
1399 		id = strtoul(argv[1], NULL, 0);
1400 
1401 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1402 			Thread* thread = it.Next();) {
1403 		if (id != -1 && thread->id != id)
1404 			continue;
1405 
1406 		if (thread->priority > B_DISPLAY_PRIORITY) {
1407 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
1408 			kprintf("thread %ld made unreal\n", thread->id);
1409 		}
1410 	}
1411 
1412 	return 0;
1413 }
1414 
1415 
1416 static int
1417 set_thread_prio(int argc, char **argv)
1418 {
1419 	int32 id;
1420 	int32 prio;
1421 
1422 	if (argc > 3 || argc < 2) {
1423 		print_debugger_command_usage(argv[0]);
1424 		return 0;
1425 	}
1426 
1427 	prio = strtoul(argv[1], NULL, 0);
1428 	if (prio > THREAD_MAX_SET_PRIORITY)
1429 		prio = THREAD_MAX_SET_PRIORITY;
1430 	if (prio < THREAD_MIN_SET_PRIORITY)
1431 		prio = THREAD_MIN_SET_PRIORITY;
1432 
1433 	if (argc > 2)
1434 		id = strtoul(argv[2], NULL, 0);
1435 	else
1436 		id = thread_get_current_thread()->id;
1437 
1438 	bool found = false;
1439 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1440 			Thread* thread = it.Next();) {
1441 		if (thread->id != id)
1442 			continue;
1443 		thread->priority = thread->next_priority = prio;
1444 		kprintf("thread %ld set to priority %ld\n", id, prio);
1445 		found = true;
1446 		break;
1447 	}
1448 	if (!found)
1449 		kprintf("thread %ld (%#lx) not found\n", id, id);
1450 
1451 	return 0;
1452 }
1453 
1454 
1455 static int
1456 make_thread_suspended(int argc, char **argv)
1457 {
1458 	int32 id;
1459 
1460 	if (argc > 2) {
1461 		print_debugger_command_usage(argv[0]);
1462 		return 0;
1463 	}
1464 
1465 	if (argc == 1)
1466 		id = thread_get_current_thread()->id;
1467 	else
1468 		id = strtoul(argv[1], NULL, 0);
1469 
1470 	bool found = false;
1471 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1472 			Thread* thread = it.Next();) {
1473 		if (thread->id != id)
1474 			continue;
1475 
1476 		thread->next_state = B_THREAD_SUSPENDED;
1477 		kprintf("thread %ld suspended\n", id);
1478 		found = true;
1479 		break;
1480 	}
1481 	if (!found)
1482 		kprintf("thread %ld (%#lx) not found\n", id, id);
1483 
1484 	return 0;
1485 }
1486 
1487 
1488 static int
1489 make_thread_resumed(int argc, char **argv)
1490 {
1491 	int32 id;
1492 
1493 	if (argc != 2) {
1494 		print_debugger_command_usage(argv[0]);
1495 		return 0;
1496 	}
1497 
1498 	// force user to enter a thread id, as using
1499 	// the current thread is usually not intended
1500 	id = strtoul(argv[1], NULL, 0);
1501 
1502 	bool found = false;
1503 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1504 			Thread* thread = it.Next();) {
1505 		if (thread->id != id)
1506 			continue;
1507 
1508 		if (thread->state == B_THREAD_SUSPENDED) {
1509 			scheduler_enqueue_in_run_queue(thread);
1510 			kprintf("thread %ld resumed\n", thread->id);
1511 		}
1512 		found = true;
1513 		break;
1514 	}
1515 	if (!found)
1516 		kprintf("thread %ld (%#lx) not found\n", id, id);
1517 
1518 	return 0;
1519 }
1520 
1521 
1522 static int
1523 drop_into_debugger(int argc, char **argv)
1524 {
1525 	status_t err;
1526 	int32 id;
1527 
1528 	if (argc > 2) {
1529 		print_debugger_command_usage(argv[0]);
1530 		return 0;
1531 	}
1532 
1533 	if (argc == 1)
1534 		id = thread_get_current_thread()->id;
1535 	else
1536 		id = strtoul(argv[1], NULL, 0);
1537 
1538 	err = _user_debug_thread(id);
1539 		// TODO: This is a non-trivial syscall doing some locking, so this is
1540 		// really nasty and may go seriously wrong.
1541 	if (err)
1542 		kprintf("drop failed\n");
1543 	else
1544 		kprintf("thread %ld dropped into user debugger\n", id);
1545 
1546 	return 0;
1547 }
1548 
1549 
1550 /*!	Returns a user-readable string for a thread state.
1551 	Only for use in the kernel debugger.
1552 */
1553 static const char *
1554 state_to_text(Thread *thread, int32 state)
1555 {
1556 	switch (state) {
1557 		case B_THREAD_READY:
1558 			return "ready";
1559 
1560 		case B_THREAD_RUNNING:
1561 			return "running";
1562 
1563 		case B_THREAD_WAITING:
1564 		{
1565 			if (thread != NULL) {
1566 				switch (thread->wait.type) {
1567 					case THREAD_BLOCK_TYPE_SNOOZE:
1568 						return "zzz";
1569 
1570 					case THREAD_BLOCK_TYPE_SEMAPHORE:
1571 					{
1572 						sem_id sem = (sem_id)(addr_t)thread->wait.object;
1573 						if (sem == thread->msg.read_sem)
1574 							return "receive";
1575 						break;
1576 					}
1577 				}
1578 			}
1579 
1580 			return "waiting";
1581 		}
1582 
1583 		case B_THREAD_SUSPENDED:
1584 			return "suspended";
1585 
1586 		case THREAD_STATE_FREE_ON_RESCHED:
1587 			return "death";
1588 
1589 		default:
1590 			return "UNKNOWN";
1591 	}
1592 }
1593 
1594 
1595 static void
1596 print_thread_list_table_head()
1597 {
1598 	kprintf("thread         id  state     wait for   object  cpu pri  stack    "
1599 		"  team  name\n");
1600 }
1601 
1602 
1603 static void
1604 _dump_thread_info(Thread *thread, bool shortInfo)
1605 {
1606 	if (shortInfo) {
1607 		kprintf("%p %6ld  %-10s", thread, thread->id, state_to_text(thread,
1608 			thread->state));
1609 
1610 		// does it block on a semaphore or a condition variable?
1611 		if (thread->state == B_THREAD_WAITING) {
1612 			switch (thread->wait.type) {
1613 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1614 				{
1615 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1616 					if (sem == thread->msg.read_sem)
1617 						kprintf("                    ");
1618 					else
1619 						kprintf("sem  %12ld   ", sem);
1620 					break;
1621 				}
1622 
1623 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1624 					kprintf("cvar   %p   ", thread->wait.object);
1625 					break;
1626 
1627 				case THREAD_BLOCK_TYPE_SNOOZE:
1628 					kprintf("                    ");
1629 					break;
1630 
1631 				case THREAD_BLOCK_TYPE_SIGNAL:
1632 					kprintf("signal              ");
1633 					break;
1634 
1635 				case THREAD_BLOCK_TYPE_MUTEX:
1636 					kprintf("mutex  %p   ", thread->wait.object);
1637 					break;
1638 
1639 				case THREAD_BLOCK_TYPE_RW_LOCK:
1640 					kprintf("rwlock %p   ", thread->wait.object);
1641 					break;
1642 
1643 				case THREAD_BLOCK_TYPE_OTHER:
1644 					kprintf("other               ");
1645 					break;
1646 
1647 				default:
1648 					kprintf("???    %p   ", thread->wait.object);
1649 					break;
1650 			}
1651 		} else
1652 			kprintf("        -           ");
1653 
1654 		// on which CPU does it run?
1655 		if (thread->cpu)
1656 			kprintf("%2d", thread->cpu->cpu_num);
1657 		else
1658 			kprintf(" -");
1659 
1660 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1661 			(void *)thread->kernel_stack_base, thread->team->id,
1662 			thread->name != NULL ? thread->name : "<NULL>");
1663 
1664 		return;
1665 	}
1666 
1667 	// print the long info
1668 
1669 	struct thread_death_entry *death = NULL;
1670 
1671 	kprintf("THREAD: %p\n", thread);
1672 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1673 	kprintf("serial_number:      %" B_PRId64 "\n", thread->serial_number);
1674 	kprintf("name:               \"%s\"\n", thread->name);
1675 	kprintf("hash_next:          %p\nteam_next:          %p\nq_next:             %p\n",
1676 		thread->hash_next, thread->team_next, thread->queue_next);
1677 	kprintf("priority:           %ld (next %ld, I/O: %ld)\n", thread->priority,
1678 		thread->next_priority, thread->io_priority);
1679 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1680 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1681 	kprintf("cpu:                %p ", thread->cpu);
1682 	if (thread->cpu)
1683 		kprintf("(%d)\n", thread->cpu->cpu_num);
1684 	else
1685 		kprintf("\n");
1686 	kprintf("sig_pending:        %#llx (blocked: %#llx"
1687 		", before sigsuspend(): %#llx)\n",
1688 		(long long)thread->ThreadPendingSignals(),
1689 		(long long)thread->sig_block_mask,
1690 		(long long)thread->sigsuspend_original_unblocked_mask);
1691 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1692 
1693 	if (thread->state == B_THREAD_WAITING) {
1694 		kprintf("waiting for:        ");
1695 
1696 		switch (thread->wait.type) {
1697 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1698 			{
1699 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1700 				if (sem == thread->msg.read_sem)
1701 					kprintf("data\n");
1702 				else
1703 					kprintf("semaphore %ld\n", sem);
1704 				break;
1705 			}
1706 
1707 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1708 				kprintf("condition variable %p\n", thread->wait.object);
1709 				break;
1710 
1711 			case THREAD_BLOCK_TYPE_SNOOZE:
1712 				kprintf("snooze()\n");
1713 				break;
1714 
1715 			case THREAD_BLOCK_TYPE_SIGNAL:
1716 				kprintf("signal\n");
1717 				break;
1718 
1719 			case THREAD_BLOCK_TYPE_MUTEX:
1720 				kprintf("mutex %p\n", thread->wait.object);
1721 				break;
1722 
1723 			case THREAD_BLOCK_TYPE_RW_LOCK:
1724 				kprintf("rwlock %p\n", thread->wait.object);
1725 				break;
1726 
1727 			case THREAD_BLOCK_TYPE_OTHER:
1728 				kprintf("other (%s)\n", (char*)thread->wait.object);
1729 				break;
1730 
1731 			default:
1732 				kprintf("unknown (%p)\n", thread->wait.object);
1733 				break;
1734 		}
1735 	}
1736 
1737 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1738 	kprintf("team:               %p, \"%s\"\n", thread->team,
1739 		thread->team->Name());
1740 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1741 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1742 	kprintf("  exit.waiters:\n");
1743 	while ((death = (struct thread_death_entry*)list_get_next_item(
1744 			&thread->exit.waiters, death)) != NULL) {
1745 		kprintf("\t%p (thread %ld)\n", death, death->thread);
1746 	}
1747 
1748 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1749 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1750 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1751 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1752 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1753 	kprintf("user_thread:        %p\n", (void *)thread->user_thread);
1754 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1755 		strerror(thread->kernel_errno));
1756 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1757 	kprintf("user_time:          %Ld\n", thread->user_time);
1758 	kprintf("flags:              0x%lx\n", thread->flags);
1759 	kprintf("architecture dependent section:\n");
1760 	arch_thread_dump_info(&thread->arch_info);
1761 }
1762 
1763 
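/*!	KDL command: prints information about the threads given by ID, name, or
	Thread address ("-s" selects the tabular short format); without
	arguments, the current thread.
*/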
1764 static int
1765 dump_thread_info(int argc, char **argv)
1766 {
1767 	bool shortInfo = false;
1768 	int argi = 1;
1769 	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
1770 		shortInfo = true;
1771 		print_thread_list_table_head();
1772 		argi++;
1773 	}
1774 
1775 	if (argi == argc) {
1776 		_dump_thread_info(thread_get_current_thread(), shortInfo);
1777 		return 0;
1778 	}
1779 
1780 	for (; argi < argc; argi++) {
1781 		const char *name = argv[argi];
1782 		int32 id = strtoul(name, NULL, 0);
1783 
1784 		if (IS_KERNEL_ADDRESS(id)) {
1785 			// semi-hack
1786 			_dump_thread_info((Thread *)id, shortInfo);
1787 			continue;
1788 		}
1789 
1790 		// walk through the thread list, trying to match name or id
1791 		bool found = false;
1792 		for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1793 				Thread* thread = it.Next();) {
1794 			if (!strcmp(name, thread->name) || thread->id == id) {
1795 				_dump_thread_info(thread, shortInfo);
1796 				found = true;
1797 				break;
1798 			}
1799 		}
1800 
1801 		if (!found)
1802 			kprintf("thread \"%s\" (%ld) doesn't exist!\n", name, id);
1803 	}
1804 
1805 	return 0;
1806 }
1807 
1808 
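/*!	KDL command: lists the threads matching the filter implied by the name
	the command was invoked with ("ready", "running", "waiting", "realtime",
	"calling") and its optional arguments.
*/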
1809 static int
1810 dump_thread_list(int argc, char **argv)
1811 {
1812 	bool realTimeOnly = false;
1813 	bool calling = false;
1814 	const char *callSymbol = NULL;
1815 	addr_t callStart = 0;
1816 	addr_t callEnd = 0;
1817 	int32 requiredState = 0;
1818 	team_id team = -1;
1819 	sem_id sem = -1;
1820 
1821 	if (!strcmp(argv[0], "realtime"))
1822 		realTimeOnly = true;
1823 	else if (!strcmp(argv[0], "ready"))
1824 		requiredState = B_THREAD_READY;
1825 	else if (!strcmp(argv[0], "running"))
1826 		requiredState = B_THREAD_RUNNING;
1827 	else if (!strcmp(argv[0], "waiting")) {
1828 		requiredState = B_THREAD_WAITING;
1829 
1830 		if (argc > 1) {
1831 			sem = strtoul(argv[1], NULL, 0);
1832 			if (sem == 0)
1833 				kprintf("ignoring invalid semaphore argument.\n");
1834 		}
1835 	} else if (!strcmp(argv[0], "calling")) {
1836 		if (argc < 2) {
1837 			kprintf("Need to give a symbol name or start and end arguments.\n");
1838 			return 0;
1839 		} else if (argc == 3) {
1840 			callStart = parse_expression(argv[1]);
1841 			callEnd = parse_expression(argv[2]);
1842 		} else
1843 			callSymbol = argv[1];
1844 
1845 		calling = true;
1846 	} else if (argc > 1) {
1847 		team = strtoul(argv[1], NULL, 0);
1848 		if (team == 0)
1849 			kprintf("ignoring invalid team argument.\n");
1850 	}
1851 
1852 	print_thread_list_table_head();
1853 
1854 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1855 			Thread* thread = it.Next();) {
1856 		// filter out threads not matching the search criteria
1857 		if ((requiredState && thread->state != requiredState)
1858 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1859 					callStart, callEnd))
1860 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1861 			|| (team > 0 && thread->team->id != team)
1862 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1863 			continue;
1864 
1865 		_dump_thread_info(thread, true);
1866 	}
1867 	return 0;
1868 }
1869 
1870 
1871 //	#pragma mark - private kernel API
1872 
1873 
1874 void
1875 thread_exit(void)
1876 {
1877 	cpu_status state;
1878 	Thread* thread = thread_get_current_thread();
1879 	Team* team = thread->team;
1880 	Team* kernelTeam = team_get_kernel_team();
1881 	status_t status;
1882 	struct thread_debug_info debugInfo;
1883 	team_id teamID = team->id;
1884 
1885 	TRACE(("thread %ld exiting w/return code %#lx\n", thread->id,
1886 		thread->exit.status));
1887 
1888 	if (!are_interrupts_enabled())
1889 		panic("thread_exit() called with interrupts disabled!\n");
1890 
1891 	// boost our priority to get this over with
1892 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1893 
1894 	if (team != kernelTeam) {
1895 		// Cancel previously installed alarm timer, if any. Hold the scheduler
1896 		// lock to make sure that when cancel_timer() returns, the alarm timer
1897 		// hook will not be invoked anymore (since
1898 		// B_TIMER_ACQUIRE_SCHEDULER_LOCK is used).
1899 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1900 		cancel_timer(&thread->alarm);
1901 		schedulerLocker.Unlock();
1902 
1903 		// Delete all user timers associated with the thread.
1904 		ThreadLocker threadLocker(thread);
1905 		thread->DeleteUserTimers(false);
1906 
1907 		// detach the thread's user thread
1908 		user_thread* userThread = thread->user_thread;
1909 		thread->user_thread = NULL;
1910 
1911 		threadLocker.Unlock();
1912 
1913 		// Delete the thread's user thread, if it's not the main thread. If it
1914 		// is, we can save the work, since it will be deleted with the team's
1915 		// address space.
1916 		if (thread != team->main_thread)
1917 			team_free_user_thread(team, userThread);
1918 	}
1919 
1920 	// remember the user stack area -- we will delete it below
1921 	area_id userStackArea = -1;
1922 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1923 		userStackArea = thread->user_stack_area;
1924 		thread->user_stack_area = -1;
1925 	}
1926 
1927 	struct job_control_entry *death = NULL;
1928 	struct thread_death_entry* threadDeathEntry = NULL;
1929 	bool deleteTeam = false;
1930 	port_id debuggerPort = -1;
1931 
1932 	if (team != kernelTeam) {
1933 		user_debug_thread_exiting(thread);
1934 
1935 		if (team->main_thread == thread) {
1936 			// The main thread is exiting. Shut down the whole team.
1937 			deleteTeam = true;
1938 
1939 			// kill off all other threads and the user debugger facilities
1940 			debuggerPort = team_shutdown_team(team);
1941 
1942 			// acquire necessary locks, which are: process group lock, kernel
1943 			// team lock, parent team lock, and the team lock
1944 			team->LockProcessGroup();
1945 			kernelTeam->Lock();
1946 			team->LockTeamAndParent(true);
1947 		} else {
1948 			threadDeathEntry
1949 				= (thread_death_entry*)malloc(sizeof(thread_death_entry));
1950 
1951 			// acquire necessary locks, which are: kernel team lock and the team
1952 			// lock
1953 			kernelTeam->Lock();
1954 			team->Lock();
1955 		}
1956 
1957 		ThreadLocker threadLocker(thread);
1958 
1959 		state = disable_interrupts();
1960 
1961 		// swap address spaces, to make sure we're running on the kernel's pgdir
1962 		vm_swap_address_space(team->address_space, VMAddressSpace::Kernel());
1963 
1964 		SpinLocker schedulerLocker(gSchedulerLock);
1965 		// Removing the thread and adding its death entry to the parent
1966 		// team needs to be an atomic operation.
1967 
1968 		// remember how long this thread lasted
1969 		bigtime_t now = system_time();
1970 		InterruptsSpinLocker threadTimeLocker(thread->time_lock);
1971 		thread->kernel_time += now - thread->last_time;
1972 		thread->last_time = now;
1973 		threadTimeLocker.Unlock();
1974 
1975 		team->dead_threads_kernel_time += thread->kernel_time;
1976 		team->dead_threads_user_time += thread->user_time;
1977 
1978 		// stop/update thread/team CPU time user timers
1979 		if (thread->HasActiveCPUTimeUserTimers()
1980 			|| team->HasActiveCPUTimeUserTimers()) {
1981 			user_timer_stop_cpu_timers(thread, NULL);
1982 		}
1983 
1984 		// deactivate CPU time user timers for the thread
1985 		if (thread->HasActiveCPUTimeUserTimers())
1986 			thread->DeactivateCPUTimeUserTimers();
1987 
1988 		// put the thread into the kernel team until it dies
1989 		remove_thread_from_team(team, thread);
1990 		insert_thread_into_team(kernelTeam, thread);
1991 
1992 		if (team->death_entry != NULL) {
1993 			if (--team->death_entry->remaining_threads == 0)
1994 				team->death_entry->condition.NotifyOne(true, B_OK);
1995 		}
1996 
1997 		if (deleteTeam) {
1998 			Team* parent = team->parent;
1999 
2000 			// Set the team job control state to "dead" and detach the job
2001 			// control entry from our team struct.
2002 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, NULL,
2003 				true);
2004 			death = team->job_control_entry;
2005 			team->job_control_entry = NULL;
2006 
2007 			if (death != NULL) {
2008 				death->InitDeadState();
2009 
2010 				// team_set_job_control_state() already moved our entry
2011 				// into the parent's list. We just check the soft limit of
2012 				// death entries.
2013 				if (parent->dead_children.count > MAX_DEAD_CHILDREN) {
2014 					death = parent->dead_children.entries.RemoveHead();
2015 					parent->dead_children.count--;
2016 				} else
2017 					death = NULL;
2018 			}
2019 
2020 			schedulerLocker.Unlock();
2021 			restore_interrupts(state);
2022 
2023 			threadLocker.Unlock();
2024 
2025 			// Get a temporary reference to the team's process group
2026 			// -- team_remove_team() removes the team from the group, which
2027 			// might destroy it otherwise and we wouldn't be able to unlock it.
2028 			ProcessGroup* group = team->group;
2029 			group->AcquireReference();
2030 
2031 			pid_t foregroundGroupToSignal;
2032 			team_remove_team(team, foregroundGroupToSignal);
2033 
2034 			// unlock everything but the parent team
2035 			team->Unlock();
2036 			if (parent != kernelTeam)
2037 				kernelTeam->Unlock();
2038 			group->Unlock();
2039 			group->ReleaseReference();
2040 
2041 			// Send SIGCHLD to the parent as long as we still have its lock.
2042 			// This makes job control state change + signalling atomic.
2043 			Signal childSignal(SIGCHLD, team->exit.reason, B_OK, team->id);
2044 			if (team->exit.reason == CLD_EXITED) {
2045 				childSignal.SetStatus(team->exit.status);
2046 			} else {
2047 				childSignal.SetStatus(team->exit.signal);
2048 				childSignal.SetSendingUser(team->exit.signaling_user);
2049 			}
2050 			send_signal_to_team(parent, childSignal, B_DO_NOT_RESCHEDULE);
2051 
2052 			// also unlock the parent
2053 			parent->Unlock();
2054 
2055 			// If the team was a session leader with a controlling TTY, we
2056 			// have to send SIGHUP to the foreground process group.
2057 			if (foregroundGroupToSignal >= 0) {
2058 				Signal groupSignal(SIGHUP, SI_USER, B_OK, team->id);
2059 				send_signal_to_process_group(foregroundGroupToSignal,
2060 					groupSignal, B_DO_NOT_RESCHEDULE);
2061 			}
2062 		} else {
2063 			// The thread is not the main thread. We store a thread death entry
2064 			// for it, unless someone is already waiting for it.
2065 			if (threadDeathEntry != NULL
2066 				&& list_is_empty(&thread->exit.waiters)) {
2067 				threadDeathEntry->thread = thread->id;
2068 				threadDeathEntry->status = thread->exit.status;
2069 
2070 				// add entry -- remove an old one, if we hit the limit
2071 				list_add_item(&team->dead_threads, threadDeathEntry);
2072 				team->dead_threads_count++;
2073 				threadDeathEntry = NULL;
2074 
2075 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
2076 					threadDeathEntry
2077 						= (thread_death_entry*)list_remove_head_item(
2078 							&team->dead_threads);
2079 					team->dead_threads_count--;
2080 				}
2081 			}
2082 
2083 			schedulerLocker.Unlock();
2084 			restore_interrupts(state);
2085 
2086 			threadLocker.Unlock();
2087 			team->Unlock();
2088 			kernelTeam->Unlock();
2089 		}
2090 
2091 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
2092 	}
2093 
2094 	free(threadDeathEntry);
2095 
2096 	// delete the team if we're its main thread
2097 	if (deleteTeam) {
2098 		team_delete_team(team, debuggerPort);
2099 
2100 		// we need to delete any death entry that made it to here
2101 		delete death;
2102 	}
2103 
2104 	ThreadLocker threadLocker(thread);
2105 
2106 	state = disable_interrupts();
2107 	SpinLocker schedulerLocker(gSchedulerLock);
2108 
2109 	// mark invisible in global hash/list, so it's no longer accessible
2110 	SpinLocker threadHashLocker(sThreadHashLock);
2111 	thread->visible = false;
2112 	sUsedThreads--;
2113 	threadHashLocker.Unlock();
2114 
2115 	// Stop debugging for this thread
2116 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
2117 	debugInfo = thread->debug_info;
2118 	clear_thread_debug_info(&thread->debug_info, true);
2119 	threadDebugInfoLocker.Unlock();
2120 
2121 	// Remove the select infos. We notify them a little later.
2122 	select_info* selectInfos = thread->select_infos;
2123 	thread->select_infos = NULL;
2124 
2125 	schedulerLocker.Unlock();
2126 	restore_interrupts(state);
2127 
2128 	threadLocker.Unlock();
2129 
2130 	destroy_thread_debug_info(&debugInfo);
2131 
2132 	// notify select infos
2133 	select_info* info = selectInfos;
2134 	while (info != NULL) {
2135 		select_sync* sync = info->sync;
2136 
2137 		notify_select_events(info, B_EVENT_INVALID);
2138 		info = info->next;
2139 		put_select_sync(sync);
2140 	}
2141 
2142 	// notify listeners
2143 	sNotificationService.Notify(THREAD_REMOVED, thread);
2144 
2145 	// shut down the thread messaging
2146 
2147 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
2148 	if (status == B_WOULD_BLOCK) {
2149 		// there is data waiting for us, so let us eat it
2150 		thread_id sender;
2151 
2152 		delete_sem(thread->msg.write_sem);
2153 			// first, let's remove all possibly waiting writers
2154 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
2155 	} else {
2156 		// we probably own the semaphore here, and we're the last to do so
2157 		delete_sem(thread->msg.write_sem);
2158 	}
2159 	// now we can safely remove the msg.read_sem
2160 	delete_sem(thread->msg.read_sem);
2161 
2162 	// fill all death entries and delete the sem that others will use to wait
2163 	// for us
2164 	{
2165 		sem_id cachedExitSem = thread->exit.sem;
2166 
2167 		ThreadLocker threadLocker(thread);
2168 
2169 		// make sure no one will grab this semaphore again
2170 		thread->exit.sem = -1;
2171 
2172 		// fill all death entries
2173 		thread_death_entry* entry = NULL;
2174 		while ((entry = (thread_death_entry*)list_get_next_item(
2175 				&thread->exit.waiters, entry)) != NULL) {
2176 			entry->status = thread->exit.status;
2177 		}
2178 
2179 		threadLocker.Unlock();
2180 
2181 		delete_sem(cachedExitSem);
2182 	}
2183 
2184 	// delete the user stack, if this was a user thread
2185 	if (!deleteTeam && userStackArea >= 0) {
2186 		// We postponed deleting the user stack until now, since this way all
2187 		// notifications for the thread's death are out already and all other
2188 		// threads waiting for this thread's death and some object on its stack
2189 		// will wake up before we (try to) delete the stack area. Of most
2190 		// relevance is probably the case where this is the main thread and
2191 		// other threads use objects on its stack -- so we want them terminated
2192 		// first.
2193 		// When the team is deleted, all areas are deleted anyway, so we don't
2194 		// need to do that explicitly in that case.
2195 		vm_delete_area(teamID, userStackArea, true);
2196 	}
2197 
2198 	// notify the debugger
2199 	if (teamID != kernelTeam->id)
2200 		user_debug_thread_deleted(teamID, thread->id);
2201 
2202 	// enqueue in the undertaker list and reschedule for the last time
2203 	UndertakerEntry undertakerEntry(thread, teamID);
2204 
2205 	disable_interrupts();
2206 	schedulerLocker.Lock();
2207 
2208 	sUndertakerEntries.Add(&undertakerEntry);
2209 	sUndertakerCondition.NotifyOne(true);
2210 
2211 	thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
2212 	scheduler_reschedule();
2213 
2214 	panic("never can get here\n");
2215 }
2216 
2217 
2218 /*!	Called in the interrupt handler code when a thread enters
2219 	the kernel for any reason.
2220 	Only tracks time for now.
2221 	Interrupts are disabled.
2222 */
2223 void
2224 thread_at_kernel_entry(bigtime_t now)
2225 {
2226 	Thread *thread = thread_get_current_thread();
2227 
2228 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
2229 
2230 	// track user time
2231 	SpinLocker threadTimeLocker(thread->time_lock);
2232 	thread->user_time += now - thread->last_time;
2233 	thread->last_time = now;
2234 	thread->in_kernel = true;
2235 	threadTimeLocker.Unlock();
2236 }
2237 
2238 
2239 /*!	Called whenever a thread exits kernel space to user space.
2240 	Tracks time, handles signals, ...
2241 	Interrupts must be enabled. When the function returns, interrupts will be
2242 	disabled.
2243 	The function may not return. This e.g. happens when the thread has received
2244 	a deadly signal.
2245 */
2246 void
2247 thread_at_kernel_exit(void)
2248 {
2249 	Thread *thread = thread_get_current_thread();
2250 
2251 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
2252 
2253 	handle_signals(thread);
2254 
2255 	disable_interrupts();
2256 
2257 	// track kernel time
2258 	bigtime_t now = system_time();
2259 	SpinLocker threadTimeLocker(thread->time_lock);
2260 	thread->in_kernel = false;
2261 	thread->kernel_time += now - thread->last_time;
2262 	thread->last_time = now;
2263 }
2264 
2265 
2266 /*!	The quick version of thread_at_kernel_exit(), for use when no signals
2267 	are pending and no debugging needs to be done.
2268 	Interrupts must be disabled.
2269 */
2270 void
2271 thread_at_kernel_exit_no_signals(void)
2272 {
2273 	Thread *thread = thread_get_current_thread();
2274 
2275 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
2276 
2277 	// track kernel time
2278 	bigtime_t now = system_time();
2279 	SpinLocker threadTimeLocker(thread->time_lock);
2280 	thread->in_kernel = false;
2281 	thread->kernel_time += now - thread->last_time;
2282 	thread->last_time = now;
2283 }
2284 
2285 
2286 void
2287 thread_reset_for_exec(void)
2288 {
2289 	Thread* thread = thread_get_current_thread();
2290 
2291 	ThreadLocker threadLocker(thread);
2292 
2293 	// delete user-defined timers
2294 	thread->DeleteUserTimers(true);
2295 
2296 	// cancel pre-defined timer
2297 	if (UserTimer* timer = thread->UserTimerFor(USER_TIMER_REAL_TIME_ID))
2298 		timer->Cancel();
2299 
2300 	// reset user_thread and user stack
2301 	thread->user_thread = NULL;
2302 	thread->user_stack_area = -1;
2303 	thread->user_stack_base = 0;
2304 	thread->user_stack_size = 0;
2305 
2306 	// reset signals
2307 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2308 
2309 	thread->ResetSignalsOnExec();
2310 
2311 	// reset thread CPU time clock
2312 	thread->cpu_clock_offset = -thread->CPUTime(false);
2313 
2314 	// Note: We don't cancel an alarm. It is supposed to survive exec*().
2315 }
2316 
2317 
2318 /*!	Inserts a thread at the tail of a queue. */
2319 void
2320 thread_enqueue(Thread *thread, struct thread_queue *queue)
2321 {
2322 	thread->queue_next = NULL;
2323 	if (queue->head == NULL) {
2324 		queue->head = thread;
2325 		queue->tail = thread;
2326 	} else {
2327 		queue->tail->queue_next = thread;
2328 		queue->tail = thread;
2329 	}
2330 }
2331 
2332 
2333 Thread *
2334 thread_lookat_queue(struct thread_queue *queue)
2335 {
2336 	return queue->head;
2337 }
2338 
2339 
2340 Thread *
2341 thread_dequeue(struct thread_queue *queue)
2342 {
2343 	Thread *thread = queue->head;
2344 
2345 	if (thread != NULL) {
2346 		queue->head = thread->queue_next;
2347 		if (queue->tail == thread)
2348 			queue->tail = NULL;
2349 	}
2350 	return thread;
2351 }
2352 
2353 
2354 Thread *
2355 thread_dequeue_id(struct thread_queue *q, thread_id id)
2356 {
2357 	Thread *thread;
2358 	Thread *last = NULL;
2359 
2360 	thread = q->head;
2361 	while (thread != NULL) {
2362 		if (thread->id == id) {
2363 			if (last == NULL)
2364 				q->head = thread->queue_next;
2365 			else
2366 				last->queue_next = thread->queue_next;
2367 
2368 			if (q->tail == thread)
2369 				q->tail = last;
2370 			break;
2371 		}
2372 		last = thread;
2373 		thread = thread->queue_next;
2374 	}
2375 	return thread;
2376 }
2377 
2378 
2379 thread_id
2380 allocate_thread_id()
2381 {
2382 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
2383 
2384 	// find the next unused ID
2385 	thread_id id;
2386 	do {
2387 		id = sNextThreadID++;
2388 
2389 		// deal with integer overflow
2390 		if (sNextThreadID < 0)
2391 			sNextThreadID = 2;
2392 
2393 		// check whether the ID is already in use
2394 	} while (sThreadHash.Lookup(id, false) != NULL);
2395 
2396 	return id;
2397 }
2398 
2399 
2400 thread_id
2401 peek_next_thread_id()
2402 {
2403 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
2404 	return sNextThreadID;
2405 }
2406 
2407 
2408 /*!	Yield the CPU to other threads.
2409 	If \a force is \c true, the thread will almost certainly be unscheduled.
2410 	If \c false, it may well continue to run: if there's no other thread in
2411 	ready state, or if it has a higher priority than the other ready threads,
2412 	it still has a good chance to continue.
2413 */
2414 void
2415 thread_yield(bool force)
2416 {
2417 	if (force) {
2418 		// snooze for roughly three thread quanta
2419 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
2420 #if 0
2421 		cpu_status state;
2422 
2423 		Thread *thread = thread_get_current_thread();
2424 		if (thread == NULL)
2425 			return;
2426 
2427 		InterruptsSpinLocker _(gSchedulerLock);
2428 
2429 		// mark the thread as yielded, so it will not be scheduled next
2430 		//thread->was_yielded = true;
2431 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
2432 		scheduler_reschedule();
2433 #endif
2434 	} else {
2435 		Thread *thread = thread_get_current_thread();
2436 		if (thread == NULL)
2437 			return;
2438 
2439 		// Don't force the thread off the CPU, just reschedule.
2440 		InterruptsSpinLocker _(gSchedulerLock);
2441 		scheduler_reschedule();
2442 	}
2443 }
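

/* Example (sketch): a polling loop that cooperates with the scheduler by
   yielding between polls; poll_condition() is a hypothetical predicate.

	while (!poll_condition())
		thread_yield(false);
			// give other ready threads a chance to run between polls
*/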
2444 
2445 
2446 /*!	Kernel private thread creation function.
2447 */
2448 thread_id
2449 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
2450 	void *arg, team_id team)
2451 {
2452 	return thread_create_thread(
2453 		ThreadCreationAttributes(function, name, priority, arg, team),
2454 		true);
2455 }
2456 
2457 
2458 status_t
2459 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
2460 	status_t *_returnCode)
2461 {
2462 	if (id < 0)
2463 		return B_BAD_THREAD_ID;
2464 
2465 	// get the thread, queue our death entry, and fetch the semaphore we have to
2466 	// wait on
2467 	sem_id exitSem = B_BAD_THREAD_ID;
2468 	struct thread_death_entry death;
2469 
2470 	Thread* thread = Thread::GetAndLock(id);
2471 	if (thread != NULL) {
2472 		// remember the semaphore we have to wait on and place our death entry
2473 		exitSem = thread->exit.sem;
2474 		if (exitSem >= 0)
2475 			list_add_link_to_head(&thread->exit.waiters, &death);
2476 
2477 		thread->UnlockAndReleaseReference();
2478 
2479 		if (exitSem < 0)
2480 			return B_BAD_THREAD_ID;
2481 	} else {
2482 		// we couldn't find this thread -- maybe it's already gone, and we'll
2483 		// find its death entry in our team
2484 		Team* team = thread_get_current_thread()->team;
2485 		TeamLocker teamLocker(team);
2486 
2487 		// check the child death entries first (i.e. main threads of child
2488 		// teams)
2489 		bool deleteEntry;
2490 		job_control_entry* freeDeath
2491 			= team_get_death_entry(team, id, &deleteEntry);
2492 		if (freeDeath != NULL) {
2493 			death.status = freeDeath->status;
2494 			if (deleteEntry)
2495 				delete freeDeath;
2496 		} else {
2497 			// check the thread death entries of the team (non-main threads)
2498 			thread_death_entry* threadDeathEntry = NULL;
2499 			while ((threadDeathEntry = (thread_death_entry*)list_get_next_item(
2500 					&team->dead_threads, threadDeathEntry)) != NULL) {
2501 				if (threadDeathEntry->thread == id) {
2502 					list_remove_item(&team->dead_threads, threadDeathEntry);
2503 					team->dead_threads_count--;
2504 					death.status = threadDeathEntry->status;
2505 					free(threadDeathEntry);
2506 					break;
2507 				}
2508 			}
2509 
2510 			if (threadDeathEntry == NULL)
2511 				return B_BAD_THREAD_ID;
2512 		}
2513 
2514 		// we found the thread's death entry in our team
2515 		if (_returnCode)
2516 			*_returnCode = death.status;
2517 
2518 		return B_OK;
2519 	}
2520 
2521 	// we need to wait for the death of the thread
2522 
2523 	resume_thread(id);
2524 		// make sure we don't wait forever on a suspended thread
2525 
2526 	status_t status = acquire_sem_etc(exitSem, 1, flags, timeout);
2527 
2528 	if (status == B_OK) {
2529 		// this should never happen as the thread deletes the semaphore on exit
2530 		panic("could acquire exit_sem for thread %ld\n", id);
2531 	} else if (status == B_BAD_SEM_ID) {
2532 		// this is the way the thread normally exits
2533 		status = B_OK;
2534 	} else {
2535 		// We were probably interrupted or the timeout occurred; we need to
2536 		// remove our death entry now.
2537 		thread = Thread::GetAndLock(id);
2538 		if (thread != NULL) {
2539 			list_remove_link(&death);
2540 			thread->UnlockAndReleaseReference();
2541 		} else {
2542 			// The thread is already gone, so we need to wait uninterruptibly
2543 			// for its exit semaphore to make sure our death entry stays valid.
2544 			// It won't take long, since the thread is apparently already in the
2545 			// middle of the cleanup.
2546 			acquire_sem(exitSem);
2547 			status = B_OK;
2548 		}
2549 	}
2550 
2551 	if (status == B_OK && _returnCode != NULL)
2552 		*_returnCode = death.status;
2553 
2554 	return status;
2555 }
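

/* Example (sketch): waiting for a worker thread with a one-second timeout.
   The workerThread ID is assumed to come from spawn_kernel_thread().

	status_t returnCode;
	status_t error = wait_for_thread_etc(workerThread,
		B_RELATIVE_TIMEOUT, 1000000LL, &returnCode);
	if (error == B_TIMED_OUT)
		dprintf("worker has not exited yet\n");
	else if (error == B_OK)
		dprintf("worker exited with %ld\n", (long)returnCode);
*/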
2556 
2557 
2558 status_t
2559 select_thread(int32 id, struct select_info* info, bool kernel)
2560 {
2561 	// get and lock the thread
2562 	Thread* thread = Thread::GetAndLock(id);
2563 	if (thread == NULL)
2564 		return B_BAD_THREAD_ID;
2565 	BReference<Thread> threadReference(thread, true);
2566 	ThreadLocker threadLocker(thread, true);
2567 
2568 	// We support only B_EVENT_INVALID at the moment.
2569 	info->selected_events &= B_EVENT_INVALID;
2570 
2571 	// add info to list
2572 	if (info->selected_events != 0) {
2573 		info->next = thread->select_infos;
2574 		thread->select_infos = info;
2575 
2576 		// we need a sync reference
2577 		atomic_add(&info->sync->ref_count, 1);
2578 	}
2579 
2580 	return B_OK;
2581 }
2582 
2583 
2584 status_t
2585 deselect_thread(int32 id, struct select_info* info, bool kernel)
2586 {
2587 	// get and lock the thread
2588 	Thread* thread = Thread::GetAndLock(id);
2589 	if (thread == NULL)
2590 		return B_BAD_THREAD_ID;
2591 	BReference<Thread> threadReference(thread, true);
2592 	ThreadLocker threadLocker(thread, true);
2593 
2594 	// remove info from list
2595 	select_info** infoLocation = &thread->select_infos;
2596 	while (*infoLocation != NULL && *infoLocation != info)
2597 		infoLocation = &(*infoLocation)->next;
2598 
2599 	if (*infoLocation != info)
2600 		return B_OK;
2601 
2602 	*infoLocation = info->next;
2603 
2604 	threadLocker.Unlock();
2605 
2606 	// surrender sync reference
2607 	put_select_sync(info->sync);
2608 
2609 	return B_OK;
2610 }
2611 
2612 
2613 int32
2614 thread_max_threads(void)
2615 {
2616 	return sMaxThreads;
2617 }
2618 
2619 
2620 int32
2621 thread_used_threads(void)
2622 {
2623 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
2624 	return sUsedThreads;
2625 }
2626 
2627 
2628 /*!	Returns a user-readable string for a thread state.
2629 	Only for use in the kernel debugger.
2630 */
2631 const char*
2632 thread_state_to_text(Thread* thread, int32 state)
2633 {
2634 	return state_to_text(thread, state);
2635 }
2636 
2637 
2638 int32
2639 thread_get_io_priority(thread_id id)
2640 {
2641 	Thread* thread = Thread::GetAndLock(id);
2642 	if (thread == NULL)
2643 		return B_BAD_THREAD_ID;
2644 	BReference<Thread> threadReference(thread, true);
2645 	ThreadLocker threadLocker(thread, true);
2646 
2647 	int32 priority = thread->io_priority;
2648 	if (priority < 0) {
2649 		// negative I/O priority means using the (CPU) priority
2650 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2651 		priority = thread->priority;
2652 	}
2653 
2654 	return priority;
2655 }
2656 
2657 
2658 void
2659 thread_set_io_priority(int32 priority)
2660 {
2661 	Thread* thread = thread_get_current_thread();
2662 	ThreadLocker threadLocker(thread);
2663 
2664 	thread->io_priority = priority;
2665 }
2666 
2667 
2668 status_t
2669 thread_init(kernel_args *args)
2670 {
2671 	TRACE(("thread_init: entry\n"));
2672 
2673 	// create the thread hash table
2674 	new(&sThreadHash) ThreadHashTable();
2675 	if (sThreadHash.Init(128) != B_OK)
2676 		panic("thread_init(): failed to init thread hash table!");
2677 
2678 	// create the thread structure object cache
2679 	sThreadCache = create_object_cache("threads", sizeof(Thread), 16, NULL,
2680 		NULL, NULL);
2681 		// Note: The x86 port requires 16-byte alignment of thread structures.
2682 	if (sThreadCache == NULL)
2683 		panic("thread_init(): failed to allocate thread object cache!");
2684 
2685 	if (arch_thread_init(args) < B_OK)
2686 		panic("arch_thread_init() failed!\n");
2687 
2688 	// skip all thread IDs including B_SYSTEM_TEAM, which is reserved
2689 	sNextThreadID = B_SYSTEM_TEAM + 1;
2690 
2691 	// create an idle thread for each cpu
2692 	for (uint32 i = 0; i < args->num_cpus; i++) {
2693 		Thread *thread;
2694 		area_info info;
2695 		char name[64];
2696 
2697 		sprintf(name, "idle thread %lu", i + 1);
2698 		thread = new(&sIdleThreads[i]) Thread(name,
2699 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
2700 		if (thread == NULL || thread->Init(true) != B_OK) {
2701 			panic("error creating idle thread struct\n");
2702 			return B_NO_MEMORY;
2703 		}
2704 
2705 		gCPU[i].running_thread = thread;
2706 
2707 		thread->team = team_get_kernel_team();
2708 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
2709 		thread->state = B_THREAD_RUNNING;
2710 		thread->next_state = B_THREAD_READY;
2711 		sprintf(name, "idle thread %lu kstack", i + 1);
2712 		thread->kernel_stack_area = find_area(name);
2713 
2714 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2715 			panic("error finding idle kstack area\n");
2716 
2717 		thread->kernel_stack_base = (addr_t)info.address;
2718 		thread->kernel_stack_top = thread->kernel_stack_base + info.size;
2719 
2720 		thread->visible = true;
2721 		insert_thread_into_team(thread->team, thread);
2722 	}
2723 	sUsedThreads = args->num_cpus;
2724 
2725 	// init the notification service
2726 	new(&sNotificationService) ThreadNotificationService();
2727 
2728 	// start the undertaker thread
2729 	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2730 	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2731 
2732 	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2733 		B_DISPLAY_PRIORITY, NULL);
2734 	if (undertakerThread < 0)
2735 		panic("Failed to create undertaker thread!");
2736 	resume_thread(undertakerThread);
2737 
2738 	// set up some debugger commands
2739 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2740 		"[ <team> ]\n"
2741 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2742 		"all threads of the specified team.\n"
2743 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2744 	add_debugger_command_etc("ready", &dump_thread_list,
2745 		"List all ready threads",
2746 		"\n"
2747 		"Prints a list of all threads in ready state.\n", 0);
2748 	add_debugger_command_etc("running", &dump_thread_list,
2749 		"List all running threads",
2750 		"\n"
2751 		"Prints a list of all threads in running state.\n", 0);
2752 	add_debugger_command_etc("waiting", &dump_thread_list,
2753 		"List all waiting threads (optionally for a specific semaphore)",
2754 		"[ <sem> ]\n"
2755 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2756 		"specified, only the threads waiting on that semaphore are listed.\n"
2757 		"  <sem>  - ID of the semaphore.\n", 0);
2758 	add_debugger_command_etc("realtime", &dump_thread_list,
2759 		"List all realtime threads",
2760 		"\n"
2761 		"Prints a list of all threads with realtime priority.\n", 0);
2762 	add_debugger_command_etc("thread", &dump_thread_info,
2763 		"Dump info about a particular thread",
2764 		"[ -s ] ( <id> | <address> | <name> )*\n"
2765 		"Prints information about the specified thread. If no argument is\n"
2766 		"given the current thread is selected.\n"
2767 		"  -s         - Print info in compact table form (like \"threads\").\n"
2768 		"  <id>       - The ID of the thread.\n"
2769 		"  <address>  - The address of the thread structure.\n"
2770 		"  <name>     - The thread's name.\n", 0);
2771 	add_debugger_command_etc("calling", &dump_thread_list,
2772 		"Show all threads that have a specific address in their call chain",
2773 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2774 	add_debugger_command_etc("unreal", &make_thread_unreal,
2775 		"Set realtime priority threads to normal priority",
2776 		"[ <id> ]\n"
2777 		"Sets the priority of all realtime threads or, if given, the one\n"
2778 		"with the specified ID to \"normal\" priority.\n"
2779 		"  <id>  - The ID of the thread.\n", 0);
2780 	add_debugger_command_etc("suspend", &make_thread_suspended,
2781 		"Suspend a thread",
2782 		"[ <id> ]\n"
2783 		"Suspends the thread with the given ID. If no ID argument is given\n"
2784 		"the current thread is selected.\n"
2785 		"  <id>  - The ID of the thread.\n", 0);
2786 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2787 		"<id>\n"
2788 		"Resumes the specified thread, if it is currently suspended.\n"
2789 		"  <id>  - The ID of the thread.\n", 0);
2790 	add_debugger_command_etc("drop", &drop_into_debugger,
2791 		"Drop a thread into the userland debugger",
2792 		"<id>\n"
2793 		"Drops the specified (userland) thread into the userland debugger\n"
2794 		"after leaving the kernel debugger.\n"
2795 		"  <id>  - The ID of the thread.\n", 0);
2796 	add_debugger_command_etc("priority", &set_thread_prio,
2797 		"Set a thread's priority",
2798 		"<priority> [ <id> ]\n"
2799 		"Sets the priority of the thread with the specified ID to the given\n"
2800 		"priority. If no thread ID is given, the current thread is selected.\n"
2801 		"  <priority>  - The thread's new priority (0 - 120)\n"
2802 		"  <id>        - The ID of the thread.\n", 0);
2803 
2804 	return B_OK;
2805 }
2806 
2807 
2808 status_t
2809 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2810 {
2811 	// Set up the cpu pointer in the not-yet-initialized per-CPU idle thread,
2812 	// so that get_current_cpu() and friends will work, which is crucial for
2813 	// a lot of low-level routines.
2814 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2815 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2816 	return B_OK;
2817 }
2818 
2819 
2820 //	#pragma mark - thread blocking API
2821 
2822 
2823 static status_t
2824 thread_block_timeout(timer* timer)
2825 {
2826 	// The timer has been installed with B_TIMER_ACQUIRE_SCHEDULER_LOCK, so
2827 	// we're holding the scheduler lock already. This makes things comfortably
2828 	// easy.
2829 
2830 	Thread* thread = (Thread*)timer->user_data;
2831 	thread_unblock_locked(thread, B_TIMED_OUT);
2832 
2833 	return B_HANDLED_INTERRUPT;
2834 }
2835 
2836 
2837 /*!	Blocks the current thread.
2838 
2839 	The function acquires the scheduler lock and calls thread_block_locked().
2840 	See there for more information.
2841 */
2842 status_t
2843 thread_block()
2844 {
2845 	InterruptsSpinLocker _(gSchedulerLock);
2846 	return thread_block_locked(thread_get_current_thread());
2847 }
2848 
2849 
2850 /*!	Blocks the current thread with a timeout.
2851 
2852 	Acquires the scheduler lock and calls thread_block_with_timeout_locked().
2853 	See there for more information.
2854 */
2855 status_t
2856 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2857 {
2858 	InterruptsSpinLocker _(gSchedulerLock);
2859 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
2860 }
2861 
2862 
2863 /*!	Blocks the current thread with a timeout.
2864 
2865 	The thread is blocked until someone else unblocks it or the specified timeout
2866 	occurs. Must be called after a call to thread_prepare_to_block(). If the
2867 	thread has already been unblocked after the previous call to
2868 	thread_prepare_to_block(), this function will return immediately. See
2869 	thread_prepare_to_block() for more details.
2870 
2871 	The caller must hold the scheduler lock.
2872 
2874 	\param timeoutFlags The standard timeout flags:
2875 		- \c B_RELATIVE_TIMEOUT: \a timeout specifies the time to wait.
2876 		- \c B_ABSOLUTE_TIMEOUT: \a timeout specifies the absolute end time when
2877 			the timeout shall occur.
2878 		- \c B_TIMEOUT_REAL_TIME_BASE: Only relevant when \c B_ABSOLUTE_TIMEOUT
2879 			is specified, too. Specifies that \a timeout is a real time, not a
2880 			system time.
2881 		If neither \c B_RELATIVE_TIMEOUT nor \c B_ABSOLUTE_TIMEOUT are
2882 		specified, an infinite timeout is implied and the function behaves like
2883 		thread_block_locked().
2884 	\return The error code passed to the unblocking function. thread_interrupt()
2885 		uses \c B_INTERRUPTED. If the timeout occurs, \c B_TIMED_OUT is
2886 		returned. By convention \c B_OK means that the wait was successful, while
2887 		another error code indicates a failure (what that means depends on the
2888 		client code).
2889 */
2890 status_t
2891 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
2892 {
2893 	Thread* thread = thread_get_current_thread();
2894 
2895 	if (thread->wait.status != 1)
2896 		return thread->wait.status;
2897 
2898 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2899 		&& timeout != B_INFINITE_TIMEOUT;
2900 
2901 	if (useTimer) {
2902 		// Timer flags: absolute/relative + "acquire thread lock". The latter
2903 		// avoids nasty race conditions and deadlock problems that could
2904 		// otherwise occur between our cancel_timer() and a concurrently
2905 		// executing thread_block_timeout().
2906 		uint32 timerFlags;
2907 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2908 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2909 		} else {
2910 			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2911 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2912 				timerFlags |= B_TIMER_REAL_TIME_BASE;
2913 		}
2914 		timerFlags |= B_TIMER_ACQUIRE_SCHEDULER_LOCK;
2915 
2916 		// install the timer
2917 		thread->wait.unblock_timer.user_data = thread;
2918 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2919 			timerFlags);
2920 	}
2921 
2922 	// block
2923 	status_t error = thread_block_locked(thread);
2924 
2925 	// cancel timer, if it didn't fire
2926 	if (error != B_TIMED_OUT && useTimer)
2927 		cancel_timer(&thread->wait.unblock_timer);
2928 
2929 	return error;
2930 }
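

/* Example (sketch): the canonical use of the blocking API above. A waiter
   publishes itself somewhere a waker can find it, then blocks; the waker,
   holding the scheduler lock, unblocks it. sWaiter is a hypothetical
   variable, protected by whatever lock the client code uses.

	// waiter
	Thread* thread = thread_get_current_thread();
	thread_prepare_to_block(thread, B_CAN_INTERRUPT,
		THREAD_BLOCK_TYPE_OTHER, "example wait");
	sWaiter = thread;
	status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT, 1000000);

	// waker (with gSchedulerLock held)
	if (sWaiter != NULL)
		thread_unblock_locked(sWaiter, B_OK);

   Since thread_prepare_to_block() is called before the thread becomes
   visible to the waker, an unblock arriving before thread_block_with_timeout()
   is not lost -- the block call returns immediately in that case.
*/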
2931 
2932 
2933 /*!	Unblocks a userland-blocked thread.
2934 	The caller must not hold any locks.
2935 */
2936 static status_t
2937 user_unblock_thread(thread_id threadID, status_t status)
2938 {
2939 	// get the thread
2940 	Thread* thread = Thread::GetAndLock(threadID);
2941 	if (thread == NULL)
2942 		return B_BAD_THREAD_ID;
2943 	BReference<Thread> threadReference(thread, true);
2944 	ThreadLocker threadLocker(thread, true);
2945 
2946 	if (thread->user_thread == NULL)
2947 		return B_NOT_ALLOWED;
2948 
2949 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2950 
2951 	if (thread->user_thread->wait_status > 0) {
2952 		thread->user_thread->wait_status = status;
2953 		thread_unblock_locked(thread, status);
2954 	}
2955 
2956 	return B_OK;
2957 }
2958 
2959 
2960 //	#pragma mark - public kernel API
2961 
2962 
2963 void
2964 exit_thread(status_t returnValue)
2965 {
2966 	Thread *thread = thread_get_current_thread();
2967 	Team* team = thread->team;
2968 
2969 	thread->exit.status = returnValue;
2970 
2971 	// If called from a kernel thread, we don't deliver the signal;
2972 	// we just exit directly to keep the user space behaviour of
2973 	// this function.
2974 	if (team != team_get_kernel_team()) {
2975 		// If this is its main thread, set the team's exit status.
2976 		if (thread == team->main_thread) {
2977 			TeamLocker teamLocker(team);
2978 
2979 			if (!team->exit.initialized) {
2980 				team->exit.reason = CLD_EXITED;
2981 				team->exit.signal = 0;
2982 				team->exit.signaling_user = 0;
2983 				team->exit.status = returnValue;
2984 				team->exit.initialized = true;
2985 			}
2986 
2987 			teamLocker.Unlock();
2988 		}
2989 
2990 		Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
2991 		send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
2992 	} else
2993 		thread_exit();
2994 }
2995 
2996 
2997 status_t
2998 kill_thread(thread_id id)
2999 {
3000 	if (id <= 0)
3001 		return B_BAD_VALUE;
3002 
3003 	Thread* currentThread = thread_get_current_thread();
3004 
3005 	Signal signal(SIGKILLTHR, SI_USER, B_OK, currentThread->team->id);
3006 	return send_signal_to_thread_id(id, signal, 0);
3007 }
3008 
3009 
3010 status_t
3011 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
3012 {
3013 	return send_data_etc(thread, code, buffer, bufferSize, 0);
3014 }
3015 
3016 
3017 int32
3018 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
3019 {
3020 	return receive_data_etc(sender, buffer, bufferSize, 0);
3021 }
3022 
3023 
3024 bool
3025 has_data(thread_id thread)
3026 {
3027 	// TODO: The thread argument is ignored.
3028 	int32 count;
3029 
3030 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
3031 			&count) != B_OK)
3032 		return false;
3033 
3034 	return count != 0;
3035 }
3036 
3037 
3038 status_t
3039 _get_thread_info(thread_id id, thread_info *info, size_t size)
3040 {
3041 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
3042 		return B_BAD_VALUE;
3043 
3044 	// get the thread
3045 	Thread* thread = Thread::GetAndLock(id);
3046 	if (thread == NULL)
3047 		return B_BAD_THREAD_ID;
3048 	BReference<Thread> threadReference(thread, true);
3049 	ThreadLocker threadLocker(thread, true);
3050 
3051 	// fill the info -- also requires the scheduler lock to be held
3052 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3053 
3054 	fill_thread_info(thread, info, size);
3055 
3056 	return B_OK;
3057 }
3058 
3059 
3060 status_t
3061 _get_next_thread_info(team_id teamID, int32 *_cookie, thread_info *info,
3062 	size_t size)
3063 {
3064 	if (info == NULL || size != sizeof(thread_info) || teamID < 0)
3065 		return B_BAD_VALUE;
3066 
3067 	int32 lastID = *_cookie;
3068 
3069 	// get the team
3070 	Team* team = Team::GetAndLock(teamID);
3071 	if (team == NULL)
3072 		return B_BAD_VALUE;
3073 	BReference<Team> teamReference(team, true);
3074 	TeamLocker teamLocker(team, true);
3075 
3076 	Thread* thread = NULL;
3077 
3078 	if (lastID == 0) {
3079 		// We start with the main thread
3080 		thread = team->main_thread;
3081 	} else {
3082 		// Find the one thread with an ID greater than ours (as long as the IDs
3083 		// don't wrap, they are always sorted from highest to lowest).
3084 		// TODO: That is broken not only when the IDs wrap, but also for the
3085 		// kernel team, to which threads are added when they are dying.
3086 		for (Thread* next = team->thread_list; next != NULL;
3087 				next = next->team_next) {
3088 			if (next->id <= lastID)
3089 				break;
3090 
3091 			thread = next;
3092 		}
3093 	}
3094 
3095 	if (thread == NULL)
3096 		return B_BAD_VALUE;
3097 
3098 	lastID = thread->id;
3099 	*_cookie = lastID;
3100 
3101 	ThreadLocker threadLocker(thread);
3102 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3103 
3104 	fill_thread_info(thread, info, size);
3105 
3106 	return B_OK;
3107 }
3108 
3109 
3110 thread_id
3111 find_thread(const char* name)
3112 {
3113 	if (name == NULL)
3114 		return thread_get_current_thread_id();
3115 
3116 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
3117 
3118 	// TODO: Scanning the whole hash with the thread hash lock held isn't
3119 	// exactly cheap -- although this function is probably used very rarely.
3120 
3121 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
3122 			Thread* thread = it.Next();) {
3123 		if (!thread->visible)
3124 			continue;
3125 
3126 		if (strcmp(thread->name, name) == 0)
3127 			return thread->id;
3128 	}
3129 
3130 	return B_NAME_NOT_FOUND;
3131 }
3132 
3133 
3134 status_t
3135 rename_thread(thread_id id, const char* name)
3136 {
3137 	if (name == NULL)
3138 		return B_BAD_VALUE;
3139 
3140 	// get the thread
3141 	Thread* thread = Thread::GetAndLock(id);
3142 	if (thread == NULL)
3143 		return B_BAD_THREAD_ID;
3144 	BReference<Thread> threadReference(thread, true);
3145 	ThreadLocker threadLocker(thread, true);
3146 
3147 	// check whether the operation is allowed
3148 	if (thread->team != thread_get_current_thread()->team)
3149 		return B_NOT_ALLOWED;
3150 
3151 	strlcpy(thread->name, name, B_OS_NAME_LENGTH);
3152 
3153 	team_id teamID = thread->team->id;
3154 
3155 	threadLocker.Unlock();
3156 
3157 	// notify listeners
3158 	sNotificationService.Notify(THREAD_NAME_CHANGED, teamID, id);
3159 		// don't pass the thread structure, as it's unsafe if it isn't ours
3160 
3161 	return B_OK;
3162 }
3163 
3164 
3165 status_t
3166 set_thread_priority(thread_id id, int32 priority)
3167 {
3168 	int32 oldPriority;
3169 
3170 	// make sure the passed in priority is within bounds
3171 	if (priority > THREAD_MAX_SET_PRIORITY)
3172 		priority = THREAD_MAX_SET_PRIORITY;
3173 	if (priority < THREAD_MIN_SET_PRIORITY)
3174 		priority = THREAD_MIN_SET_PRIORITY;
3175 
3176 	// get the thread
3177 	Thread* thread = Thread::GetAndLock(id);
3178 	if (thread == NULL)
3179 		return B_BAD_THREAD_ID;
3180 	BReference<Thread> threadReference(thread, true);
3181 	ThreadLocker threadLocker(thread, true);
3182 
3183 	// check whether the change is allowed
3184 	if (thread_is_idle_thread(thread))
3185 		return B_NOT_ALLOWED;
3186 
3187 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3188 
3189 	if (thread == thread_get_current_thread()) {
3190 		// It's ourself, so we know we aren't in the run queue, and we can
3191 		// manipulate our structure directly.
3192 		oldPriority = thread->priority;
3193 		thread->priority = thread->next_priority = priority;
3194 	} else {
3195 		oldPriority = thread->priority;
3196 		scheduler_set_thread_priority(thread, priority);
3197 	}
3198 
3199 	return oldPriority;
3200 }
3201 
3202 
3203 status_t
3204 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
3205 {
3206 	return common_snooze_etc(timeout, timebase, flags, NULL);
3207 }
3208 
3209 
3210 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
3211 status_t
3212 snooze(bigtime_t timeout)
3213 {
3214 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
3215 }
3216 
3217 
3218 /*!	snooze_until() for internal kernel use only; doesn't interrupt on
3219 	signals.
3220 */
3221 status_t
3222 snooze_until(bigtime_t timeout, int timebase)
3223 {
3224 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
3225 }
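

/* Example (sketch): drift-free periodic work using snooze_until() with an
   absolute system-time deadline; do_periodic_work() is hypothetical.

	bigtime_t nextWakeup = system_time();
	while (true) {
		do_periodic_work();
		nextWakeup += 100000;	// 100 ms period
		snooze_until(nextWakeup, B_SYSTEM_TIMEBASE);
	}
*/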
3226 
3227 
3228 status_t
3229 wait_for_thread(thread_id thread, status_t *_returnCode)
3230 {
3231 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
3232 }
3233 
3234 
3235 status_t
3236 suspend_thread(thread_id id)
3237 {
3238 	if (id <= 0)
3239 		return B_BAD_VALUE;
3240 
3241 	Thread* currentThread = thread_get_current_thread();
3242 
3243 	Signal signal(SIGSTOP, SI_USER, B_OK, currentThread->team->id);
3244 	return send_signal_to_thread_id(id, signal, 0);
3245 }
3246 
3247 
3248 status_t
3249 resume_thread(thread_id id)
3250 {
3251 	if (id <= 0)
3252 		return B_BAD_VALUE;
3253 
3254 	Thread* currentThread = thread_get_current_thread();
3255 
3256 	// Using the kernel-internal SIGNAL_CONTINUE_THREAD signal retains
3257 	// compatibility with BeOS, which documents the combination of suspend_thread()
3258 	// and resume_thread() as a way to interrupt threads waiting on semaphores.
3259 	Signal signal(SIGNAL_CONTINUE_THREAD, SI_USER, B_OK,
3260 		currentThread->team->id);
3261 	return send_signal_to_thread_id(id, signal, 0);
3262 }
3263 
3264 
3265 thread_id
3266 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
3267 	void *arg)
3268 {
3269 	return thread_create_thread(
3270 		ThreadCreationAttributes(function, name, priority, arg),
3271 		true);
3272 }
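

/* Example (sketch): the usual spawn/resume/wait pattern for kernel threads,
   as also used by thread_init() for the undertaker. worker_entry() and
   cookie are hypothetical.

	thread_id worker = spawn_kernel_thread(&worker_entry, "example worker",
		B_NORMAL_PRIORITY, cookie);
	if (worker >= 0) {
		resume_thread(worker);
			// threads are created in the suspended state
		status_t returnCode;
		wait_for_thread(worker, &returnCode);
	}
*/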
3273 
3274 
3275 int
3276 getrlimit(int resource, struct rlimit * rlp)
3277 {
3278 	status_t error = common_getrlimit(resource, rlp);
3279 	if (error != B_OK) {
3280 		errno = error;
3281 		return -1;
3282 	}
3283 
3284 	return 0;
3285 }
3286 
3287 
3288 int
3289 setrlimit(int resource, const struct rlimit * rlp)
3290 {
3291 	status_t error = common_setrlimit(resource, rlp);
3292 	if (error != B_OK) {
3293 		errno = error;
3294 		return -1;
3295 	}
3296 
3297 	return 0;
3298 }
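

/* Example (sketch): raising the soft file descriptor limit to the hard
   limit via the wrappers above.

	struct rlimit rl;
	if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
		rl.rlim_cur = rl.rlim_max;
		setrlimit(RLIMIT_NOFILE, &rl);
	}
*/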
3299 
3300 
3301 //	#pragma mark - syscalls
3302 
3303 
3304 void
3305 _user_exit_thread(status_t returnValue)
3306 {
3307 	exit_thread(returnValue);
3308 }
3309 
3310 
3311 status_t
3312 _user_kill_thread(thread_id thread)
3313 {
3314 	// TODO: Don't allow kernel threads to be killed!
3315 	return kill_thread(thread);
3316 }
3317 
3318 
3319 status_t
3320 _user_cancel_thread(thread_id threadID, void (*cancelFunction)(int))
3321 {
3322 	// check the cancel function
3323 	if (cancelFunction == NULL || !IS_USER_ADDRESS(cancelFunction))
3324 		return B_BAD_VALUE;
3325 
3326 	// get and lock the thread
3327 	Thread* thread = Thread::GetAndLock(threadID);
3328 	if (thread == NULL)
3329 		return B_BAD_THREAD_ID;
3330 	BReference<Thread> threadReference(thread, true);
3331 	ThreadLocker threadLocker(thread, true);
3332 
3333 	// only threads of the same team can be canceled
3334 	if (thread->team != thread_get_current_thread()->team)
3335 		return B_NOT_ALLOWED;
3336 
3337 	// set the cancel function
3338 	thread->cancel_function = cancelFunction;
3339 
3340 	// send the cancellation signal to the thread
3341 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3342 	return send_signal_to_thread_locked(thread, SIGNAL_CANCEL_THREAD, NULL, 0);
3343 }
3344 
3345 
3346 status_t
3347 _user_resume_thread(thread_id thread)
3348 {
3349 	// TODO: Don't allow kernel threads to be resumed!
3350 	return resume_thread(thread);
3351 }
3352 
3353 
3354 status_t
3355 _user_suspend_thread(thread_id thread)
3356 {
3357 	// TODO: Don't allow kernel threads to be suspended!
3358 	return suspend_thread(thread);
3359 }
3360 
3361 
3362 status_t
3363 _user_rename_thread(thread_id thread, const char *userName)
3364 {
3365 	char name[B_OS_NAME_LENGTH];
3366 
3367 	if (!IS_USER_ADDRESS(userName)
3368 		|| userName == NULL
3369 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
3370 		return B_BAD_ADDRESS;
3371 
3372 	// TODO: Don't allow kernel threads to be renamed!
3373 	return rename_thread(thread, name);
3374 }
3375 
3376 
3377 int32
3378 _user_set_thread_priority(thread_id thread, int32 newPriority)
3379 {
3380 	// TODO: Don't allow setting priority of kernel threads!
3381 	return set_thread_priority(thread, newPriority);
3382 }
3383 
3384 
3385 thread_id
3386 _user_spawn_thread(thread_creation_attributes* userAttributes)
3387 {
3388 	// copy the userland structure to the kernel
3389 	char nameBuffer[B_OS_NAME_LENGTH];
3390 	ThreadCreationAttributes attributes;
3391 	status_t error = attributes.InitFromUserAttributes(userAttributes,
3392 		nameBuffer);
3393 	if (error != B_OK)
3394 		return error;
3395 
3396 	// create the thread
3397 	thread_id threadID = thread_create_thread(attributes, false);
3398 
3399 	if (threadID >= 0)
3400 		user_debug_thread_created(threadID);
3401 
3402 	return threadID;
3403 }
3404 
3405 
3406 status_t
3407 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags,
3408 	bigtime_t* userRemainingTime)
3409 {
3410 	// We need to store more syscall restart parameters than usual and need
3411 	// somewhat different handling. Hence we can't use
3412 	// syscall_restart_handle_timeout_pre() but do the job ourselves.
3413 	struct restart_parameters {
3414 		bigtime_t	timeout;
3415 		clockid_t	timebase;
3416 		uint32		flags;
3417 	};
3418 
3419 	Thread* thread = thread_get_current_thread();
3420 
3421 	if ((thread->flags & THREAD_FLAGS_SYSCALL_RESTARTED) != 0) {
3422 		// The syscall was restarted. Fetch the parameters from the stored
3423 		// restart parameters.
3424 		restart_parameters* restartParameters
3425 			= (restart_parameters*)thread->syscall_restart.parameters;
3426 		timeout = restartParameters->timeout;
3427 		timebase = restartParameters->timebase;
3428 		flags = restartParameters->flags;
3429 	} else {
3430 		// convert relative timeouts to absolute ones
3431 		if ((flags & B_RELATIVE_TIMEOUT) != 0) {
3432 			// not restarted yet and the flags indicate a relative timeout
3433 
3434 			// Make sure we use the system time base, so real-time clock changes
3435 			// won't affect our wait.
3436 			flags &= ~(uint32)B_TIMEOUT_REAL_TIME_BASE;
3437 			if (timebase == CLOCK_REALTIME)
3438 				timebase = CLOCK_MONOTONIC;
3439 
3440 			// get the current time and make the timeout absolute
3441 			bigtime_t now;
3442 			status_t error = user_timer_get_clock(timebase, now);
3443 			if (error != B_OK)
3444 				return error;
3445 
3446 			timeout += now;
3447 
3448 			// deal with overflow
3449 			if (timeout < 0)
3450 				timeout = B_INFINITE_TIMEOUT;
3451 
3452 			flags = (flags & ~B_RELATIVE_TIMEOUT) | B_ABSOLUTE_TIMEOUT;
3453 		} else
3454 			flags |= B_ABSOLUTE_TIMEOUT;
3455 	}
3456 
3457 	// snooze
3458 	bigtime_t remainingTime;
3459 	status_t error = common_snooze_etc(timeout, timebase,
3460 		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION,
3461 		userRemainingTime != NULL ? &remainingTime : NULL);
3462 
3463 	// If interrupted, copy the remaining time back to userland and prepare the
3464 	// syscall restart.
3465 	if (error == B_INTERRUPTED) {
3466 		if (userRemainingTime != NULL
3467 			&& (!IS_USER_ADDRESS(userRemainingTime)
3468 				|| user_memcpy(userRemainingTime, &remainingTime,
3469 					sizeof(remainingTime)) != B_OK)) {
3470 			return B_BAD_ADDRESS;
3471 		}
3472 
3473 		// store the normalized values in the restart parameters
3474 		restart_parameters* restartParameters
3475 			= (restart_parameters*)thread->syscall_restart.parameters;
3476 		restartParameters->timeout = timeout;
3477 		restartParameters->timebase = timebase;
3478 		restartParameters->flags = flags;
3479 
3480 		// restart the syscall, if possible
3481 		atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
3482 	}
3483 
3484 	return error;
3485 }
3486 
3487 
3488 void
3489 _user_thread_yield(void)
3490 {
3491 	thread_yield(true);
3492 }
3493 
3494 
3495 status_t
3496 _user_get_thread_info(thread_id id, thread_info *userInfo)
3497 {
3498 	thread_info info;
3499 	status_t status;
3500 
3501 	if (!IS_USER_ADDRESS(userInfo))
3502 		return B_BAD_ADDRESS;
3503 
3504 	status = _get_thread_info(id, &info, sizeof(thread_info));
3505 
3506 	if (status >= B_OK
3507 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
3508 		return B_BAD_ADDRESS;
3509 
3510 	return status;
3511 }
3512 
3513 
3514 status_t
3515 _user_get_next_thread_info(team_id team, int32 *userCookie,
3516 	thread_info *userInfo)
3517 {
3518 	status_t status;
3519 	thread_info info;
3520 	int32 cookie;
3521 
3522 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
3523 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3524 		return B_BAD_ADDRESS;
3525 
3526 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
3527 	if (status < B_OK)
3528 		return status;
3529 
3530 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3531 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
3532 		return B_BAD_ADDRESS;
3533 
3534 	return status;
3535 }
3536 
3537 
3538 thread_id
3539 _user_find_thread(const char *userName)
3540 {
3541 	char name[B_OS_NAME_LENGTH];
3542 
3543 	if (userName == NULL)
3544 		return find_thread(NULL);
3545 
3546 	if (!IS_USER_ADDRESS(userName)
3547 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
3548 		return B_BAD_ADDRESS;
3549 
3550 	return find_thread(name);
3551 }
3552 
3553 
3554 status_t
3555 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
3556 {
3557 	status_t returnCode;
3558 	status_t status;
3559 
3560 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
3561 		return B_BAD_ADDRESS;
3562 
3563 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
3564 
3565 	if (status == B_OK && userReturnCode != NULL
3566 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
3567 		return B_BAD_ADDRESS;
3568 	}
3569 
3570 	return syscall_restart_handle_post(status);
3571 }
3572 
3573 
3574 bool
3575 _user_has_data(thread_id thread)
3576 {
3577 	return has_data(thread);
3578 }
3579 
3580 
3581 status_t
3582 _user_send_data(thread_id thread, int32 code, const void *buffer,
3583 	size_t bufferSize)
3584 {
3585 	if (!IS_USER_ADDRESS(buffer))
3586 		return B_BAD_ADDRESS;
3587 
3588 	return send_data_etc(thread, code, buffer, bufferSize,
3589 		B_KILL_CAN_INTERRUPT);
3590 		// supports userland buffers
3591 }
3592 
3593 
3594 status_t
3595 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
3596 {
3597 	thread_id sender;
3598 	status_t code;
3599 
3600 	if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
3601 		|| !IS_USER_ADDRESS(buffer))
3602 		return B_BAD_ADDRESS;
3603 
3604 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
3605 		// supports userland buffers
3606 
3607 	if (_userSender != NULL)
3608 		if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
3609 			return B_BAD_ADDRESS;
3610 
3611 	return code;
3612 }
3613 
3614 
3615 status_t
3616 _user_block_thread(uint32 flags, bigtime_t timeout)
3617 {
3618 	syscall_restart_handle_timeout_pre(flags, timeout);
3619 	flags |= B_CAN_INTERRUPT;
3620 
3621 	Thread* thread = thread_get_current_thread();
3622 	ThreadLocker threadLocker(thread);
3623 
3624 	// check if we're already done
3625 	if (thread->user_thread->wait_status <= 0)
3626 		return thread->user_thread->wait_status;
3627 
3628 	// nope, so wait
3629 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");
3630 
3631 	threadLocker.Unlock();
3632 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3633 
3634 	status_t status = thread_block_with_timeout_locked(flags, timeout);
3635 
3636 	schedulerLocker.Unlock();
3637 	threadLocker.Lock();
3638 
3639 	// Interruptions or timeouts can race with other threads unblocking us.
3640 	// Favor a wake-up by another thread, i.e. if someone changed the wait
3641 	// status, use that.
3642 	status_t oldStatus = thread->user_thread->wait_status;
3643 	if (oldStatus > 0)
3644 		thread->user_thread->wait_status = status;
3645 	else
3646 		status = oldStatus;
3647 
3648 	threadLocker.Unlock();
3649 
3650 	return syscall_restart_handle_timeout_post(status, timeout);
3651 }
3652 
3653 
3654 status_t
3655 _user_unblock_thread(thread_id threadID, status_t status)
3656 {
3657 	status_t error = user_unblock_thread(threadID, status);
3658 
3659 	if (error == B_OK)
3660 		scheduler_reschedule_if_necessary();
3661 
3662 	return error;
3663 }
3664 
3665 
3666 status_t
3667 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
3668 {
3669 	enum {
3670 		MAX_USER_THREADS_TO_UNBLOCK	= 128
3671 	};
3672 
3673 	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
3674 		return B_BAD_ADDRESS;
3675 	if (count > MAX_USER_THREADS_TO_UNBLOCK)
3676 		return B_BAD_VALUE;
3677 
3678 	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
3679 	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
3680 		return B_BAD_ADDRESS;
3681 
3682 	for (uint32 i = 0; i < count; i++)
3683 		user_unblock_thread(threads[i], status);
3684 
3685 	scheduler_reschedule_if_necessary();
3686 
3687 	return B_OK;
3688 }
3689 
3690 
3691 // TODO: the following two functions don't belong here
3692 
3693 
3694 int
3695 _user_getrlimit(int resource, struct rlimit *urlp)
3696 {
3697 	struct rlimit rl;
3698 	int ret;
3699 
3700 	if (urlp == NULL)
3701 		return EINVAL;
3702 
3703 	if (!IS_USER_ADDRESS(urlp))
3704 		return B_BAD_ADDRESS;
3705 
3706 	ret = common_getrlimit(resource, &rl);
3707 
3708 	if (ret == 0) {
3709 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
3710 		if (ret < 0)
3711 			return ret;
3712 
3713 		return 0;
3714 	}
3715 
3716 	return ret;
3717 }
3718 
3719 
3720 int
3721 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
3722 {
3723 	struct rlimit resourceLimit;
3724 
3725 	if (userResourceLimit == NULL)
3726 		return EINVAL;
3727 
3728 	if (!IS_USER_ADDRESS(userResourceLimit)
3729 		|| user_memcpy(&resourceLimit, userResourceLimit,
3730 			sizeof(struct rlimit)) < B_OK)
3731 		return B_BAD_ADDRESS;
3732 
3733 	return common_setrlimit(resource, &resourceLimit);
3734 }
3735