xref: /haiku/src/system/kernel/thread.cpp (revision 0044a8c39ab5721051b6279506d1a8c511e20453)
1 /*
2  * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*! Threading routines */
12 
13 
14 #include <thread.h>
15 
16 #include <errno.h>
17 #include <malloc.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/resource.h>
22 
23 #include <algorithm>
24 
25 #include <OS.h>
26 
27 #include <util/AutoLock.h>
28 
29 #include <arch/debug.h>
30 #include <boot/kernel_args.h>
31 #include <condition_variable.h>
32 #include <cpu.h>
33 #include <int.h>
34 #include <kimage.h>
35 #include <kscheduler.h>
36 #include <ksignal.h>
37 #include <Notifications.h>
38 #include <real_time_clock.h>
39 #include <slab/Slab.h>
40 #include <smp.h>
41 #include <syscalls.h>
42 #include <syscall_restart.h>
43 #include <team.h>
44 #include <tls.h>
45 #include <user_runtime.h>
46 #include <user_thread.h>
47 #include <vfs.h>
48 #include <vm/vm.h>
49 #include <vm/VMAddressSpace.h>
50 #include <wait_for_objects.h>
51 
52 #include "TeamThreadTables.h"
53 
54 
55 //#define TRACE_THREAD
56 #ifdef TRACE_THREAD
57 #	define TRACE(x) dprintf x
58 #else
59 #	define TRACE(x) ;
60 #endif
61 
62 
63 #define THREAD_MAX_MESSAGE_SIZE		65536
64 
65 
66 // #pragma mark - ThreadHashTable
67 
68 
69 typedef BKernel::TeamThreadTable<Thread> ThreadHashTable;
70 
71 
72 // thread list
73 static Thread sIdleThreads[B_MAX_CPU_COUNT];
74 static ThreadHashTable sThreadHash;
75 static spinlock sThreadHashLock = B_SPINLOCK_INITIALIZER;
76 static thread_id sNextThreadID = 2;
77 	// ID 1 is allocated for the kernel by Team::Team() behind our back
78 
79 // some arbitrarily chosen limits -- should probably depend on the available
80 // memory (the limit is not yet enforced)
81 static int32 sMaxThreads = 4096;
82 static int32 sUsedThreads = 0;
83 
84 
85 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
86 	Thread*	thread;
87 	team_id	teamID;
88 
89 	UndertakerEntry(Thread* thread, team_id teamID)
90 		:
91 		thread(thread),
92 		teamID(teamID)
93 	{
94 	}
95 };
96 
97 
98 struct ThreadEntryArguments {
99 	status_t	(*kernelFunction)(void* argument);
100 	void*		argument;
101 	bool		enterUserland;
102 };
103 
104 struct UserThreadEntryArguments : ThreadEntryArguments {
105 	addr_t			userlandEntry;
106 	void*			userlandArgument1;
107 	void*			userlandArgument2;
108 	pthread_t		pthread;
109 	arch_fork_arg*	forkArgs;
110 	uint32			flags;
111 };
112 
113 
114 class ThreadNotificationService : public DefaultNotificationService {
115 public:
116 	ThreadNotificationService()
117 		: DefaultNotificationService("threads")
118 	{
119 	}
120 
121 	void Notify(uint32 eventCode, team_id teamID, thread_id threadID,
122 		Thread* thread = NULL)
123 	{
124 		char eventBuffer[128];
125 		KMessage event;
126 		event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
127 		event.AddInt32("event", eventCode);
128 		event.AddInt32("team", teamID);
129 		event.AddInt32("thread", threadID);
130 		if (thread != NULL)
131 			event.AddPointer("threadStruct", thread);
132 
133 		DefaultNotificationService::Notify(event, eventCode);
134 	}
135 
136 	void Notify(uint32 eventCode, Thread* thread)
137 	{
138 		return Notify(eventCode, thread->team->id, thread->id, thread);
139 	}
140 };
141 
142 
143 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
144 static ConditionVariable sUndertakerCondition;
145 static ThreadNotificationService sNotificationService;
146 
147 
148 // object cache to allocate thread structures from
149 static object_cache* sThreadCache;
150 
151 
152 // #pragma mark - Thread
153 
154 
155 /*!	Constructs a thread.
156 
157 	\param name The thread's name.
158 	\param threadID The ID to be assigned to the new thread. If
159 		  \code < 0 \endcode a fresh one is allocated.
160 	\param cpu The CPU the thread shall be assigned to.
161 */
162 Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)
163 	:
164 	flags(0),
165 	serial_number(-1),
166 	hash_next(NULL),
167 	team_next(NULL),
168 	queue_next(NULL),
169 	priority(-1),
170 	next_priority(-1),
171 	io_priority(-1),
172 	cpu(cpu),
173 	previous_cpu(NULL),
174 	pinned_to_cpu(0),
175 	sig_block_mask(0),
176 	sigsuspend_original_unblocked_mask(0),
177 	user_signal_context(NULL),
178 	signal_stack_base(0),
179 	signal_stack_size(0),
180 	signal_stack_enabled(false),
181 	in_kernel(true),
182 	was_yielded(false),
183 	user_thread(NULL),
184 	fault_handler(0),
185 	page_faults_allowed(1),
186 	team(NULL),
187 	select_infos(NULL),
188 	kernel_stack_area(-1),
189 	kernel_stack_base(0),
190 	user_stack_area(-1),
191 	user_stack_base(0),
192 	user_local_storage(0),
193 	kernel_errno(0),
194 	user_time(0),
195 	kernel_time(0),
196 	last_time(0),
197 	cpu_clock_offset(0),
198 	post_interrupt_callback(NULL),
199 	post_interrupt_data(NULL)
200 {
201 	id = threadID >= 0 ? threadID : allocate_thread_id();
202 	visible = false;
203 
204 	// init locks
205 	char lockName[32];
206 	snprintf(lockName, sizeof(lockName), "Thread:%" B_PRId32, id);
207 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
208 
209 	B_INITIALIZE_SPINLOCK(&time_lock);
210 
211 	// init name
212 	if (name != NULL)
213 		strlcpy(this->name, name, B_OS_NAME_LENGTH);
214 	else
215 		strcpy(this->name, "unnamed thread");
216 
217 	alarm.period = 0;
218 
219 	exit.status = 0;
220 
221 	list_init(&exit.waiters);
222 
223 	exit.sem = -1;
224 	msg.write_sem = -1;
225 	msg.read_sem = -1;
226 
227 	// add to thread table -- yet invisible
228 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
229 	sThreadHash.Insert(this);
230 }
231 
232 
233 Thread::~Thread()
234 {
235 	// Delete resources that the thread itself should actually have deleted
236 	// when exiting, but that might still exist if the thread was never run.
237 
238 	if (user_stack_area >= 0)
239 		delete_area(user_stack_area);
240 
241 	DeleteUserTimers(false);
242 
243 	// delete the resources that may remain in either case
244 
245 	if (kernel_stack_area >= 0)
246 		delete_area(kernel_stack_area);
247 
248 	fPendingSignals.Clear();
249 
250 	if (exit.sem >= 0)
251 		delete_sem(exit.sem);
252 	if (msg.write_sem >= 0)
253 		delete_sem(msg.write_sem);
254 	if (msg.read_sem >= 0)
255 		delete_sem(msg.read_sem);
256 
257 	scheduler_on_thread_destroy(this);
258 
259 	mutex_destroy(&fLock);
260 
261 	// remove from thread table
262 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
263 	sThreadHash.Remove(this);
264 }
265 
266 
267 /*static*/ status_t
268 Thread::Create(const char* name, Thread*& _thread)
269 {
270 	Thread* thread = new Thread(name, -1, NULL);
271 	if (thread == NULL)
272 		return B_NO_MEMORY;
273 
274 	status_t error = thread->Init(false);
275 	if (error != B_OK) {
276 		delete thread;
277 		return error;
278 	}
279 
280 	_thread = thread;
281 	return B_OK;
282 }
283 
284 
285 /*static*/ Thread*
286 Thread::Get(thread_id id)
287 {
288 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
289 	Thread* thread = sThreadHash.Lookup(id);
290 	if (thread != NULL)
291 		thread->AcquireReference();
292 	return thread;
293 }
294 
295 
296 /*static*/ Thread*
297 Thread::GetAndLock(thread_id id)
298 {
299 	// look it up and acquire a reference
300 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
301 	Thread* thread = sThreadHash.Lookup(id);
302 	if (thread == NULL)
303 		return NULL;
304 
305 	thread->AcquireReference();
306 	threadHashLocker.Unlock();
307 
308 	// lock and check whether it is still in the hash table
309 	thread->Lock();
310 	threadHashLocker.Lock();
311 
312 	if (sThreadHash.Lookup(id) == thread)
313 		return thread;
314 
315 	threadHashLocker.Unlock();
316 
317 	// nope, the thread is no longer in the hash table
318 	thread->UnlockAndReleaseReference();
319 
320 	return NULL;
321 }
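/*	Usage sketch (editor's illustration, not part of the original source):
	GetAndLock() hands back the thread both referenced and locked, so the
	caller has to undo both when done. Assuming a caller that merely wants to
	inspect the thread, the typical pattern is:

		if (Thread* thread = Thread::GetAndLock(someID)) {
			// ... work with the locked thread ...
			thread->UnlockAndReleaseReference();
		}

	Thread::Get() only acquires a reference; pair it with ReleaseReference()
	(or a BReference<Thread>) instead.
*/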
322 
323 
324 /*static*/ Thread*
325 Thread::GetDebug(thread_id id)
326 {
327 	return sThreadHash.Lookup(id, false);
328 }
329 
330 
331 /*static*/ bool
332 Thread::IsAlive(thread_id id)
333 {
334 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
335 	return sThreadHash.Lookup(id) != NULL;
336 }
337 
338 
339 void*
340 Thread::operator new(size_t size)
341 {
342 	return object_cache_alloc(sThreadCache, 0);
343 }
344 
345 
346 void*
347 Thread::operator new(size_t, void* pointer)
348 {
349 	return pointer;
350 }
351 
352 
353 void
354 Thread::operator delete(void* pointer, size_t size)
355 {
356 	object_cache_free(sThreadCache, pointer, 0);
357 }
358 
359 
360 status_t
361 Thread::Init(bool idleThread)
362 {
363 	status_t error = scheduler_on_thread_create(this, idleThread);
364 	if (error != B_OK)
365 		return error;
366 
367 	char temp[64];
368 	sprintf(temp, "thread_%ld_retcode_sem", id);
369 	exit.sem = create_sem(0, temp);
370 	if (exit.sem < 0)
371 		return exit.sem;
372 
373 	sprintf(temp, "%s send", name);
374 	msg.write_sem = create_sem(1, temp);
375 	if (msg.write_sem < 0)
376 		return msg.write_sem;
377 
378 	sprintf(temp, "%s receive", name);
379 	msg.read_sem = create_sem(0, temp);
380 	if (msg.read_sem < 0)
381 		return msg.read_sem;
382 
383 	error = arch_thread_init_thread_struct(this);
384 	if (error != B_OK)
385 		return error;
386 
387 	return B_OK;
388 }
389 
390 
391 /*!	Checks whether the thread is still in the thread hash table.
392 */
393 bool
394 Thread::IsAlive() const
395 {
396 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
397 
398 	return sThreadHash.Lookup(id) != NULL;
399 }
400 
401 
402 void
403 Thread::ResetSignalsOnExec()
404 {
405 	// We are supposed to keep the pending signals and the signal mask. Only the
406 	// signal stack, if set, shall be unset.
407 
408 	sigsuspend_original_unblocked_mask = 0;
409 	user_signal_context = NULL;
410 	signal_stack_base = 0;
411 	signal_stack_size = 0;
412 	signal_stack_enabled = false;
413 }
414 
415 
416 /*!	Adds the given user timer to the thread and, if user-defined, assigns it an
417 	ID.
418 
419 	The caller must hold the thread's lock.
420 
421 	\param timer The timer to be added. If it doesn't have an ID yet, it is
422 		considered user-defined and will be assigned an ID.
423 	\return \c B_OK, if the timer was added successfully, another error code
424 		otherwise.
425 */
426 status_t
427 Thread::AddUserTimer(UserTimer* timer)
428 {
429 	// If the timer is user-defined, check timer limit and increment
430 	// user-defined count.
431 	if (timer->ID() < 0 && !team->CheckAddUserDefinedTimer())
432 		return EAGAIN;
433 
434 	fUserTimers.AddTimer(timer);
435 
436 	return B_OK;
437 }
438 
439 
440 /*!	Removes the given user timer from the thread.
441 
442 	The caller must hold the thread's lock.
443 
444 	\param timer The timer to be removed.
445 
446 */
447 void
448 Thread::RemoveUserTimer(UserTimer* timer)
449 {
450 	fUserTimers.RemoveTimer(timer);
451 
452 	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
453 		team->UserDefinedTimersRemoved(1);
454 }
455 
456 
457 /*!	Deletes all (or all user-defined) user timers of the thread.
458 
459 	The caller must hold the thread's lock.
460 
461 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
462 		otherwise all timers are deleted.
463 */
464 void
465 Thread::DeleteUserTimers(bool userDefinedOnly)
466 {
467 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
468 	if (count > 0)
469 		team->UserDefinedTimersRemoved(count);
470 }
471 
472 
473 void
474 Thread::DeactivateCPUTimeUserTimers()
475 {
476 	while (ThreadTimeUserTimer* timer = fCPUTimeUserTimers.Head())
477 		timer->Deactivate();
478 }
479 
480 
481 // #pragma mark - ThreadListIterator
482 
483 
484 ThreadListIterator::ThreadListIterator()
485 {
486 	// queue the entry
487 	InterruptsSpinLocker locker(sThreadHashLock);
488 	sThreadHash.InsertIteratorEntry(&fEntry);
489 }
490 
491 
492 ThreadListIterator::~ThreadListIterator()
493 {
494 	// remove the entry
495 	InterruptsSpinLocker locker(sThreadHashLock);
496 	sThreadHash.RemoveIteratorEntry(&fEntry);
497 }
498 
499 
500 Thread*
501 ThreadListIterator::Next()
502 {
503 	// get the next thread -- if there is one, acquire a reference to it
504 	InterruptsSpinLocker locker(sThreadHashLock);
505 	Thread* thread = sThreadHash.NextElement(&fEntry);
506 	if (thread != NULL)
507 		thread->AcquireReference();
508 
509 	return thread;
510 }
511 
512 
513 // #pragma mark - ThreadCreationAttributes
514 
515 
516 ThreadCreationAttributes::ThreadCreationAttributes(thread_func function,
517 	const char* name, int32 priority, void* arg, team_id team,
518 	Thread* thread)
519 {
520 	this->entry = NULL;
521 	this->name = name;
522 	this->priority = priority;
523 	this->args1 = NULL;
524 	this->args2 = NULL;
525 	this->stack_address = NULL;
526 	this->stack_size = 0;
527 	this->pthread = NULL;
528 	this->flags = 0;
529 	this->team = team >= 0 ? team : team_get_kernel_team()->id;
530 	this->thread = thread;
531 	this->signal_mask = 0;
532 	this->additional_stack_size = 0;
533 	this->kernelEntry = function;
534 	this->kernelArgument = arg;
535 	this->forkArgs = NULL;
536 }
537 
538 
539 /*!	Initializes the structure from a userland structure.
540 	\param userAttributes The userland structure (must be a userland address).
541 	\param nameBuffer A character array of at least size B_OS_NAME_LENGTH,
542 		which will be used for the \c name field, if the userland structure has
543 		a name. The buffer must remain valid as long as this structure is in
544 		use afterwards (or until it is reinitialized).
545 	\return \c B_OK, if the initialization went fine, another error code
546 		otherwise.
547 */
548 status_t
549 ThreadCreationAttributes::InitFromUserAttributes(
550 	const thread_creation_attributes* userAttributes, char* nameBuffer)
551 {
552 	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
553 		|| user_memcpy((thread_creation_attributes*)this, userAttributes,
554 				sizeof(thread_creation_attributes)) != B_OK) {
555 		return B_BAD_ADDRESS;
556 	}
557 
558 	if (stack_size != 0
559 		&& (stack_size < MIN_USER_STACK_SIZE
560 			|| stack_size > MAX_USER_STACK_SIZE)) {
561 		return B_BAD_VALUE;
562 	}
563 
564 	if (entry == NULL || !IS_USER_ADDRESS(entry)
565 		|| (stack_address != NULL && !IS_USER_ADDRESS(stack_address))
566 		|| (name != NULL && (!IS_USER_ADDRESS(name)
567 			|| user_strlcpy(nameBuffer, name, B_OS_NAME_LENGTH) < 0))) {
568 		return B_BAD_ADDRESS;
569 	}
570 
571 	name = name != NULL ? nameBuffer : "user thread";
572 
573 	// kernel-only attributes (not in thread_creation_attributes):
574 	Thread* currentThread = thread_get_current_thread();
575 	team = currentThread->team->id;
576 	thread = NULL;
577 	signal_mask = currentThread->sig_block_mask;
578 		// inherit the current thread's signal mask
579 	additional_stack_size = 0;
580 	kernelEntry = NULL;
581 	kernelArgument = NULL;
582 	forkArgs = NULL;
583 
584 	return B_OK;
585 }
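/*	Userland-side sketch (editor's illustration, not part of the original
	source): the structure validated above is filled in by userland roughly as
	follows before the spawn syscall; the syscall wrapper name and the entry
	function are assumptions made only for this example.

		thread_creation_attributes attributes;
		memset(&attributes, 0, sizeof(attributes));
		attributes.entry = &my_thread_entry;	// assumed entry function
		attributes.name = "worker";
		attributes.priority = B_NORMAL_PRIORITY;
		attributes.args1 = cookie;
		attributes.stack_size = 0;				// 0 == use the default size
		attributes.flags = 0;

		thread_id thread = _kern_spawn_thread(&attributes);
			// assumed syscall wrapper name
*/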
586 
587 
588 // #pragma mark - private functions
589 
590 
591 /*!	Inserts a thread into a team.
592 	The caller must hold the team's lock, the thread's lock, and the scheduler
593 	lock.
594 */
595 static void
596 insert_thread_into_team(Team *team, Thread *thread)
597 {
598 	thread->team_next = team->thread_list;
599 	team->thread_list = thread;
600 	team->num_threads++;
601 
602 	if (team->num_threads == 1) {
603 		// this was the first thread
604 		team->main_thread = thread;
605 	}
606 	thread->team = team;
607 }
608 
609 
610 /*!	Removes a thread from a team.
611 	The caller must hold the team's lock, the thread's lock, and the scheduler
612 	lock.
613 */
614 static void
615 remove_thread_from_team(Team *team, Thread *thread)
616 {
617 	Thread *temp, *last = NULL;
618 
619 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
620 		if (temp == thread) {
621 			if (last == NULL)
622 				team->thread_list = temp->team_next;
623 			else
624 				last->team_next = temp->team_next;
625 
626 			team->num_threads--;
627 			break;
628 		}
629 		last = temp;
630 	}
631 }
632 
633 
634 static status_t
635 enter_userspace(Thread* thread, UserThreadEntryArguments* args)
636 {
637 	status_t error = arch_thread_init_tls(thread);
638 	if (error != B_OK) {
639 		dprintf("Failed to init TLS for new userland thread \"%s\" (%" B_PRId32
640 			")\n", thread->name, thread->id);
641 		free(args->forkArgs);
642 		return error;
643 	}
644 
645 	user_debug_update_new_thread_flags(thread);
646 
647 	// init the thread's user_thread
648 	user_thread* userThread = thread->user_thread;
649 	userThread->pthread = args->pthread;
650 	userThread->flags = 0;
651 	userThread->wait_status = B_OK;
652 	userThread->defer_signals
653 		= (args->flags & THREAD_CREATION_FLAG_DEFER_SIGNALS) != 0 ? 1 : 0;
654 	userThread->pending_signals = 0;
655 
656 	if (args->forkArgs != NULL) {
657 		// This is a fork()ed thread. Copy the fork args onto the stack and
658 		// free them.
659 		arch_fork_arg archArgs = *args->forkArgs;
660 		free(args->forkArgs);
661 
662 		arch_restore_fork_frame(&archArgs);
663 			// this one won't return here
664 		return B_ERROR;
665 	}
666 
667 	// Jump to the entry point in user space. Only returns if something fails.
668 	return arch_thread_enter_userspace(thread, args->userlandEntry,
669 		args->userlandArgument1, args->userlandArgument2);
670 }
671 
672 
673 status_t
674 thread_enter_userspace_new_team(Thread* thread, addr_t entryFunction,
675 	void* argument1, void* argument2)
676 {
677 	UserThreadEntryArguments entryArgs;
678 	entryArgs.kernelFunction = NULL;
679 	entryArgs.argument = NULL;
680 	entryArgs.enterUserland = true;
681 	entryArgs.userlandEntry = (addr_t)entryFunction;
682 	entryArgs.userlandArgument1 = argument1;
683 	entryArgs.userlandArgument2 = argument2;
684 	entryArgs.pthread = NULL;
685 	entryArgs.forkArgs = NULL;
686 	entryArgs.flags = 0;
687 
688 	return enter_userspace(thread, &entryArgs);
689 }
690 
691 
692 static void
693 common_thread_entry(void* _args)
694 {
695 	Thread* thread = thread_get_current_thread();
696 
697 	// The thread is new and has been scheduled the first time.
698 
699 	// start CPU time based user timers
700 	if (thread->HasActiveCPUTimeUserTimers()
701 		|| thread->team->HasActiveCPUTimeUserTimers()) {
702 		user_timer_continue_cpu_timers(thread, thread->cpu->previous_thread);
703 	}
704 
705 	// notify the user debugger code
706 	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
707 		user_debug_thread_scheduled(thread);
708 
709 	// start tracking time
710 	thread->last_time = system_time();
711 
712 	// unlock the scheduler lock and enable interrupts
713 	release_spinlock(&gSchedulerLock);
714 	enable_interrupts();
715 
716 	// call the kernel function, if any
717 	ThreadEntryArguments* args = (ThreadEntryArguments*)_args;
718 	if (args->kernelFunction != NULL)
719 		args->kernelFunction(args->argument);
720 
721 	// If requested, enter userland, now.
722 	if (args->enterUserland) {
723 		enter_userspace(thread, (UserThreadEntryArguments*)args);
724 			// only returns on error
725 
726 		// If that's the team's main thread, init the team exit info.
727 		if (thread == thread->team->main_thread)
728 			team_init_exit_info_on_error(thread->team);
729 	}
730 
731 	// we're done
732 	thread_exit();
733 }
734 
735 
736 /*!	Prepares the given thread's kernel stack for executing its entry function.
737 
738 	The data pointed to by \a data of size \a dataSize are copied to the
739 	thread's kernel stack. A pointer to the copy's data is passed to the entry
740 	function. The entry function is common_thread_entry().
741 
742 	\param thread The thread.
743 	\param data Pointer to data to be copied to the thread's stack and passed
744 		to the entry function.
745 	\param dataSize The size of \a data.
746  */
747 static void
748 init_thread_kernel_stack(Thread* thread, const void* data, size_t dataSize)
749 {
750 	uint8* stack = (uint8*)thread->kernel_stack_base;
751 	uint8* stackTop = (uint8*)thread->kernel_stack_top;
752 
753 	// clear (or rather invalidate) the kernel stack contents, if compiled with
754 	// debugging
755 #if KDEBUG > 0
756 #	if defined(DEBUG_KERNEL_STACKS) && defined(STACK_GROWS_DOWNWARDS)
757 	memset((void*)(stack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE), 0xcc,
758 		KERNEL_STACK_SIZE);
759 #	else
760 	memset(stack, 0xcc, KERNEL_STACK_SIZE);
761 #	endif
762 #endif
763 
764 	// copy the data onto the stack, with 16-byte alignment to be on the safe
765 	// side
766 	void* clonedData;
767 #ifdef STACK_GROWS_DOWNWARDS
768 	clonedData = (void*)ROUNDDOWN((addr_t)stackTop - dataSize, 16);
769 	stackTop = (uint8*)clonedData;
770 #else
771 	clonedData = (void*)ROUNDUP((addr_t)stack, 16);
772 	stack = (uint8*)clonedData + ROUNDUP(dataSize, 16);
773 #endif
774 
775 	memcpy(clonedData, data, dataSize);
776 
777 	arch_thread_init_kthread_stack(thread, stack, stackTop,
778 		&common_thread_entry, clonedData);
779 }
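/*	Worked example (editor's illustration): with a downwards-growing stack, a
	stack top of 0x1000 and dataSize == 72, the entry arguments end up at
	ROUNDDOWN(0x1000 - 72, 16) = ROUNDDOWN(0xfb8, 16) = 0xfb0, which also
	becomes the new stack top handed to arch_thread_init_kthread_stack().
*/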
780 
781 
782 static status_t
783 create_thread_user_stack(Team* team, Thread* thread, void* _stackBase,
784 	size_t stackSize, size_t additionalSize, char* nameBuffer)
785 {
786 	area_id stackArea = -1;
787 	uint8* stackBase = (uint8*)_stackBase;
788 
789 	if (stackBase != NULL) {
790 		// A stack has been specified. It must be large enough to hold the
791 		// TLS space at least.
792 		STATIC_ASSERT(TLS_SIZE < MIN_USER_STACK_SIZE);
793 		if (stackSize < MIN_USER_STACK_SIZE)
794 			return B_BAD_VALUE;
795 
796 		stackBase -= TLS_SIZE;
797 	}
798 
799 	if (stackBase == NULL) {
800 		// No user-defined stack -- allocate one. For non-main threads the stack
801 		// will be between USER_STACK_REGION and the main thread stack area. For
802 		// a main thread the position is fixed.
803 
804 		if (stackSize == 0) {
805 			// Use the default size (a different one for a main thread).
806 			stackSize = thread->id == team->id
807 				? USER_MAIN_THREAD_STACK_SIZE : USER_STACK_SIZE;
808 		} else {
809 			// Verify that the given stack size is large enough.
810 			if (stackSize < MIN_USER_STACK_SIZE - TLS_SIZE)
811 				return B_BAD_VALUE;
812 
813 			stackSize = PAGE_ALIGN(stackSize);
814 		}
815 		stackSize += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
816 
817 		size_t areaSize = PAGE_ALIGN(stackSize + TLS_SIZE + additionalSize);
818 
819 		snprintf(nameBuffer, B_OS_NAME_LENGTH, "%s_%ld_stack", thread->name,
820 			thread->id);
821 
822 		virtual_address_restrictions virtualRestrictions = {};
823 		if (thread->id == team->id) {
824 			// The main thread gets a fixed position at the top of the stack
825 			// address range.
826 			stackBase = (uint8*)(USER_STACK_REGION + USER_STACK_REGION_SIZE
827 				- areaSize);
828 			virtualRestrictions.address_specification = B_EXACT_ADDRESS;
829 
830 		} else {
831 			// not a main thread
832 			stackBase = (uint8*)(addr_t)USER_STACK_REGION;
833 			virtualRestrictions.address_specification = B_BASE_ADDRESS;
834 		}
835 		virtualRestrictions.address = (void*)stackBase;
836 
837 		physical_address_restrictions physicalRestrictions = {};
838 
839 		stackArea = create_area_etc(team->id, nameBuffer,
840 			areaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA,
841 			0, &virtualRestrictions, &physicalRestrictions,
842 			(void**)&stackBase);
843 		if (stackArea < 0)
844 			return stackArea;
845 	}
846 
847 	// set the stack
848 	ThreadLocker threadLocker(thread);
849 	thread->user_stack_base = (addr_t)stackBase;
850 	thread->user_stack_size = stackSize;
851 	thread->user_stack_area = stackArea;
852 
853 	return B_OK;
854 }
855 
856 
857 status_t
858 thread_create_user_stack(Team* team, Thread* thread, void* stackBase,
859 	size_t stackSize, size_t additionalSize)
860 {
861 	char nameBuffer[B_OS_NAME_LENGTH];
862 	return create_thread_user_stack(team, thread, stackBase, stackSize,
863 		additionalSize, nameBuffer);
864 }
865 
866 
867 /*!	Creates a new thread.
868 
869 	\param attributes The thread creation attributes, specifying the team in
870 		which to create the thread, as well as a whole bunch of other arguments.
871 	\param kernel \c true, if a kernel-only thread shall be created, \c false,
872 		if the thread shall also be able to run in userland.
873 	\return The ID of the newly created thread (>= 0) or an error code on
874 		failure.
875 */
876 thread_id
877 thread_create_thread(const ThreadCreationAttributes& attributes, bool kernel)
878 {
879 	status_t status = B_OK;
880 
881 	TRACE(("thread_create_thread(%s, thread = %p, %s)\n", attributes.name,
882 		attributes.thread, kernel ? "kernel" : "user"));
883 
884 	// get the team
885 	Team* team = Team::Get(attributes.team);
886 	if (team == NULL)
887 		return B_BAD_TEAM_ID;
888 	BReference<Team> teamReference(team, true);
889 
890 	// If a thread object is given, acquire a reference to it, otherwise create
891 	// a new thread object with the given attributes.
892 	Thread* thread = attributes.thread;
893 	if (thread != NULL) {
894 		thread->AcquireReference();
895 	} else {
896 		status = Thread::Create(attributes.name, thread);
897 		if (status != B_OK)
898 			return status;
899 	}
900 	BReference<Thread> threadReference(thread, true);
901 
902 	thread->team = team;
903 		// set already, so, if something goes wrong, the team pointer is
904 		// available for deinitialization
905 	thread->priority = attributes.priority == -1
906 		? B_NORMAL_PRIORITY : attributes.priority;
907 	thread->next_priority = thread->priority;
908 	thread->state = B_THREAD_SUSPENDED;
909 	thread->next_state = B_THREAD_SUSPENDED;
910 
911 	thread->sig_block_mask = attributes.signal_mask;
912 
913 	// init debug structure
914 	init_thread_debug_info(&thread->debug_info);
915 
916 	// create the kernel stack
917 	char stackName[B_OS_NAME_LENGTH];
918 	snprintf(stackName, B_OS_NAME_LENGTH, "%s_%ld_kstack", thread->name,
919 		thread->id);
920 	thread->kernel_stack_area = create_area(stackName,
921 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
922 		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES  * B_PAGE_SIZE,
923 		B_FULL_LOCK,
924 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
925 
926 	if (thread->kernel_stack_area < 0) {
927 		// we're not yet part of a team, so we can just bail out
928 		status = thread->kernel_stack_area;
929 
930 		dprintf("create_thread: error creating kernel stack: %s!\n",
931 			strerror(status));
932 
933 		return status;
934 	}
935 
936 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
937 		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
938 
939 	if (kernel) {
940 		// Init the thread's kernel stack. It will start executing
941 		// common_thread_entry() with the arguments we prepare here.
942 		ThreadEntryArguments entryArgs;
943 		entryArgs.kernelFunction = attributes.kernelEntry;
944 		entryArgs.argument = attributes.kernelArgument;
945 		entryArgs.enterUserland = false;
946 
947 		init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs));
948 	} else {
949 		// create the userland stack, if the thread doesn't have one yet
950 		if (thread->user_stack_base == 0) {
951 			status = create_thread_user_stack(team, thread,
952 				attributes.stack_address, attributes.stack_size,
953 				attributes.additional_stack_size, stackName);
954 			if (status != B_OK)
955 				return status;
956 		}
957 
958 		// Init the thread's kernel stack. It will start executing
959 		// common_thread_entry() with the arguments we prepare here.
960 		UserThreadEntryArguments entryArgs;
961 		entryArgs.kernelFunction = attributes.kernelEntry;
962 		entryArgs.argument = attributes.kernelArgument;
963 		entryArgs.enterUserland = true;
964 		entryArgs.userlandEntry = (addr_t)attributes.entry;
965 		entryArgs.userlandArgument1 = attributes.args1;
966 		entryArgs.userlandArgument2 = attributes.args2;
967 		entryArgs.pthread = attributes.pthread;
968 		entryArgs.forkArgs = attributes.forkArgs;
969 		entryArgs.flags = attributes.flags;
970 
971 		init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs));
972 
973 		// create the pre-defined thread timers
974 		status = user_timer_create_thread_timers(team, thread);
975 		if (status != B_OK)
976 			return status;
977 	}
978 
979 	// lock the team and see whether it is still alive
980 	TeamLocker teamLocker(team);
981 	if (team->state >= TEAM_STATE_SHUTDOWN)
982 		return B_BAD_TEAM_ID;
983 
984 	bool debugNewThread = false;
985 	if (!kernel) {
986 		// allocate the user_thread structure, if not already allocated
987 		if (thread->user_thread == NULL) {
988 			thread->user_thread = team_allocate_user_thread(team);
989 			if (thread->user_thread == NULL)
990 				return B_NO_MEMORY;
991 		}
992 
993 		// If the new thread belongs to the same team as the current thread, it
994 		// may inherit some of the thread debug flags.
995 		Thread* currentThread = thread_get_current_thread();
996 		if (currentThread != NULL && currentThread->team == team) {
997 			// inherit all user flags...
998 			int32 debugFlags = atomic_get(&currentThread->debug_info.flags)
999 				& B_THREAD_DEBUG_USER_FLAG_MASK;
1000 
1001 			// ... save the syscall tracing flags, unless explicitly specified
1002 			if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
1003 				debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
1004 					| B_THREAD_DEBUG_POST_SYSCALL);
1005 			}
1006 
1007 			thread->debug_info.flags = debugFlags;
1008 
1009 			// stop the new thread, if desired
1010 			debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
1011 		}
1012 	}
1013 
1014 	// We're going to make the thread live, now. The thread itself will take
1015 	// over a reference to its Thread object. We acquire another reference for
1016 	// our own use (and threadReference remains armed).
1017 	thread->AcquireReference();
1018 
1019 	ThreadLocker threadLocker(thread);
1020 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1021 	SpinLocker threadHashLocker(sThreadHashLock);
1022 
1023 	// make thread visible in global hash/list
1024 	thread->visible = true;
1025 	sUsedThreads++;
1026 	scheduler_on_thread_init(thread);
1027 
1028 	// Debug the new thread, if the parent thread required that (see above),
1029 	// or the respective global team debug flag is set. But only if a
1030 	// debugger is installed for the team.
1031 	if (!kernel) {
1032 		int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
1033 		debugNewThread |= (teamDebugFlags & B_TEAM_DEBUG_STOP_NEW_THREADS) != 0;
1034 		if (debugNewThread
1035 			&& (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) != 0) {
1036 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
1037 		}
1038 	}
1039 
1040 	// insert thread into team
1041 	insert_thread_into_team(team, thread);
1042 
1043 	threadHashLocker.Unlock();
1044 	schedulerLocker.Unlock();
1045 	threadLocker.Unlock();
1046 	teamLocker.Unlock();
1047 
1048 	// notify listeners
1049 	sNotificationService.Notify(THREAD_ADDED, thread);
1050 
1051 	return thread->id;
1052 }
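/*	Usage sketch (editor's illustration, not part of the original source): a
	kernel-only thread is created by filling in ThreadCreationAttributes with
	a kernel entry function and passing kernel == true; the new thread starts
	out B_THREAD_SUSPENDED and is typically started with resume_thread().
	my_worker is a placeholder entry function.

		static status_t
		my_worker(void* data)
		{
			// ... do the actual work ...
			return B_OK;
		}

		ThreadCreationAttributes attributes(&my_worker, "my worker",
			B_NORMAL_PRIORITY, NULL, -1, NULL);
		thread_id thread = thread_create_thread(attributes, true);
		if (thread >= 0)
			resume_thread(thread);
*/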
1053 
1054 
1055 static status_t
1056 undertaker(void* /*args*/)
1057 {
1058 	while (true) {
1059 		// wait for a thread to bury
1060 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1061 
1062 		while (sUndertakerEntries.IsEmpty()) {
1063 			ConditionVariableEntry conditionEntry;
1064 			sUndertakerCondition.Add(&conditionEntry);
1065 			schedulerLocker.Unlock();
1066 
1067 			conditionEntry.Wait();
1068 
1069 			schedulerLocker.Lock();
1070 		}
1071 
1072 		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
1073 		schedulerLocker.Unlock();
1074 
1075 		UndertakerEntry entry = *_entry;
1076 			// we need a copy, since the original entry is on the thread's stack
1077 
1078 		// we've got an entry
1079 		Thread* thread = entry.thread;
1080 
1081 		// remove this thread from the kernel team -- this makes it
1082 		// inaccessible
1083 		Team* kernelTeam = team_get_kernel_team();
1084 		TeamLocker kernelTeamLocker(kernelTeam);
1085 		thread->Lock();
1086 		schedulerLocker.Lock();
1087 
1088 		remove_thread_from_team(kernelTeam, thread);
1089 
1090 		schedulerLocker.Unlock();
1091 		kernelTeamLocker.Unlock();
1092 
1093 		// free the thread structure
1094 		thread->UnlockAndReleaseReference();
1095 	}
1096 
1097 	// can never get here
1098 	return B_OK;
1099 }
1100 
1101 
1102 /*!	Returns the semaphore the thread is currently waiting on.
1103 
1104 	The return value is purely informative.
1105 	The caller must hold the scheduler lock.
1106 
1107 	\param thread The thread.
1108 	\return The ID of the semaphore the thread is currently waiting on or \c -1,
1109 		if it isn't waiting on a semaphore.
1110 */
1111 static sem_id
1112 get_thread_wait_sem(Thread* thread)
1113 {
1114 	if (thread->state == B_THREAD_WAITING
1115 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
1116 		return (sem_id)(addr_t)thread->wait.object;
1117 	}
1118 	return -1;
1119 }
1120 
1121 
1122 /*!	Fills the thread_info structure with information from the specified thread.
1123 	The caller must hold the thread's lock and the scheduler lock.
1124 */
1125 static void
1126 fill_thread_info(Thread *thread, thread_info *info, size_t size)
1127 {
1128 	info->thread = thread->id;
1129 	info->team = thread->team->id;
1130 
1131 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
1132 
1133 	info->sem = -1;
1134 
1135 	if (thread->state == B_THREAD_WAITING) {
1136 		info->state = B_THREAD_WAITING;
1137 
1138 		switch (thread->wait.type) {
1139 			case THREAD_BLOCK_TYPE_SNOOZE:
1140 				info->state = B_THREAD_ASLEEP;
1141 				break;
1142 
1143 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1144 			{
1145 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1146 				if (sem == thread->msg.read_sem)
1147 					info->state = B_THREAD_RECEIVING;
1148 				else
1149 					info->sem = sem;
1150 				break;
1151 			}
1152 
1153 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1154 			default:
1155 				break;
1156 		}
1157 	} else
1158 		info->state = (thread_state)thread->state;
1159 
1160 	info->priority = thread->priority;
1161 	info->stack_base = (void *)thread->user_stack_base;
1162 	info->stack_end = (void *)(thread->user_stack_base
1163 		+ thread->user_stack_size);
1164 
1165 	InterruptsSpinLocker threadTimeLocker(thread->time_lock);
1166 	info->user_time = thread->user_time;
1167 	info->kernel_time = thread->kernel_time;
1168 }
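/*	Usage sketch (editor's illustration, not part of the original source):
	this helper ultimately backs the public get_thread_info() call, e.g. in
	userland:

		thread_info info;
		if (get_thread_info(find_thread(NULL), &info) == B_OK)
			printf("%s: priority %" B_PRId32 "\n", info.name, info.priority);
*/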
1169 
1170 
1171 static status_t
1172 send_data_etc(thread_id id, int32 code, const void *buffer, size_t bufferSize,
1173 	int32 flags)
1174 {
1175 	// get the thread
1176 	Thread *target = Thread::Get(id);
1177 	if (target == NULL)
1178 		return B_BAD_THREAD_ID;
1179 	BReference<Thread> targetReference(target, true);
1180 
1181 	// get the write semaphore
1182 	ThreadLocker targetLocker(target);
1183 	sem_id cachedSem = target->msg.write_sem;
1184 	targetLocker.Unlock();
1185 
1186 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
1187 		return B_NO_MEMORY;
1188 
1189 	status_t status = acquire_sem_etc(cachedSem, 1, flags, 0);
1190 	if (status == B_INTERRUPTED) {
1191 		// we got interrupted by a signal
1192 		return status;
1193 	}
1194 	if (status != B_OK) {
1195 		// Any other acquisition problems may be due to thread deletion
1196 		return B_BAD_THREAD_ID;
1197 	}
1198 
1199 	void* data;
1200 	if (bufferSize > 0) {
1201 		data = malloc(bufferSize);
1202 		if (data == NULL)
1203 			return B_NO_MEMORY;
1204 		if (user_memcpy(data, buffer, bufferSize) != B_OK) {
1205 			free(data);
1206 			return B_BAD_DATA;
1207 		}
1208 	} else
1209 		data = NULL;
1210 
1211 	targetLocker.Lock();
1212 
1213 	// The target thread could have been deleted at this point.
1214 	if (!target->IsAlive()) {
1215 		targetLocker.Unlock();
1216 		free(data);
1217 		return B_BAD_THREAD_ID;
1218 	}
1219 
1220 	// Save message information
1221 	target->msg.sender = thread_get_current_thread()->id;
1222 	target->msg.code = code;
1223 	target->msg.size = bufferSize;
1224 	target->msg.buffer = data;
1225 	cachedSem = target->msg.read_sem;
1226 
1227 	targetLocker.Unlock();
1228 
1229 	release_sem(cachedSem);
1230 	return B_OK;
1231 }
1232 
1233 
1234 static int32
1235 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
1236 	int32 flags)
1237 {
1238 	Thread *thread = thread_get_current_thread();
1239 	size_t size;
1240 	int32 code;
1241 
1242 	status_t status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
1243 	if (status != B_OK) {
1244 		// Actually, we're not supposed to return error codes
1245 		// but since the only reason this can fail is that we
1246 		// were killed, it's probably okay to do so (but also
1247 		// meaningless).
1248 		return status;
1249 	}
1250 
1251 	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
1252 		size = min_c(bufferSize, thread->msg.size);
1253 		status = user_memcpy(buffer, thread->msg.buffer, size);
1254 		if (status != B_OK) {
1255 			free(thread->msg.buffer);
1256 			release_sem(thread->msg.write_sem);
1257 			return status;
1258 		}
1259 	}
1260 
1261 	*_sender = thread->msg.sender;
1262 	code = thread->msg.code;
1263 
1264 	free(thread->msg.buffer);
1265 	release_sem(thread->msg.write_sem);
1266 
1267 	return code;
1268 }
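/*	Usage sketch (editor's illustration, not part of the original source):
	these helpers back the public send_data(), receive_data() and has_data()
	calls. A typical pairing between two cooperating threads looks like this
	(targetThread, kSetupCode and setup are placeholders):

		// sender
		send_data(targetThread, kSetupCode, &setup, sizeof(setup));

		// receiver -- blocks until a message arrives
		thread_id sender;
		int32 code = receive_data(&sender, &setup, sizeof(setup));

	The write semaphore serializes senders; the read semaphore wakes the
	receiver once the message fields have been filled in.
*/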
1269 
1270 
1271 static status_t
1272 common_getrlimit(int resource, struct rlimit * rlp)
1273 {
1274 	if (!rlp)
1275 		return B_BAD_ADDRESS;
1276 
1277 	switch (resource) {
1278 		case RLIMIT_NOFILE:
1279 		case RLIMIT_NOVMON:
1280 			return vfs_getrlimit(resource, rlp);
1281 
1282 		case RLIMIT_CORE:
1283 			rlp->rlim_cur = 0;
1284 			rlp->rlim_max = 0;
1285 			return B_OK;
1286 
1287 		case RLIMIT_STACK:
1288 		{
1289 			Thread *thread = thread_get_current_thread();
1290 			rlp->rlim_cur = thread->user_stack_size;
1291 			rlp->rlim_max = thread->user_stack_size;
1292 			return B_OK;
1293 		}
1294 
1295 		default:
1296 			return EINVAL;
1297 	}
1298 
1299 	return B_OK;
1300 }
1301 
1302 
1303 static status_t
1304 common_setrlimit(int resource, const struct rlimit * rlp)
1305 {
1306 	if (!rlp)
1307 		return B_BAD_ADDRESS;
1308 
1309 	switch (resource) {
1310 		case RLIMIT_NOFILE:
1311 		case RLIMIT_NOVMON:
1312 			return vfs_setrlimit(resource, rlp);
1313 
1314 		case RLIMIT_CORE:
1315 			// We don't support core files, so only allow setting them to 0/0.
1316 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
1317 				return EINVAL;
1318 			return B_OK;
1319 
1320 		default:
1321 			return EINVAL;
1322 	}
1323 
1324 	return B_OK;
1325 }
1326 
1327 
1328 static status_t
1329 common_snooze_etc(bigtime_t timeout, clockid_t clockID, uint32 flags,
1330 	bigtime_t* _remainingTime)
1331 {
1332 	switch (clockID) {
1333 		case CLOCK_REALTIME:
1334 			// make sure the B_TIMEOUT_REAL_TIME_BASE flag is set and fall
1335 			// through
1336 			flags |= B_TIMEOUT_REAL_TIME_BASE;
1337 		case CLOCK_MONOTONIC:
1338 		{
1339 			// Store the start time, in case we get interrupted and need to
1340 			// return the remaining time. For absolute timeouts we can still
1341 			// get the time later, if needed.
1342 			bigtime_t startTime
1343 				= _remainingTime != NULL && (flags & B_RELATIVE_TIMEOUT) != 0
1344 					? system_time() : 0;
1345 
1346 			Thread* thread = thread_get_current_thread();
1347 
1348 			InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1349 
1350 			thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE,
1351 				NULL);
1352 			status_t status = thread_block_with_timeout_locked(flags, timeout);
1353 
1354 			if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
1355 				return B_OK;
1356 
1357 			// If interrupted, compute the remaining time, if requested.
1358 			if (status == B_INTERRUPTED && _remainingTime != NULL) {
1359 				if ((flags & B_RELATIVE_TIMEOUT) != 0) {
1360 					*_remainingTime = std::max(
1361 						startTime + timeout - system_time(), (bigtime_t)0);
1362 				} else {
1363 					bigtime_t now = (flags & B_TIMEOUT_REAL_TIME_BASE) != 0
1364 						? real_time_clock_usecs() : system_time();
1365 					*_remainingTime = std::max(timeout - now, (bigtime_t)0);
1366 				}
1367 			}
1368 
1369 			return status;
1370 		}
1371 
1372 		case CLOCK_THREAD_CPUTIME_ID:
1373 			// Waiting for ourselves to do something isn't particularly
1374 			// productive.
1375 			return B_BAD_VALUE;
1376 
1377 		case CLOCK_PROCESS_CPUTIME_ID:
1378 		default:
1379 			// We don't have to support those, but we are allowed to. Could be
1380 			// done by creating a UserTimer on the fly with a custom UserEvent
1381 			// that would just wake us up.
1382 			return ENOTSUP;
1383 	}
1384 }
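/*	Worked example (editor's illustration): a relative snooze of 100000 us
	that is interrupted by a signal after about 30000 us reports
	*_remainingTime = startTime + timeout - system_time(), i.e. roughly
	70000 us; an absolute CLOCK_MONOTONIC timeout instead reports
	timeout - system_time(), clamped to 0.
*/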
1385 
1386 
1387 //	#pragma mark - debugger calls
1388 
1389 
1390 static int
1391 make_thread_unreal(int argc, char **argv)
1392 {
1393 	int32 id = -1;
1394 
1395 	if (argc > 2) {
1396 		print_debugger_command_usage(argv[0]);
1397 		return 0;
1398 	}
1399 
1400 	if (argc > 1)
1401 		id = strtoul(argv[1], NULL, 0);
1402 
1403 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1404 			Thread* thread = it.Next();) {
1405 		if (id != -1 && thread->id != id)
1406 			continue;
1407 
1408 		if (thread->priority > B_DISPLAY_PRIORITY) {
1409 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
1410 			kprintf("thread %ld made unreal\n", thread->id);
1411 		}
1412 	}
1413 
1414 	return 0;
1415 }
1416 
1417 
1418 static int
1419 set_thread_prio(int argc, char **argv)
1420 {
1421 	int32 id;
1422 	int32 prio;
1423 
1424 	if (argc > 3 || argc < 2) {
1425 		print_debugger_command_usage(argv[0]);
1426 		return 0;
1427 	}
1428 
1429 	prio = strtoul(argv[1], NULL, 0);
1430 	if (prio > THREAD_MAX_SET_PRIORITY)
1431 		prio = THREAD_MAX_SET_PRIORITY;
1432 	if (prio < THREAD_MIN_SET_PRIORITY)
1433 		prio = THREAD_MIN_SET_PRIORITY;
1434 
1435 	if (argc > 2)
1436 		id = strtoul(argv[2], NULL, 0);
1437 	else
1438 		id = thread_get_current_thread()->id;
1439 
1440 	bool found = false;
1441 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1442 			Thread* thread = it.Next();) {
1443 		if (thread->id != id)
1444 			continue;
1445 		thread->priority = thread->next_priority = prio;
1446 		kprintf("thread %ld set to priority %ld\n", id, prio);
1447 		found = true;
1448 		break;
1449 	}
1450 	if (!found)
1451 		kprintf("thread %ld (%#lx) not found\n", id, id);
1452 
1453 	return 0;
1454 }
1455 
1456 
1457 static int
1458 make_thread_suspended(int argc, char **argv)
1459 {
1460 	int32 id;
1461 
1462 	if (argc > 2) {
1463 		print_debugger_command_usage(argv[0]);
1464 		return 0;
1465 	}
1466 
1467 	if (argc == 1)
1468 		id = thread_get_current_thread()->id;
1469 	else
1470 		id = strtoul(argv[1], NULL, 0);
1471 
1472 	bool found = false;
1473 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1474 			Thread* thread = it.Next();) {
1475 		if (thread->id != id)
1476 			continue;
1477 
1478 		thread->next_state = B_THREAD_SUSPENDED;
1479 		kprintf("thread %ld suspended\n", id);
1480 		found = true;
1481 		break;
1482 	}
1483 	if (!found)
1484 		kprintf("thread %ld (%#lx) not found\n", id, id);
1485 
1486 	return 0;
1487 }
1488 
1489 
1490 static int
1491 make_thread_resumed(int argc, char **argv)
1492 {
1493 	int32 id;
1494 
1495 	if (argc != 2) {
1496 		print_debugger_command_usage(argv[0]);
1497 		return 0;
1498 	}
1499 
1500 	// force user to enter a thread id, as using
1501 	// the current thread is usually not intended
1502 	id = strtoul(argv[1], NULL, 0);
1503 
1504 	bool found = false;
1505 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1506 			Thread* thread = it.Next();) {
1507 		if (thread->id != id)
1508 			continue;
1509 
1510 		if (thread->state == B_THREAD_SUSPENDED) {
1511 			scheduler_enqueue_in_run_queue(thread);
1512 			kprintf("thread %ld resumed\n", thread->id);
1513 		}
1514 		found = true;
1515 		break;
1516 	}
1517 	if (!found)
1518 		kprintf("thread %ld (%#lx) not found\n", id, id);
1519 
1520 	return 0;
1521 }
1522 
1523 
1524 static int
1525 drop_into_debugger(int argc, char **argv)
1526 {
1527 	status_t err;
1528 	int32 id;
1529 
1530 	if (argc > 2) {
1531 		print_debugger_command_usage(argv[0]);
1532 		return 0;
1533 	}
1534 
1535 	if (argc == 1)
1536 		id = thread_get_current_thread()->id;
1537 	else
1538 		id = strtoul(argv[1], NULL, 0);
1539 
1540 	err = _user_debug_thread(id);
1541 		// TODO: This is a non-trivial syscall doing some locking, so this is
1542 		// really nasty and may go seriously wrong.
1543 	if (err)
1544 		kprintf("drop failed\n");
1545 	else
1546 		kprintf("thread %ld dropped into user debugger\n", id);
1547 
1548 	return 0;
1549 }
1550 
1551 
1552 /*!	Returns a user-readable string for a thread state.
1553 	Only for use in the kernel debugger.
1554 */
1555 static const char *
1556 state_to_text(Thread *thread, int32 state)
1557 {
1558 	switch (state) {
1559 		case B_THREAD_READY:
1560 			return "ready";
1561 
1562 		case B_THREAD_RUNNING:
1563 			return "running";
1564 
1565 		case B_THREAD_WAITING:
1566 		{
1567 			if (thread != NULL) {
1568 				switch (thread->wait.type) {
1569 					case THREAD_BLOCK_TYPE_SNOOZE:
1570 						return "zzz";
1571 
1572 					case THREAD_BLOCK_TYPE_SEMAPHORE:
1573 					{
1574 						sem_id sem = (sem_id)(addr_t)thread->wait.object;
1575 						if (sem == thread->msg.read_sem)
1576 							return "receive";
1577 						break;
1578 					}
1579 				}
1580 			}
1581 
1582 			return "waiting";
1583 		}
1584 
1585 		case B_THREAD_SUSPENDED:
1586 			return "suspended";
1587 
1588 		case THREAD_STATE_FREE_ON_RESCHED:
1589 			return "death";
1590 
1591 		default:
1592 			return "UNKNOWN";
1593 	}
1594 }
1595 
1596 
1597 static void
1598 print_thread_list_table_head()
1599 {
1600 	kprintf("thread         id  state     wait for   object  cpu pri  stack    "
1601 		"  team  name\n");
1602 }
1603 
1604 
1605 static void
1606 _dump_thread_info(Thread *thread, bool shortInfo)
1607 {
1608 	if (shortInfo) {
1609 		kprintf("%p %6ld  %-10s", thread, thread->id, state_to_text(thread,
1610 			thread->state));
1611 
1612 		// does it block on a semaphore or a condition variable?
1613 		if (thread->state == B_THREAD_WAITING) {
1614 			switch (thread->wait.type) {
1615 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1616 				{
1617 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1618 					if (sem == thread->msg.read_sem)
1619 						kprintf("                    ");
1620 					else
1621 						kprintf("sem  %12ld   ", sem);
1622 					break;
1623 				}
1624 
1625 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1626 					kprintf("cvar   %p   ", thread->wait.object);
1627 					break;
1628 
1629 				case THREAD_BLOCK_TYPE_SNOOZE:
1630 					kprintf("                    ");
1631 					break;
1632 
1633 				case THREAD_BLOCK_TYPE_SIGNAL:
1634 					kprintf("signal              ");
1635 					break;
1636 
1637 				case THREAD_BLOCK_TYPE_MUTEX:
1638 					kprintf("mutex  %p   ", thread->wait.object);
1639 					break;
1640 
1641 				case THREAD_BLOCK_TYPE_RW_LOCK:
1642 					kprintf("rwlock %p   ", thread->wait.object);
1643 					break;
1644 
1645 				case THREAD_BLOCK_TYPE_OTHER:
1646 					kprintf("other               ");
1647 					break;
1648 
1649 				default:
1650 					kprintf("???    %p   ", thread->wait.object);
1651 					break;
1652 			}
1653 		} else
1654 			kprintf("        -           ");
1655 
1656 		// on which CPU does it run?
1657 		if (thread->cpu)
1658 			kprintf("%2d", thread->cpu->cpu_num);
1659 		else
1660 			kprintf(" -");
1661 
1662 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1663 			(void *)thread->kernel_stack_base, thread->team->id,
1664 			thread->name != NULL ? thread->name : "<NULL>");
1665 
1666 		return;
1667 	}
1668 
1669 	// print the long info
1670 
1671 	struct thread_death_entry *death = NULL;
1672 
1673 	kprintf("THREAD: %p\n", thread);
1674 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1675 	kprintf("serial_number:      %" B_PRId64 "\n", thread->serial_number);
1676 	kprintf("name:               \"%s\"\n", thread->name);
1677 	kprintf("hash_next:          %p\nteam_next:          %p\nq_next:             %p\n",
1678 		thread->hash_next, thread->team_next, thread->queue_next);
1679 	kprintf("priority:           %ld (next %ld, I/O: %ld)\n", thread->priority,
1680 		thread->next_priority, thread->io_priority);
1681 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1682 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1683 	kprintf("cpu:                %p ", thread->cpu);
1684 	if (thread->cpu)
1685 		kprintf("(%d)\n", thread->cpu->cpu_num);
1686 	else
1687 		kprintf("\n");
1688 	kprintf("sig_pending:        %#llx (blocked: %#llx"
1689 		", before sigsuspend(): %#llx)\n",
1690 		(long long)thread->ThreadPendingSignals(),
1691 		(long long)thread->sig_block_mask,
1692 		(long long)thread->sigsuspend_original_unblocked_mask);
1693 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1694 
1695 	if (thread->state == B_THREAD_WAITING) {
1696 		kprintf("waiting for:        ");
1697 
1698 		switch (thread->wait.type) {
1699 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1700 			{
1701 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1702 				if (sem == thread->msg.read_sem)
1703 					kprintf("data\n");
1704 				else
1705 					kprintf("semaphore %ld\n", sem);
1706 				break;
1707 			}
1708 
1709 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1710 				kprintf("condition variable %p\n", thread->wait.object);
1711 				break;
1712 
1713 			case THREAD_BLOCK_TYPE_SNOOZE:
1714 				kprintf("snooze()\n");
1715 				break;
1716 
1717 			case THREAD_BLOCK_TYPE_SIGNAL:
1718 				kprintf("signal\n");
1719 				break;
1720 
1721 			case THREAD_BLOCK_TYPE_MUTEX:
1722 				kprintf("mutex %p\n", thread->wait.object);
1723 				break;
1724 
1725 			case THREAD_BLOCK_TYPE_RW_LOCK:
1726 				kprintf("rwlock %p\n", thread->wait.object);
1727 				break;
1728 
1729 			case THREAD_BLOCK_TYPE_OTHER:
1730 				kprintf("other (%s)\n", (char*)thread->wait.object);
1731 				break;
1732 
1733 			default:
1734 				kprintf("unknown (%p)\n", thread->wait.object);
1735 				break;
1736 		}
1737 	}
1738 
1739 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1740 	kprintf("team:               %p, \"%s\"\n", thread->team,
1741 		thread->team->Name());
1742 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1743 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1744 	kprintf("  exit.waiters:\n");
1745 	while ((death = (struct thread_death_entry*)list_get_next_item(
1746 			&thread->exit.waiters, death)) != NULL) {
1747 		kprintf("\t%p (thread %ld)\n", death, death->thread);
1748 	}
1749 
1750 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1751 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1752 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1753 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1754 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1755 	kprintf("user_thread:        %p\n", (void *)thread->user_thread);
1756 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1757 		strerror(thread->kernel_errno));
1758 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1759 	kprintf("user_time:          %Ld\n", thread->user_time);
1760 	kprintf("flags:              0x%lx\n", thread->flags);
1761 	kprintf("architecture dependent section:\n");
1762 	arch_thread_dump_info(&thread->arch_info);
1763 }
1764 
1765 
1766 static int
1767 dump_thread_info(int argc, char **argv)
1768 {
1769 	bool shortInfo = false;
1770 	int argi = 1;
1771 	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
1772 		shortInfo = true;
1773 		print_thread_list_table_head();
1774 		argi++;
1775 	}
1776 
1777 	if (argi == argc) {
1778 		_dump_thread_info(thread_get_current_thread(), shortInfo);
1779 		return 0;
1780 	}
1781 
1782 	for (; argi < argc; argi++) {
1783 		const char *name = argv[argi];
1784 		int32 id = strtoul(name, NULL, 0);
1785 
1786 		if (IS_KERNEL_ADDRESS(id)) {
1787 			// semi-hack
1788 			_dump_thread_info((Thread *)id, shortInfo);
1789 			continue;
1790 		}
1791 
1792 		// walk through the thread list, trying to match name or id
1793 		bool found = false;
1794 		for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1795 				Thread* thread = it.Next();) {
1796 			if (!strcmp(name, thread->name) || thread->id == id) {
1797 				_dump_thread_info(thread, shortInfo);
1798 				found = true;
1799 				break;
1800 			}
1801 		}
1802 
1803 		if (!found)
1804 			kprintf("thread \"%s\" (%ld) doesn't exist!\n", name, id);
1805 	}
1806 
1807 	return 0;
1808 }
1809 
1810 
1811 static int
1812 dump_thread_list(int argc, char **argv)
1813 {
1814 	bool realTimeOnly = false;
1815 	bool calling = false;
1816 	const char *callSymbol = NULL;
1817 	addr_t callStart = 0;
1818 	addr_t callEnd = 0;
1819 	int32 requiredState = 0;
1820 	team_id team = -1;
1821 	sem_id sem = -1;
1822 
1823 	if (!strcmp(argv[0], "realtime"))
1824 		realTimeOnly = true;
1825 	else if (!strcmp(argv[0], "ready"))
1826 		requiredState = B_THREAD_READY;
1827 	else if (!strcmp(argv[0], "running"))
1828 		requiredState = B_THREAD_RUNNING;
1829 	else if (!strcmp(argv[0], "waiting")) {
1830 		requiredState = B_THREAD_WAITING;
1831 
1832 		if (argc > 1) {
1833 			sem = strtoul(argv[1], NULL, 0);
1834 			if (sem == 0)
1835 				kprintf("ignoring invalid semaphore argument.\n");
1836 		}
1837 	} else if (!strcmp(argv[0], "calling")) {
1838 		if (argc < 2) {
1839 			kprintf("Need to give a symbol name or start and end arguments.\n");
1840 			return 0;
1841 		} else if (argc == 3) {
1842 			callStart = parse_expression(argv[1]);
1843 			callEnd = parse_expression(argv[2]);
1844 		} else
1845 			callSymbol = argv[1];
1846 
1847 		calling = true;
1848 	} else if (argc > 1) {
1849 		team = strtoul(argv[1], NULL, 0);
1850 		if (team == 0)
1851 			kprintf("ignoring invalid team argument.\n");
1852 	}
1853 
1854 	print_thread_list_table_head();
1855 
1856 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1857 			Thread* thread = it.Next();) {
1858 		// filter out threads not matching the search criteria
1859 		if ((requiredState && thread->state != requiredState)
1860 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1861 					callStart, callEnd))
1862 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1863 			|| (team > 0 && thread->team->id != team)
1864 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1865 			continue;
1866 
1867 		_dump_thread_info(thread, true);
1868 	}
1869 	return 0;
1870 }
1871 
1872 
1873 //	#pragma mark - private kernel API
1874 
1875 
1876 void
1877 thread_exit(void)
1878 {
1879 	cpu_status state;
1880 	Thread* thread = thread_get_current_thread();
1881 	Team* team = thread->team;
1882 	Team* kernelTeam = team_get_kernel_team();
1883 	thread_id parentID = -1;
1884 	status_t status;
1885 	struct thread_debug_info debugInfo;
1886 	team_id teamID = team->id;
1887 
1888 	TRACE(("thread %ld exiting w/return code %#lx\n", thread->id,
1889 		thread->exit.status));
1890 
1891 	if (!are_interrupts_enabled())
1892 		panic("thread_exit() called with interrupts disabled!\n");
1893 
1894 	// boost our priority to get this over with
1895 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1896 
1897 	if (team != kernelTeam) {
1898 		// Cancel previously installed alarm timer, if any. Hold the scheduler
1899 		// lock to make sure that when cancel_timer() returns, the alarm timer
1900 		// hook will not be invoked anymore (since
1901 		// B_TIMER_ACQUIRE_SCHEDULER_LOCK is used).
1902 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1903 		cancel_timer(&thread->alarm);
1904 		schedulerLocker.Unlock();
1905 
1906 		// Delete all user timers associated with the thread.
1907 		ThreadLocker threadLocker(thread);
1908 		thread->DeleteUserTimers(false);
1909 
1910 		// detach the thread's user thread
1911 		user_thread* userThread = thread->user_thread;
1912 		thread->user_thread = NULL;
1913 
1914 		threadLocker.Unlock();
1915 
1916 		// Delete the thread's user thread, if it's not the main thread. If it
1917 		// is, we can save the work, since it will be deleted with the team's
1918 		// address space.
1919 		if (thread != team->main_thread)
1920 			team_free_user_thread(team, userThread);
1921 	}
1922 
1923 	// remember the user stack area -- we will delete it below
1924 	area_id userStackArea = -1;
1925 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1926 		userStackArea = thread->user_stack_area;
1927 		thread->user_stack_area = -1;
1928 	}
1929 
1930 	struct job_control_entry *death = NULL;
1931 	struct thread_death_entry* threadDeathEntry = NULL;
1932 	bool deleteTeam = false;
1933 	port_id debuggerPort = -1;
1934 
1935 	if (team != kernelTeam) {
1936 		user_debug_thread_exiting(thread);
1937 
1938 		if (team->main_thread == thread) {
1939 			// The main thread is exiting. Shut down the whole team.
1940 			deleteTeam = true;
1941 
1942 			// kill off all other threads and the user debugger facilities
1943 			debuggerPort = team_shutdown_team(team);
1944 
1945 			// acquire necessary locks, which are: process group lock, kernel
1946 			// team lock, parent team lock, and the team lock
1947 			team->LockProcessGroup();
1948 			kernelTeam->Lock();
1949 			team->LockTeamAndParent(true);
1950 		} else {
1951 			threadDeathEntry
1952 				= (thread_death_entry*)malloc(sizeof(thread_death_entry));
1953 
1954 			// acquire necessary locks, which are: kernel team lock and the team
1955 			// lock
1956 			kernelTeam->Lock();
1957 			team->Lock();
1958 		}
1959 
1960 		ThreadLocker threadLocker(thread);
1961 
1962 		state = disable_interrupts();
1963 
1964 		// swap address spaces, to make sure we're running on the kernel's pgdir
1965 		vm_swap_address_space(team->address_space, VMAddressSpace::Kernel());
1966 
1967 		SpinLocker schedulerLocker(gSchedulerLock);
1968 			// removing the thread and putting its death entry to the parent
1969 			// team needs to be an atomic operation
1970 
1971 		// remember how long this thread lasted
1972 		bigtime_t now = system_time();
1973 		InterruptsSpinLocker threadTimeLocker(thread->time_lock);
1974 		thread->kernel_time += now - thread->last_time;
1975 		thread->last_time = now;
1976 		threadTimeLocker.Unlock();
1977 
1978 		team->dead_threads_kernel_time += thread->kernel_time;
1979 		team->dead_threads_user_time += thread->user_time;
1980 
1981 		// stop/update thread/team CPU time user timers
1982 		if (thread->HasActiveCPUTimeUserTimers()
1983 			|| team->HasActiveCPUTimeUserTimers()) {
1984 			user_timer_stop_cpu_timers(thread, NULL);
1985 		}
1986 
1987 		// deactivate CPU time user timers for the thread
1988 		if (thread->HasActiveCPUTimeUserTimers())
1989 			thread->DeactivateCPUTimeUserTimers();
1990 
1991 		// put the thread into the kernel team until it dies
1992 		remove_thread_from_team(team, thread);
1993 		insert_thread_into_team(kernelTeam, thread);
1994 
1995 		if (team->death_entry != NULL) {
1996 			if (--team->death_entry->remaining_threads == 0)
1997 				team->death_entry->condition.NotifyOne(true, B_OK);
1998 		}
1999 
2000 		if (deleteTeam) {
2001 			Team* parent = team->parent;
2002 
2003 			// remember who our parent was so we can send a signal
2004 			parentID = parent->id;
2005 
2006 			// Set the team job control state to "dead" and detach the job
2007 			// control entry from our team struct.
2008 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, NULL,
2009 				true);
2010 			death = team->job_control_entry;
2011 			team->job_control_entry = NULL;
2012 
2013 			if (death != NULL) {
2014 				death->InitDeadState();
2015 
2016 				// team_set_job_control_state() already moved our entry
2017 				// into the parent's list. We just check the soft limit of
2018 				// death entries.
2019 				if (parent->dead_children.count > MAX_DEAD_CHILDREN) {
2020 					death = parent->dead_children.entries.RemoveHead();
2021 					parent->dead_children.count--;
2022 				} else
2023 					death = NULL;
2024 			}
2025 
2026 			schedulerLocker.Unlock();
2027 			restore_interrupts(state);
2028 
2029 			threadLocker.Unlock();
2030 
2031 			// Get a temporary reference to the team's process group
2032 			// -- team_remove_team() removes the team from the group, which
2033 			// might destroy it otherwise and we wouldn't be able to unlock it.
2034 			ProcessGroup* group = team->group;
2035 			group->AcquireReference();
2036 
2037 			pid_t foregroundGroupToSignal;
2038 			team_remove_team(team, foregroundGroupToSignal);
2039 
2040 			// unlock everything but the parent team
2041 			team->Unlock();
2042 			if (parent != kernelTeam)
2043 				kernelTeam->Unlock();
2044 			group->Unlock();
2045 			group->ReleaseReference();
2046 
2047 			// Send SIGCHLD to the parent as long as we still have its lock.
2048 			// This makes job control state change + signalling atomic.
2049 			Signal childSignal(SIGCHLD, team->exit.reason, B_OK, team->id);
2050 			if (team->exit.reason == CLD_EXITED) {
2051 				childSignal.SetStatus(team->exit.status);
2052 			} else {
2053 				childSignal.SetStatus(team->exit.signal);
2054 				childSignal.SetSendingUser(team->exit.signaling_user);
2055 			}
2056 			send_signal_to_team(parent, childSignal, B_DO_NOT_RESCHEDULE);
2057 
2058 			// also unlock the parent
2059 			parent->Unlock();
2060 
2061 			// If the team was a session leader with controlling TTY, we have
2062 			// to send SIGHUP to the foreground process group.
2063 			if (foregroundGroupToSignal >= 0) {
2064 				Signal groupSignal(SIGHUP, SI_USER, B_OK, team->id);
2065 				send_signal_to_process_group(foregroundGroupToSignal,
2066 					groupSignal, B_DO_NOT_RESCHEDULE);
2067 			}
2068 		} else {
2069 			// The thread is not the main thread. We store a thread death entry
2070 			// for it, unless someone is already waiting for it.
2071 			if (threadDeathEntry != NULL
2072 				&& list_is_empty(&thread->exit.waiters)) {
2073 				threadDeathEntry->thread = thread->id;
2074 				threadDeathEntry->status = thread->exit.status;
2075 
2076 				// add entry -- remove an old one, if we hit the limit
2077 				list_add_item(&team->dead_threads, threadDeathEntry);
2078 				team->dead_threads_count++;
2079 				threadDeathEntry = NULL;
2080 
2081 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
2082 					threadDeathEntry
2083 						= (thread_death_entry*)list_remove_head_item(
2084 							&team->dead_threads);
2085 					team->dead_threads_count--;
2086 				}
2087 			}
2088 
2089 			schedulerLocker.Unlock();
2090 			restore_interrupts(state);
2091 
2092 			threadLocker.Unlock();
2093 			team->Unlock();
2094 			kernelTeam->Unlock();
2095 		}
2096 
2097 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
2098 	}
2099 
2100 	free(threadDeathEntry);
2101 
2102 	// delete the team if we're its main thread
2103 	if (deleteTeam) {
2104 		team_delete_team(team, debuggerPort);
2105 
2106 		// we need to delete any death entry that made it to here
2107 		delete death;
2108 	}
2109 
2110 	ThreadLocker threadLocker(thread);
2111 
2112 	state = disable_interrupts();
2113 	SpinLocker schedulerLocker(gSchedulerLock);
2114 
2115 	// mark invisible in global hash/list, so it's no longer accessible
2116 	SpinLocker threadHashLocker(sThreadHashLock);
2117 	thread->visible = false;
2118 	sUsedThreads--;
2119 	threadHashLocker.Unlock();
2120 
2121 	// Stop debugging for this thread
2122 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
2123 	debugInfo = thread->debug_info;
2124 	clear_thread_debug_info(&thread->debug_info, true);
2125 	threadDebugInfoLocker.Unlock();
2126 
2127 	// Remove the select infos. We notify them a little later.
2128 	select_info* selectInfos = thread->select_infos;
2129 	thread->select_infos = NULL;
2130 
2131 	schedulerLocker.Unlock();
2132 	restore_interrupts(state);
2133 
2134 	threadLocker.Unlock();
2135 
2136 	destroy_thread_debug_info(&debugInfo);
2137 
2138 	// notify select infos
2139 	select_info* info = selectInfos;
2140 	while (info != NULL) {
2141 		select_sync* sync = info->sync;
2142 
2143 		notify_select_events(info, B_EVENT_INVALID);
2144 		info = info->next;
2145 		put_select_sync(sync);
2146 	}
2147 
2148 	// notify listeners
2149 	sNotificationService.Notify(THREAD_REMOVED, thread);
2150 
2151 	// shutdown the thread messaging
2152 
2153 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
2154 	if (status == B_WOULD_BLOCK) {
2155 		// there is data waiting for us, so let us eat it
2156 		thread_id sender;
2157 
2158 		delete_sem(thread->msg.write_sem);
2159 			// first, let's remove all possibly waiting writers
2160 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
2161 	} else {
2162 		// we probably own the semaphore here, and we're the last to do so
2163 		delete_sem(thread->msg.write_sem);
2164 	}
2165 	// now we can safely remove the msg.read_sem
2166 	delete_sem(thread->msg.read_sem);
2167 
2168 	// fill all death entries and delete the sem that others will use to wait
2169 	// for us
2170 	{
2171 		sem_id cachedExitSem = thread->exit.sem;
2172 
2173 		ThreadLocker threadLocker(thread);
2174 
2175 		// make sure no one will grab this semaphore again
2176 		thread->exit.sem = -1;
2177 
2178 		// fill all death entries
2179 		thread_death_entry* entry = NULL;
2180 		while ((entry = (thread_death_entry*)list_get_next_item(
2181 				&thread->exit.waiters, entry)) != NULL) {
2182 			entry->status = thread->exit.status;
2183 		}
2184 
2185 		threadLocker.Unlock();
2186 
2187 		delete_sem(cachedExitSem);
2188 	}
2189 
2190 	// delete the user stack, if this was a user thread
2191 	if (!deleteTeam && userStackArea >= 0) {
2192 		// We postponed deleting the user stack until now, since this way all
2193 		// notifications for the thread's death are out already and all other
2194 		// threads waiting for this thread's death and some object on its stack
2195 		// will wake up before we (try to) delete the stack area. Of most
2196 		// relevance is probably the case where this is the main thread and
2197 		// other threads use objects on its stack -- so we want them terminated
2198 		// first.
2199 		// When the team is deleted, all areas are deleted anyway, so we don't
2200 		// need to do that explicitly in that case.
2201 		vm_delete_area(teamID, userStackArea, true);
2202 	}
2203 
2204 	// notify the debugger
2205 	if (teamID != kernelTeam->id)
2206 		user_debug_thread_deleted(teamID, thread->id);
2207 
2208 	// enqueue in the undertaker list and reschedule for the last time
2209 	UndertakerEntry undertakerEntry(thread, teamID);
2210 
2211 	disable_interrupts();
2212 	schedulerLocker.Lock();
2213 
2214 	sUndertakerEntries.Add(&undertakerEntry);
2215 	sUndertakerCondition.NotifyOne(true);
2216 
2217 	thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
2218 	scheduler_reschedule();
2219 
2220 	panic("never can get here\n");
2221 }
2222 
2223 
2224 /*!	Called in the interrupt handler code when a thread enters
2225 	the kernel for any reason.
2226 	Only tracks time for now.
2227 	Interrupts are disabled.
2228 */
2229 void
2230 thread_at_kernel_entry(bigtime_t now)
2231 {
2232 	Thread *thread = thread_get_current_thread();
2233 
2234 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
2235 
2236 	// track user time
2237 	SpinLocker threadTimeLocker(thread->time_lock);
2238 	thread->user_time += now - thread->last_time;
2239 	thread->last_time = now;
2240 	thread->in_kernel = true;
2241 	threadTimeLocker.Unlock();
2242 }
2243 
2244 
2245 /*!	Called whenever a thread exits kernel space to user space.
2246 	Tracks time, handles signals, ...
2247 	Interrupts must be enabled. When the function returns, interrupts will be
2248 	disabled.
2249 	The function may not return, e.g. when the thread has received a deadly
2250 	signal.
2251 */
2252 void
2253 thread_at_kernel_exit(void)
2254 {
2255 	Thread *thread = thread_get_current_thread();
2256 
2257 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
2258 
2259 	handle_signals(thread);
2260 
2261 	disable_interrupts();
2262 
2263 	// track kernel time
2264 	bigtime_t now = system_time();
2265 	SpinLocker threadTimeLocker(thread->time_lock);
2266 	thread->in_kernel = false;
2267 	thread->kernel_time += now - thread->last_time;
2268 	thread->last_time = now;
2269 }
2270 
2271 
2272 /*!	The quick version of thread_at_kernel_exit(), in case no signals are
2273 	pending and no debugging shall be done.
2274 	Interrupts must be disabled.
2275 */
2276 void
2277 thread_at_kernel_exit_no_signals(void)
2278 {
2279 	Thread *thread = thread_get_current_thread();
2280 
2281 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
2282 
2283 	// track kernel time
2284 	bigtime_t now = system_time();
2285 	SpinLocker threadTimeLocker(thread->time_lock);
2286 	thread->in_kernel = false;
2287 	thread->kernel_time += now - thread->last_time;
2288 	thread->last_time = now;
2289 }
2290 
2291 
2292 void
2293 thread_reset_for_exec(void)
2294 {
2295 	Thread* thread = thread_get_current_thread();
2296 
2297 	ThreadLocker threadLocker(thread);
2298 
2299 	// delete user-defined timers
2300 	thread->DeleteUserTimers(true);
2301 
2302 	// cancel pre-defined timer
2303 	if (UserTimer* timer = thread->UserTimerFor(USER_TIMER_REAL_TIME_ID))
2304 		timer->Cancel();
2305 
2306 	// reset user_thread and user stack
2307 	thread->user_thread = NULL;
2308 	thread->user_stack_area = -1;
2309 	thread->user_stack_base = 0;
2310 	thread->user_stack_size = 0;
2311 
2312 	// reset signals
2313 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2314 
2315 	thread->ResetSignalsOnExec();
2316 
2317 	// reset thread CPU time clock
2318 	thread->cpu_clock_offset = -thread->CPUTime(false);
2319 
2320 	// Note: We don't cancel an alarm. It is supposed to survive exec*().
2321 }
2322 
2323 
2324 /*! Inserts a thread at the tail of a queue. */
2325 void
2326 thread_enqueue(Thread *thread, struct thread_queue *queue)
2327 {
2328 	thread->queue_next = NULL;
2329 	if (queue->head == NULL) {
2330 		queue->head = thread;
2331 		queue->tail = thread;
2332 	} else {
2333 		queue->tail->queue_next = thread;
2334 		queue->tail = thread;
2335 	}
2336 }
2337 
2338 
2339 Thread *
2340 thread_lookat_queue(struct thread_queue *queue)
2341 {
2342 	return queue->head;
2343 }
2344 
2345 
2346 Thread *
2347 thread_dequeue(struct thread_queue *queue)
2348 {
2349 	Thread *thread = queue->head;
2350 
2351 	if (thread != NULL) {
2352 		queue->head = thread->queue_next;
2353 		if (queue->tail == thread)
2354 			queue->tail = NULL;
2355 	}
2356 	return thread;
2357 }
2358 
2359 
2360 Thread *
2361 thread_dequeue_id(struct thread_queue *q, thread_id id)
2362 {
2363 	Thread *thread;
2364 	Thread *last = NULL;
2365 
2366 	thread = q->head;
2367 	while (thread != NULL) {
2368 		if (thread->id == id) {
2369 			if (last == NULL)
2370 				q->head = thread->queue_next;
2371 			else
2372 				last->queue_next = thread->queue_next;
2373 
2374 			if (q->tail == thread)
2375 				q->tail = last;
2376 			break;
2377 		}
2378 		last = thread;
2379 		thread = thread->queue_next;
2380 	}
2381 	return thread;
2382 }
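

// A hedged sketch of how the FIFO helpers above fit together; the queue and
// the two threads passed in are hypothetical, and a real caller would have
// to provide whatever locking its context requires around these calls.
static void
example_thread_queue_usage(Thread* threadA, Thread* threadB)
{
	struct thread_queue queue;
	queue.head = queue.tail = NULL;
		// assumed empty-queue initialization

	thread_enqueue(threadA, &queue);
	thread_enqueue(threadB, &queue);

	// peek at the head without removing it
	Thread* thread = thread_lookat_queue(&queue);

	// remove a specific thread by ID, then drain the rest in FIFO order
	thread_dequeue_id(&queue, threadB->id);
	while ((thread = thread_dequeue(&queue)) != NULL) {
		// process the dequeued thread here
	}
}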
2383 
2384 
2385 thread_id
2386 allocate_thread_id()
2387 {
2388 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
2389 
2390 	// find the next unused ID
2391 	thread_id id;
2392 	do {
2393 		id = sNextThreadID++;
2394 
2395 		// deal with integer overflow
2396 		if (sNextThreadID < 0)
2397 			sNextThreadID = 2;
2398 
2399 		// check whether the ID is already in use
2400 	} while (sThreadHash.Lookup(id, false) != NULL);
2401 
2402 	return id;
2403 }
2404 
2405 
2406 thread_id
2407 peek_next_thread_id()
2408 {
2409 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
2410 	return sNextThreadID;
2411 }
2412 
2413 
2414 /*!	Yield the CPU to other threads.
2415 	If \a force is \c true, the thread will almost certainly be unscheduled.
2416 	If \c false, it will continue to run if there's no other thread in ready
2417 	state; even if other threads are ready, it still has a good chance to
2418 	continue as long as its priority is higher than theirs.
2419 */
2420 void
2421 thread_yield(bool force)
2422 {
2423 	if (force) {
2424 		// snooze for roughly 3 thread quantums
2425 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
2426 #if 0
2427 		cpu_status state;
2428 
2429 		Thread *thread = thread_get_current_thread();
2430 		if (thread == NULL)
2431 			return;
2432 
2433 		InterruptsSpinLocker _(gSchedulerLock);
2434 
2435 		// mark the thread as yielded, so it will not be scheduled next
2436 		//thread->was_yielded = true;
2437 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
2438 		scheduler_reschedule();
2439 #endif
2440 	} else {
2441 		Thread *thread = thread_get_current_thread();
2442 		if (thread == NULL)
2443 			return;
2444 
2445 		// Don't force the thread off the CPU, just reschedule.
2446 		InterruptsSpinLocker _(gSchedulerLock);
2447 		scheduler_reschedule();
2448 	}
2449 }
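

// An illustrative sketch only: a long-running kernel loop that periodically
// offers the CPU to other threads via the non-forcing variant described
// above. The iteration count is arbitrary.
static void
example_cooperative_loop(int32 iterations)
{
	for (int32 i = 0; i < iterations; i++) {
		// do a chunk of work here, then let other ready threads run; with
		// force == false the current thread keeps the CPU if nothing else
		// is ready
		thread_yield(false);
	}
}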
2450 
2451 
2452 /*!	Kernel private thread creation function.
2453 */
2454 thread_id
2455 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
2456 	void *arg, team_id team)
2457 {
2458 	return thread_create_thread(
2459 		ThreadCreationAttributes(function, name, priority, arg, team),
2460 		true);
2461 }
2462 
2463 
2464 status_t
2465 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
2466 	status_t *_returnCode)
2467 {
2468 	job_control_entry* freeDeath = NULL;
2469 	status_t status = B_OK;
2470 
2471 	if (id < B_OK)
2472 		return B_BAD_THREAD_ID;
2473 
2474 	// get the thread, queue our death entry, and fetch the semaphore we have to
2475 	// wait on
2476 	sem_id exitSem = B_BAD_THREAD_ID;
2477 	struct thread_death_entry death;
2478 
2479 	Thread* thread = Thread::GetAndLock(id);
2480 	if (thread != NULL) {
2481 		// remember the semaphore we have to wait on and place our death entry
2482 		exitSem = thread->exit.sem;
2483 		list_add_link_to_head(&thread->exit.waiters, &death);
2484 
2485 		thread->UnlockAndReleaseReference();
2486 			// Note: We mustn't dereference the pointer afterwards, only check
2487 			// it.
2488 	}
2489 
2490 	thread_death_entry* threadDeathEntry = NULL;
2491 
2492 	if (thread == NULL) {
2493 		// we couldn't find this thread -- maybe it's already gone, and we'll
2494 		// find its death entry in our team
2495 		Team* team = thread_get_current_thread()->team;
2496 		TeamLocker teamLocker(team);
2497 
2498 		// check the child death entries first (i.e. main threads of child
2499 		// teams)
2500 		bool deleteEntry;
2501 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
2502 		if (freeDeath != NULL) {
2503 			death.status = freeDeath->status;
2504 			if (!deleteEntry)
2505 				freeDeath = NULL;
2506 		} else {
2507 			// check the thread death entries of the team (non-main threads)
2508 			while ((threadDeathEntry = (thread_death_entry*)list_get_next_item(
2509 					&team->dead_threads, threadDeathEntry)) != NULL) {
2510 				if (threadDeathEntry->thread == id) {
2511 					list_remove_item(&team->dead_threads, threadDeathEntry);
2512 					team->dead_threads_count--;
2513 					death.status = threadDeathEntry->status;
2514 					break;
2515 				}
2516 			}
2517 
2518 			if (threadDeathEntry == NULL)
2519 				status = B_BAD_THREAD_ID;
2520 		}
2521 	}
2522 
2523 	if (thread == NULL && status == B_OK) {
2524 		// we found the thread's death entry in our team
2525 		if (_returnCode)
2526 			*_returnCode = death.status;
2527 
2528 		delete freeDeath;
2529 		free(threadDeathEntry);
2530 		return B_OK;
2531 	}
2532 
2533 	// we need to wait for the death of the thread
2534 
2535 	if (exitSem < 0)
2536 		return B_BAD_THREAD_ID;
2537 
2538 	resume_thread(id);
2539 		// make sure we don't wait forever on a suspended thread
2540 
2541 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
2542 
2543 	if (status == B_OK) {
2544 		// this should never happen as the thread deletes the semaphore on exit
2545 		panic("could acquire exit_sem for thread %ld\n", id);
2546 	} else if (status == B_BAD_SEM_ID) {
2547 		// this is the way the thread normally exits
2548 		status = B_OK;
2549 
2550 		if (_returnCode)
2551 			*_returnCode = death.status;
2552 	} else {
2553 		// We were probably interrupted or the timeout occurred; we need to
2554 		// remove our death entry now.
2555 		thread = Thread::GetAndLock(id);
2556 		if (thread != NULL) {
2557 			list_remove_link(&death);
2558 			thread->UnlockAndReleaseReference();
2559 		} else {
2560 			// The thread is already gone, so we need to wait uninterruptibly
2561 			// for its exit semaphore to make sure our death entry stays valid.
2562 			// It won't take long, since the thread is apparently already in the
2563 			// middle of the cleanup.
2564 			acquire_sem(exitSem);
2565 			status = B_OK;
2566 		}
2567 	}
2568 
2569 	return status;
2570 }
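

// A hypothetical sketch combining spawn_kernel_thread_etc() above with
// wait_for_thread_etc(): the worker function, its name, and the 5 second
// timeout are made up for illustration.
static status_t
example_worker(void* /*argument*/)
{
	return B_OK;
}


static status_t
example_spawn_and_wait()
{
	thread_id worker = spawn_kernel_thread_etc(&example_worker,
		"example worker", B_NORMAL_PRIORITY, NULL,
		team_get_kernel_team_id());
	if (worker < 0)
		return worker;

	resume_thread(worker);

	// wait up to 5 seconds for the worker to exit and fetch its return code
	status_t returnCode;
	status_t error = wait_for_thread_etc(worker, B_RELATIVE_TIMEOUT, 5000000,
		&returnCode);
	return error == B_OK ? returnCode : error;
}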
2571 
2572 
2573 status_t
2574 select_thread(int32 id, struct select_info* info, bool kernel)
2575 {
2576 	// get and lock the thread
2577 	Thread* thread = Thread::GetAndLock(id);
2578 	if (thread == NULL)
2579 		return B_BAD_THREAD_ID;
2580 	BReference<Thread> threadReference(thread, true);
2581 	ThreadLocker threadLocker(thread, true);
2582 
2583 	// We support only B_EVENT_INVALID at the moment.
2584 	info->selected_events &= B_EVENT_INVALID;
2585 
2586 	// add info to list
2587 	if (info->selected_events != 0) {
2588 		info->next = thread->select_infos;
2589 		thread->select_infos = info;
2590 
2591 		// we need a sync reference
2592 		atomic_add(&info->sync->ref_count, 1);
2593 	}
2594 
2595 	return B_OK;
2596 }
2597 
2598 
2599 status_t
2600 deselect_thread(int32 id, struct select_info* info, bool kernel)
2601 {
2602 	// get and lock the thread
2603 	Thread* thread = Thread::GetAndLock(id);
2604 	if (thread == NULL)
2605 		return B_BAD_THREAD_ID;
2606 	BReference<Thread> threadReference(thread, true);
2607 	ThreadLocker threadLocker(thread, true);
2608 
2609 	// remove info from list
2610 	select_info** infoLocation = &thread->select_infos;
2611 	while (*infoLocation != NULL && *infoLocation != info)
2612 		infoLocation = &(*infoLocation)->next;
2613 
2614 	if (*infoLocation != info)
2615 		return B_OK;
2616 
2617 	*infoLocation = info->next;
2618 
2619 	threadLocker.Unlock();
2620 
2621 	// surrender sync reference
2622 	put_select_sync(info->sync);
2623 
2624 	return B_OK;
2625 }
2626 
2627 
2628 int32
2629 thread_max_threads(void)
2630 {
2631 	return sMaxThreads;
2632 }
2633 
2634 
2635 int32
2636 thread_used_threads(void)
2637 {
2638 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
2639 	return sUsedThreads;
2640 }
2641 
2642 
2643 /*!	Returns a user-readable string for a thread state.
2644 	Only for use in the kernel debugger.
2645 */
2646 const char*
2647 thread_state_to_text(Thread* thread, int32 state)
2648 {
2649 	return state_to_text(thread, state);
2650 }
2651 
2652 
2653 int32
2654 thread_get_io_priority(thread_id id)
2655 {
2656 	Thread* thread = Thread::GetAndLock(id);
2657 	if (thread == NULL)
2658 		return B_BAD_THREAD_ID;
2659 	BReference<Thread> threadReference(thread, true);
2660 	ThreadLocker threadLocker(thread, true);
2661 
2662 	int32 priority = thread->io_priority;
2663 	if (priority < 0) {
2664 		// negative I/O priority means using the (CPU) priority
2665 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2666 		priority = thread->priority;
2667 	}
2668 
2669 	return priority;
2670 }
2671 
2672 
2673 void
2674 thread_set_io_priority(int32 priority)
2675 {
2676 	Thread* thread = thread_get_current_thread();
2677 	ThreadLocker threadLocker(thread);
2678 
2679 	thread->io_priority = priority;
2680 }
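

// Hypothetical illustration of the two I/O priority accessors above: the
// current thread lowers its own I/O priority before doing bulk work and
// reads the effective value back.
static void
example_lower_io_priority_for_bulk_work()
{
	thread_set_io_priority(B_LOW_PRIORITY);

	int32 ioPriority = thread_get_io_priority(thread_get_current_thread_id());
	dprintf("bulk worker now using I/O priority %d\n", (int)ioPriority);
}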
2681 
2682 
2683 status_t
2684 thread_init(kernel_args *args)
2685 {
2686 	TRACE(("thread_init: entry\n"));
2687 
2688 	// create the thread hash table
2689 	new(&sThreadHash) ThreadHashTable();
2690 	if (sThreadHash.Init(128) != B_OK)
2691 		panic("thread_init(): failed to init thread hash table!");
2692 
2693 	// create the thread structure object cache
2694 	sThreadCache = create_object_cache("threads", sizeof(Thread), 16, NULL,
2695 		NULL, NULL);
2696 		// Note: The x86 port requires 16 byte alignment of thread structures.
2697 	if (sThreadCache == NULL)
2698 		panic("thread_init(): failed to allocate thread object cache!");
2699 
2700 	if (arch_thread_init(args) < B_OK)
2701 		panic("arch_thread_init() failed!\n");
2702 
2703 	// skip all thread IDs including B_SYSTEM_TEAM, which is reserved
2704 	sNextThreadID = B_SYSTEM_TEAM + 1;
2705 
2706 	// create an idle thread for each cpu
2707 	for (uint32 i = 0; i < args->num_cpus; i++) {
2708 		Thread *thread;
2709 		area_info info;
2710 		char name[64];
2711 
2712 		sprintf(name, "idle thread %lu", i + 1);
2713 		thread = new(&sIdleThreads[i]) Thread(name,
2714 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
2715 		if (thread == NULL || thread->Init(true) != B_OK) {
2716 			panic("error creating idle thread struct\n");
2717 			return B_NO_MEMORY;
2718 		}
2719 
2720 		gCPU[i].running_thread = thread;
2721 
2722 		thread->team = team_get_kernel_team();
2723 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
2724 		thread->state = B_THREAD_RUNNING;
2725 		thread->next_state = B_THREAD_READY;
2726 		sprintf(name, "idle thread %lu kstack", i + 1);
2727 		thread->kernel_stack_area = find_area(name);
2728 
2729 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2730 			panic("error finding idle kstack area\n");
2731 
2732 		thread->kernel_stack_base = (addr_t)info.address;
2733 		thread->kernel_stack_top = thread->kernel_stack_base + info.size;
2734 
2735 		thread->visible = true;
2736 		insert_thread_into_team(thread->team, thread);
2737 	}
2738 	sUsedThreads = args->num_cpus;
2739 
2740 	// init the notification service
2741 	new(&sNotificationService) ThreadNotificationService();
2742 
2743 	// start the undertaker thread
2744 	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2745 	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2746 
2747 	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2748 		B_DISPLAY_PRIORITY, NULL);
2749 	if (undertakerThread < 0)
2750 		panic("Failed to create undertaker thread!");
2751 	resume_thread(undertakerThread);
2752 
2753 	// set up some debugger commands
2754 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2755 		"[ <team> ]\n"
2756 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2757 		"all threads of the specified team.\n"
2758 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2759 	add_debugger_command_etc("ready", &dump_thread_list,
2760 		"List all ready threads",
2761 		"\n"
2762 		"Prints a list of all threads in ready state.\n", 0);
2763 	add_debugger_command_etc("running", &dump_thread_list,
2764 		"List all running threads",
2765 		"\n"
2766 		"Prints a list of all threads in running state.\n", 0);
2767 	add_debugger_command_etc("waiting", &dump_thread_list,
2768 		"List all waiting threads (optionally for a specific semaphore)",
2769 		"[ <sem> ]\n"
2770 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2771 		"specified, only the threads waiting on that semaphore are listed.\n"
2772 		"  <sem>  - ID of the semaphore.\n", 0);
2773 	add_debugger_command_etc("realtime", &dump_thread_list,
2774 		"List all realtime threads",
2775 		"\n"
2776 		"Prints a list of all threads with realtime priority.\n", 0);
2777 	add_debugger_command_etc("thread", &dump_thread_info,
2778 		"Dump info about a particular thread",
2779 		"[ -s ] ( <id> | <address> | <name> )*\n"
2780 		"Prints information about the specified thread. If no argument is\n"
2781 		"given the current thread is selected.\n"
2782 		"  -s         - Print info in compact table form (like \"threads\").\n"
2783 		"  <id>       - The ID of the thread.\n"
2784 		"  <address>  - The address of the thread structure.\n"
2785 		"  <name>     - The thread's name.\n", 0);
2786 	add_debugger_command_etc("calling", &dump_thread_list,
2787 		"Show all threads that have a specific address in their call chain",
2788 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2789 	add_debugger_command_etc("unreal", &make_thread_unreal,
2790 		"Set realtime priority threads to normal priority",
2791 		"[ <id> ]\n"
2792 		"Sets the priority of all realtime threads or, if given, the one\n"
2793 		"with the specified ID to \"normal\" priority.\n"
2794 		"  <id>  - The ID of the thread.\n", 0);
2795 	add_debugger_command_etc("suspend", &make_thread_suspended,
2796 		"Suspend a thread",
2797 		"[ <id> ]\n"
2798 		"Suspends the thread with the given ID. If no ID argument is given\n"
2799 		"the current thread is selected.\n"
2800 		"  <id>  - The ID of the thread.\n", 0);
2801 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2802 		"<id>\n"
2803 		"Resumes the specified thread, if it is currently suspended.\n"
2804 		"  <id>  - The ID of the thread.\n", 0);
2805 	add_debugger_command_etc("drop", &drop_into_debugger,
2806 		"Drop a thread into the userland debugger",
2807 		"<id>\n"
2808 		"Drops the specified (userland) thread into the userland debugger\n"
2809 		"after leaving the kernel debugger.\n"
2810 		"  <id>  - The ID of the thread.\n", 0);
2811 	add_debugger_command_etc("priority", &set_thread_prio,
2812 		"Set a thread's priority",
2813 		"<priority> [ <id> ]\n"
2814 		"Sets the priority of the thread with the specified ID to the given\n"
2815 		"priority. If no thread ID is given, the current thread is selected.\n"
2816 		"  <priority>  - The thread's new priority (0 - 120)\n"
2817 		"  <id>        - The ID of the thread.\n", 0);
2818 
2819 	return B_OK;
2820 }
2821 
2822 
2823 status_t
2824 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2825 {
2826 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2827 	// so that get_current_cpu and friends will work, which is crucial for
2828 	// a lot of low level routines
2829 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2830 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2831 	return B_OK;
2832 }
2833 
2834 
2835 //	#pragma mark - thread blocking API
2836 
2837 
2838 static status_t
2839 thread_block_timeout(timer* timer)
2840 {
2841 	// The timer has been installed with B_TIMER_ACQUIRE_SCHEDULER_LOCK, so
2842 	// we're holding the scheduler lock already. This makes things comfortably
2843 	// easy.
2844 
2845 	Thread* thread = (Thread*)timer->user_data;
2846 	thread_unblock_locked(thread, B_TIMED_OUT);
2847 
2848 	return B_HANDLED_INTERRUPT;
2849 }
2850 
2851 
2852 /*!	Blocks the current thread.
2853 
2854 	The function acquires the scheduler lock and calls thread_block_locked().
2855 	See there for more information.
2856 */
2857 status_t
2858 thread_block()
2859 {
2860 	InterruptsSpinLocker _(gSchedulerLock);
2861 	return thread_block_locked(thread_get_current_thread());
2862 }
2863 
2864 
2865 /*!	Blocks the current thread with a timeout.
2866 
2867 	Acquires the scheduler lock and calls thread_block_with_timeout_locked().
2868 	See there for more information.
2869 */
2870 status_t
2871 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2872 {
2873 	InterruptsSpinLocker _(gSchedulerLock);
2874 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
2875 }
2876 
2877 
2878 /*!	Blocks the current thread with a timeout.
2879 
2880 	The thread is blocked until someone else unblocks it or the specified timeout
2881 	occurs. Must be called after a call to thread_prepare_to_block(). If the
2882 	thread has already been unblocked after the previous call to
2883 	thread_prepare_to_block(), this function will return immediately. See
2884 	thread_prepare_to_block() for more details.
2885 
2886 	The caller must hold the scheduler lock.
2887 
2888 	\param thread The current thread.
2889 	\param timeoutFlags The standard timeout flags:
2890 		- \c B_RELATIVE_TIMEOUT: \a timeout specifies the time to wait.
2891 		- \c B_ABSOLUTE_TIMEOUT: \a timeout specifies the absolute end time when
2892 			the timeout shall occur.
2893 		- \c B_TIMEOUT_REAL_TIME_BASE: Only relevant when \c B_ABSOLUTE_TIMEOUT
2894 			is specified, too. Specifies that \a timeout is a real time, not a
2895 			system time.
2896 		If neither \c B_RELATIVE_TIMEOUT nor \c B_ABSOLUTE_TIMEOUT are
2897 		specified, an infinite timeout is implied and the function behaves like
2898 		thread_block_locked().
2899 	\return The error code passed to the unblocking function. thread_interrupt()
2900 		uses \c B_INTERRUPTED. If the timeout occurs, \c B_TIMED_OUT is
2901 		returned. By convention \c B_OK means that the wait was successful while
2902 		another error code indicates a failure (what that means depends on the
2903 		client code).
2904 */
2905 status_t
2906 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
2907 {
2908 	Thread* thread = thread_get_current_thread();
2909 
2910 	if (thread->wait.status != 1)
2911 		return thread->wait.status;
2912 
2913 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2914 		&& timeout != B_INFINITE_TIMEOUT;
2915 
2916 	if (useTimer) {
2917 		// Timer flags: absolute/relative + "acquire thread lock". The latter
2918 		// avoids nasty race conditions and deadlock problems that could
2919 		// otherwise occur between our cancel_timer() and a concurrently
2920 		// executing thread_block_timeout().
2921 		uint32 timerFlags;
2922 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2923 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2924 		} else {
2925 			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2926 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2927 				timerFlags |= B_TIMER_REAL_TIME_BASE;
2928 		}
2929 		timerFlags |= B_TIMER_ACQUIRE_SCHEDULER_LOCK;
2930 
2931 		// install the timer
2932 		thread->wait.unblock_timer.user_data = thread;
2933 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2934 			timerFlags);
2935 	}
2936 
2937 	// block
2938 	status_t error = thread_block_locked(thread);
2939 
2940 	// cancel timer, if it didn't fire
2941 	if (error != B_TIMED_OUT && useTimer)
2942 		cancel_timer(&thread->wait.unblock_timer);
2943 
2944 	return error;
2945 }
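

// A hedged sketch of the blocking pattern the comment above describes:
// announce the wait with thread_prepare_to_block(), then block with a
// relative timeout while holding the scheduler lock. The wait description
// string and the 1 second timeout are illustrative only.
static status_t
example_block_up_to_one_second()
{
	Thread* thread = thread_get_current_thread();

	// An unblock arriving between this call and
	// thread_block_with_timeout_locked() below is not lost.
	thread_prepare_to_block(thread, B_CAN_INTERRUPT, THREAD_BLOCK_TYPE_OTHER,
		"example wait");

	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
	return thread_block_with_timeout_locked(B_RELATIVE_TIMEOUT, 1000000);
		// yields B_OK, B_INTERRUPTED, or B_TIMED_OUT
}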
2946 
2947 
2948 /*!	Unblocks a userland-blocked thread.
2949 	The caller must not hold any locks.
2950 */
2951 static status_t
2952 user_unblock_thread(thread_id threadID, status_t status)
2953 {
2954 	// get the thread
2955 	Thread* thread = Thread::GetAndLock(threadID);
2956 	if (thread == NULL)
2957 		return B_BAD_THREAD_ID;
2958 	BReference<Thread> threadReference(thread, true);
2959 	ThreadLocker threadLocker(thread, true);
2960 
2961 	if (thread->user_thread == NULL)
2962 		return B_NOT_ALLOWED;
2963 
2964 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2965 
2966 	if (thread->user_thread->wait_status > 0) {
2967 		thread->user_thread->wait_status = status;
2968 		thread_unblock_locked(thread, status);
2969 	}
2970 
2971 	return B_OK;
2972 }
2973 
2974 
2975 //	#pragma mark - public kernel API
2976 
2977 
2978 void
2979 exit_thread(status_t returnValue)
2980 {
2981 	Thread *thread = thread_get_current_thread();
2982 	Team* team = thread->team;
2983 
2984 	thread->exit.status = returnValue;
2985 
2986 	// if called from a kernel thread, we don't deliver the signal,
2987 	// we just exit directly to keep the user space behaviour of
2988 	// this function
2989 	if (team != team_get_kernel_team()) {
2990 		// If this is its main thread, set the team's exit status.
2991 		if (thread == team->main_thread) {
2992 			TeamLocker teamLocker(team);
2993 
2994 			if (!team->exit.initialized) {
2995 				team->exit.reason = CLD_EXITED;
2996 				team->exit.signal = 0;
2997 				team->exit.signaling_user = 0;
2998 				team->exit.status = returnValue;
2999 				team->exit.initialized = true;
3000 			}
3001 
3002 			teamLocker.Unlock();
3003 		}
3004 
3005 		Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
3006 		send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
3007 	} else
3008 		thread_exit();
3009 }
3010 
3011 
3012 status_t
3013 kill_thread(thread_id id)
3014 {
3015 	if (id <= 0)
3016 		return B_BAD_VALUE;
3017 
3018 	Thread* currentThread = thread_get_current_thread();
3019 
3020 	Signal signal(SIGKILLTHR, SI_USER, B_OK, currentThread->team->id);
3021 	return send_signal_to_thread_id(id, signal, 0);
3022 }
3023 
3024 
3025 status_t
3026 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
3027 {
3028 	return send_data_etc(thread, code, buffer, bufferSize, 0);
3029 }
3030 
3031 
3032 int32
3033 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
3034 {
3035 	return receive_data_etc(sender, buffer, bufferSize, 0);
3036 }
3037 
3038 
3039 bool
3040 has_data(thread_id thread)
3041 {
3042 	// TODO: The thread argument is ignored.
3043 	int32 count;
3044 
3045 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
3046 			&count) != B_OK)
3047 		return false;
3048 
3049 	return count != 0;
3050 }
3051 
3052 
3053 status_t
3054 _get_thread_info(thread_id id, thread_info *info, size_t size)
3055 {
3056 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
3057 		return B_BAD_VALUE;
3058 
3059 	// get the thread
3060 	Thread* thread = Thread::GetAndLock(id);
3061 	if (thread == NULL)
3062 		return B_BAD_THREAD_ID;
3063 	BReference<Thread> threadReference(thread, true);
3064 	ThreadLocker threadLocker(thread, true);
3065 
3066 	// fill the info -- also requires the scheduler lock to be held
3067 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3068 
3069 	fill_thread_info(thread, info, size);
3070 
3071 	return B_OK;
3072 }
3073 
3074 
3075 status_t
3076 _get_next_thread_info(team_id teamID, int32 *_cookie, thread_info *info,
3077 	size_t size)
3078 {
3079 	if (info == NULL || size != sizeof(thread_info) || teamID < 0)
3080 		return B_BAD_VALUE;
3081 
3082 	int32 lastID = *_cookie;
3083 
3084 	// get the team
3085 	Team* team = Team::GetAndLock(teamID);
3086 	if (team == NULL)
3087 		return B_BAD_VALUE;
3088 	BReference<Team> teamReference(team, true);
3089 	TeamLocker teamLocker(team, true);
3090 
3091 	Thread* thread = NULL;
3092 
3093 	if (lastID == 0) {
3094 		// We start with the main thread
3095 		thread = team->main_thread;
3096 	} else {
3097 		// Find the one thread with an ID greater than ours (as long as the IDs
3098 		// don't wrap they are always sorted from highest to lowest).
3099 		// TODO: That is broken not only when the IDs wrap, but also for the
3100 		// kernel team, to which threads are added when they are dying.
3101 		for (Thread* next = team->thread_list; next != NULL;
3102 				next = next->team_next) {
3103 			if (next->id <= lastID)
3104 				break;
3105 
3106 			thread = next;
3107 		}
3108 	}
3109 
3110 	if (thread == NULL)
3111 		return B_BAD_VALUE;
3112 
3113 	lastID = thread->id;
3114 	*_cookie = lastID;
3115 
3116 	ThreadLocker threadLocker(thread);
3117 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3118 
3119 	fill_thread_info(thread, info, size);
3120 
3121 	return B_OK;
3122 }
3123 
3124 
3125 thread_id
3126 find_thread(const char* name)
3127 {
3128 	if (name == NULL)
3129 		return thread_get_current_thread_id();
3130 
3131 	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
3132 
3133 	// TODO: Scanning the whole hash with the thread hash lock held isn't
3134 	// exactly cheap -- although this function is probably used very rarely.
3135 
3136 	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
3137 			Thread* thread = it.Next();) {
3138 		if (!thread->visible)
3139 			continue;
3140 
3141 		if (strcmp(thread->name, name) == 0)
3142 			return thread->id;
3143 	}
3144 
3145 	return B_NAME_NOT_FOUND;
3146 }
3147 
3148 
3149 status_t
3150 rename_thread(thread_id id, const char* name)
3151 {
3152 	if (name == NULL)
3153 		return B_BAD_VALUE;
3154 
3155 	// get the thread
3156 	Thread* thread = Thread::GetAndLock(id);
3157 	if (thread == NULL)
3158 		return B_BAD_THREAD_ID;
3159 	BReference<Thread> threadReference(thread, true);
3160 	ThreadLocker threadLocker(thread, true);
3161 
3162 	// check whether the operation is allowed
3163 	if (thread->team != thread_get_current_thread()->team)
3164 		return B_NOT_ALLOWED;
3165 
3166 	strlcpy(thread->name, name, B_OS_NAME_LENGTH);
3167 
3168 	team_id teamID = thread->team->id;
3169 
3170 	threadLocker.Unlock();
3171 
3172 	// notify listeners
3173 	sNotificationService.Notify(THREAD_NAME_CHANGED, teamID, id);
3174 		// don't pass the thread structure, as it's unsafe if it isn't ours
3175 
3176 	return B_OK;
3177 }
3178 
3179 
3180 status_t
3181 set_thread_priority(thread_id id, int32 priority)
3182 {
3183 	int32 oldPriority;
3184 
3185 	// make sure the passed in priority is within bounds
3186 	if (priority > THREAD_MAX_SET_PRIORITY)
3187 		priority = THREAD_MAX_SET_PRIORITY;
3188 	if (priority < THREAD_MIN_SET_PRIORITY)
3189 		priority = THREAD_MIN_SET_PRIORITY;
3190 
3191 	// get the thread
3192 	Thread* thread = Thread::GetAndLock(id);
3193 	if (thread == NULL)
3194 		return B_BAD_THREAD_ID;
3195 	BReference<Thread> threadReference(thread, true);
3196 	ThreadLocker threadLocker(thread, true);
3197 
3198 	// check whether the change is allowed
3199 	if (thread_is_idle_thread(thread))
3200 		return B_NOT_ALLOWED;
3201 
3202 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3203 
3204 	if (thread == thread_get_current_thread()) {
3205 		// It's ourself, so we know we aren't in the run queue, and we can
3206 		// manipulate our structure directly.
3207 		oldPriority = thread->priority;
3208 		thread->priority = thread->next_priority = priority;
3209 	} else {
3210 		oldPriority = thread->priority;
3211 		scheduler_set_thread_priority(thread, priority);
3212 	}
3213 
3214 	return oldPriority;
3215 }
3216 
3217 
3218 status_t
3219 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
3220 {
3221 	return common_snooze_etc(timeout, timebase, flags, NULL);
3222 }
3223 
3224 
3225 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
3226 status_t
3227 snooze(bigtime_t timeout)
3228 {
3229 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
3230 }
3231 
3232 
3233 /*!	snooze_until() for internal kernel use only; doesn't interrupt on
3234 	signals.
3235 */
3236 status_t
3237 snooze_until(bigtime_t timeout, int timebase)
3238 {
3239 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
3240 }
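

// A small illustrative sketch: running work at fixed absolute times by
// combining system_time() with snooze_until(). The 100 ms period and the
// iteration count are made up.
static void
example_periodic_tick()
{
	bigtime_t nextWakeup = system_time();
	for (int32 i = 0; i < 10; i++) {
		nextWakeup += 100000;
			// advance by 100 ms on the system time base
		snooze_until(nextWakeup, B_SYSTEM_TIMEBASE);
		// do the periodic work here
	}
}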
3241 
3242 
3243 status_t
3244 wait_for_thread(thread_id thread, status_t *_returnCode)
3245 {
3246 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
3247 }
3248 
3249 
3250 status_t
3251 suspend_thread(thread_id id)
3252 {
3253 	if (id <= 0)
3254 		return B_BAD_VALUE;
3255 
3256 	Thread* currentThread = thread_get_current_thread();
3257 
3258 	Signal signal(SIGSTOP, SI_USER, B_OK, currentThread->team->id);
3259 	return send_signal_to_thread_id(id, signal, 0);
3260 }
3261 
3262 
3263 status_t
3264 resume_thread(thread_id id)
3265 {
3266 	if (id <= 0)
3267 		return B_BAD_VALUE;
3268 
3269 	Thread* currentThread = thread_get_current_thread();
3270 
3271 	// Using the kernel internal SIGNAL_CONTINUE_THREAD signal retains
3272 	// compatibility with BeOS, which documents the combination of suspend_thread()
3273 	// and resume_thread() to interrupt threads waiting on semaphores.
3274 	Signal signal(SIGNAL_CONTINUE_THREAD, SI_USER, B_OK,
3275 		currentThread->team->id);
3276 	return send_signal_to_thread_id(id, signal, 0);
3277 }
3278 
3279 
3280 thread_id
3281 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
3282 	void *arg)
3283 {
3284 	return thread_create_thread(
3285 		ThreadCreationAttributes(function, name, priority, arg),
3286 		true);
3287 }
3288 
3289 
3290 int
3291 getrlimit(int resource, struct rlimit * rlp)
3292 {
3293 	status_t error = common_getrlimit(resource, rlp);
3294 	if (error != B_OK) {
3295 		errno = error;
3296 		return -1;
3297 	}
3298 
3299 	return 0;
3300 }
3301 
3302 
3303 int
3304 setrlimit(int resource, const struct rlimit * rlp)
3305 {
3306 	status_t error = common_setrlimit(resource, rlp);
3307 	if (error != B_OK) {
3308 		errno = error;
3309 		return -1;
3310 	}
3311 
3312 	return 0;
3313 }
3314 
3315 
3316 //	#pragma mark - syscalls
3317 
3318 
3319 void
3320 _user_exit_thread(status_t returnValue)
3321 {
3322 	exit_thread(returnValue);
3323 }
3324 
3325 
3326 status_t
3327 _user_kill_thread(thread_id thread)
3328 {
3329 	// TODO: Don't allow kernel threads to be killed!
3330 	return kill_thread(thread);
3331 }
3332 
3333 
3334 status_t
3335 _user_cancel_thread(thread_id threadID, void (*cancelFunction)(int))
3336 {
3337 	// check the cancel function
3338 	if (cancelFunction == NULL || !IS_USER_ADDRESS(cancelFunction))
3339 		return B_BAD_VALUE;
3340 
3341 	// get and lock the thread
3342 	Thread* thread = Thread::GetAndLock(threadID);
3343 	if (thread == NULL)
3344 		return B_BAD_THREAD_ID;
3345 	BReference<Thread> threadReference(thread, true);
3346 	ThreadLocker threadLocker(thread, true);
3347 
3348 	// only threads of the same team can be canceled
3349 	if (thread->team != thread_get_current_thread()->team)
3350 		return B_NOT_ALLOWED;
3351 
3352 	// set the cancel function
3353 	thread->cancel_function = cancelFunction;
3354 
3355 	// send the cancellation signal to the thread
3356 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3357 	return send_signal_to_thread_locked(thread, SIGNAL_CANCEL_THREAD, NULL, 0);
3358 }
3359 
3360 
3361 status_t
3362 _user_resume_thread(thread_id thread)
3363 {
3364 	// TODO: Don't allow kernel threads to be resumed!
3365 	return resume_thread(thread);
3366 }
3367 
3368 
3369 status_t
3370 _user_suspend_thread(thread_id thread)
3371 {
3372 	// TODO: Don't allow kernel threads to be suspended!
3373 	return suspend_thread(thread);
3374 }
3375 
3376 
3377 status_t
3378 _user_rename_thread(thread_id thread, const char *userName)
3379 {
3380 	char name[B_OS_NAME_LENGTH];
3381 
3382 	if (!IS_USER_ADDRESS(userName)
3383 		|| userName == NULL
3384 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
3385 		return B_BAD_ADDRESS;
3386 
3387 	// TODO: Don't allow kernel threads to be renamed!
3388 	return rename_thread(thread, name);
3389 }
3390 
3391 
3392 int32
3393 _user_set_thread_priority(thread_id thread, int32 newPriority)
3394 {
3395 	// TODO: Don't allow setting priority of kernel threads!
3396 	return set_thread_priority(thread, newPriority);
3397 }
3398 
3399 
3400 thread_id
3401 _user_spawn_thread(thread_creation_attributes* userAttributes)
3402 {
3403 	// copy the userland structure to the kernel
3404 	char nameBuffer[B_OS_NAME_LENGTH];
3405 	ThreadCreationAttributes attributes;
3406 	status_t error = attributes.InitFromUserAttributes(userAttributes,
3407 		nameBuffer);
3408 	if (error != B_OK)
3409 		return error;
3410 
3411 	// create the thread
3412 	thread_id threadID = thread_create_thread(attributes, false);
3413 
3414 	if (threadID >= 0)
3415 		user_debug_thread_created(threadID);
3416 
3417 	return threadID;
3418 }
3419 
3420 
3421 status_t
3422 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags,
3423 	bigtime_t* userRemainingTime)
3424 {
3425 	// We need to store more syscall restart parameters than usual and need a
3426 	// somewhat different handling. Hence we can't use
3427 	// syscall_restart_handle_timeout_pre() but do the job ourselves.
3428 	struct restart_parameters {
3429 		bigtime_t	timeout;
3430 		clockid_t	timebase;
3431 		uint32		flags;
3432 	};
3433 
3434 	Thread* thread = thread_get_current_thread();
3435 
3436 	if ((thread->flags & THREAD_FLAGS_SYSCALL_RESTARTED) != 0) {
3437 		// The syscall was restarted. Fetch the parameters from the stored
3438 		// restart parameters.
3439 		restart_parameters* restartParameters
3440 			= (restart_parameters*)thread->syscall_restart.parameters;
3441 		timeout = restartParameters->timeout;
3442 		timebase = restartParameters->timebase;
3443 		flags = restartParameters->flags;
3444 	} else {
3445 		// convert relative timeouts to absolute ones
3446 		if ((flags & B_RELATIVE_TIMEOUT) != 0) {
3447 			// not restarted yet and the flags indicate a relative timeout
3448 
3449 			// Make sure we use the system time base, so real-time clock changes
3450 			// won't affect our wait.
3451 			flags &= ~(uint32)B_TIMEOUT_REAL_TIME_BASE;
3452 			if (timebase == CLOCK_REALTIME)
3453 				timebase = CLOCK_MONOTONIC;
3454 
3455 			// get the current time and make the timeout absolute
3456 			bigtime_t now;
3457 			status_t error = user_timer_get_clock(timebase, now);
3458 			if (error != B_OK)
3459 				return error;
3460 
3461 			timeout += now;
3462 
3463 			// deal with overflow
3464 			if (timeout < 0)
3465 				timeout = B_INFINITE_TIMEOUT;
3466 
3467 			flags = (flags & ~B_RELATIVE_TIMEOUT) | B_ABSOLUTE_TIMEOUT;
3468 		} else
3469 			flags |= B_ABSOLUTE_TIMEOUT;
3470 	}
3471 
3472 	// snooze
3473 	bigtime_t remainingTime;
3474 	status_t error = common_snooze_etc(timeout, timebase,
3475 		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION,
3476 		userRemainingTime != NULL ? &remainingTime : NULL);
3477 
3478 	// If interrupted, copy the remaining time back to userland and prepare the
3479 	// syscall restart.
3480 	if (error == B_INTERRUPTED) {
3481 		if (userRemainingTime != NULL
3482 			&& (!IS_USER_ADDRESS(userRemainingTime)
3483 				|| user_memcpy(userRemainingTime, &remainingTime,
3484 					sizeof(remainingTime)) != B_OK)) {
3485 			return B_BAD_ADDRESS;
3486 		}
3487 
3488 		// store the normalized values in the restart parameters
3489 		restart_parameters* restartParameters
3490 			= (restart_parameters*)thread->syscall_restart.parameters;
3491 		restartParameters->timeout = timeout;
3492 		restartParameters->timebase = timebase;
3493 		restartParameters->flags = flags;
3494 
3495 		// restart the syscall, if possible
3496 		atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
3497 	}
3498 
3499 	return error;
3500 }
3501 
3502 
3503 void
3504 _user_thread_yield(void)
3505 {
3506 	thread_yield(true);
3507 }
3508 
3509 
3510 status_t
3511 _user_get_thread_info(thread_id id, thread_info *userInfo)
3512 {
3513 	thread_info info;
3514 	status_t status;
3515 
3516 	if (!IS_USER_ADDRESS(userInfo))
3517 		return B_BAD_ADDRESS;
3518 
3519 	status = _get_thread_info(id, &info, sizeof(thread_info));
3520 
3521 	if (status >= B_OK
3522 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
3523 		return B_BAD_ADDRESS;
3524 
3525 	return status;
3526 }
3527 
3528 
3529 status_t
3530 _user_get_next_thread_info(team_id team, int32 *userCookie,
3531 	thread_info *userInfo)
3532 {
3533 	status_t status;
3534 	thread_info info;
3535 	int32 cookie;
3536 
3537 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
3538 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3539 		return B_BAD_ADDRESS;
3540 
3541 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
3542 	if (status < B_OK)
3543 		return status;
3544 
3545 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3546 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
3547 		return B_BAD_ADDRESS;
3548 
3549 	return status;
3550 }
3551 
3552 
3553 thread_id
3554 _user_find_thread(const char *userName)
3555 {
3556 	char name[B_OS_NAME_LENGTH];
3557 
3558 	if (userName == NULL)
3559 		return find_thread(NULL);
3560 
3561 	if (!IS_USER_ADDRESS(userName)
3562 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
3563 		return B_BAD_ADDRESS;
3564 
3565 	return find_thread(name);
3566 }
3567 
3568 
3569 status_t
3570 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
3571 {
3572 	status_t returnCode;
3573 	status_t status;
3574 
3575 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
3576 		return B_BAD_ADDRESS;
3577 
3578 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
3579 
3580 	if (status == B_OK && userReturnCode != NULL
3581 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
3582 		return B_BAD_ADDRESS;
3583 	}
3584 
3585 	return syscall_restart_handle_post(status);
3586 }
3587 
3588 
3589 bool
3590 _user_has_data(thread_id thread)
3591 {
3592 	return has_data(thread);
3593 }
3594 
3595 
3596 status_t
3597 _user_send_data(thread_id thread, int32 code, const void *buffer,
3598 	size_t bufferSize)
3599 {
3600 	if (!IS_USER_ADDRESS(buffer))
3601 		return B_BAD_ADDRESS;
3602 
3603 	return send_data_etc(thread, code, buffer, bufferSize,
3604 		B_KILL_CAN_INTERRUPT);
3605 		// supports userland buffers
3606 }
3607 
3608 
3609 status_t
3610 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
3611 {
3612 	thread_id sender;
3613 	status_t code;
3614 
3615 	if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
3616 		|| !IS_USER_ADDRESS(buffer))
3617 		return B_BAD_ADDRESS;
3618 
3619 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
3620 		// supports userland buffers
3621 
3622 	if (_userSender != NULL)
3623 		if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
3624 			return B_BAD_ADDRESS;
3625 
3626 	return code;
3627 }
3628 
3629 
3630 status_t
3631 _user_block_thread(uint32 flags, bigtime_t timeout)
3632 {
3633 	syscall_restart_handle_timeout_pre(flags, timeout);
3634 	flags |= B_CAN_INTERRUPT;
3635 
3636 	Thread* thread = thread_get_current_thread();
3637 	ThreadLocker threadLocker(thread);
3638 
3639 	// check whether we're already done
3640 	if (thread->user_thread->wait_status <= 0)
3641 		return thread->user_thread->wait_status;
3642 
3643 	// nope, so wait
3644 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");
3645 
3646 	threadLocker.Unlock();
3647 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3648 
3649 	status_t status = thread_block_with_timeout_locked(flags, timeout);
3650 
3651 	schedulerLocker.Unlock();
3652 	threadLocker.Lock();
3653 
3654 	// Interruptions or timeouts can race with other threads unblocking us.
3655 	// Favor a wake-up by another thread, i.e. if someone changed the wait
3656 	// status, use that.
3657 	status_t oldStatus = thread->user_thread->wait_status;
3658 	if (oldStatus > 0)
3659 		thread->user_thread->wait_status = status;
3660 	else
3661 		status = oldStatus;
3662 
3663 	threadLocker.Unlock();
3664 
3665 	return syscall_restart_handle_timeout_post(status, timeout);
3666 }
3667 
3668 
3669 status_t
3670 _user_unblock_thread(thread_id threadID, status_t status)
3671 {
3672 	status_t error = user_unblock_thread(threadID, status);
3673 
3674 	if (error == B_OK)
3675 		scheduler_reschedule_if_necessary();
3676 
3677 	return error;
3678 }
3679 
3680 
3681 status_t
3682 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
3683 {
3684 	enum {
3685 		MAX_USER_THREADS_TO_UNBLOCK	= 128
3686 	};
3687 
3688 	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
3689 		return B_BAD_ADDRESS;
3690 	if (count > MAX_USER_THREADS_TO_UNBLOCK)
3691 		return B_BAD_VALUE;
3692 
3693 	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
3694 	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
3695 		return B_BAD_ADDRESS;
3696 
3697 	for (uint32 i = 0; i < count; i++)
3698 		user_unblock_thread(threads[i], status);
3699 
3700 	scheduler_reschedule_if_necessary();
3701 
3702 	return B_OK;
3703 }
3704 
3705 
3706 // TODO: the following two functions don't belong here
3707 
3708 
3709 int
3710 _user_getrlimit(int resource, struct rlimit *urlp)
3711 {
3712 	struct rlimit rl;
3713 	int ret;
3714 
3715 	if (urlp == NULL)
3716 		return EINVAL;
3717 
3718 	if (!IS_USER_ADDRESS(urlp))
3719 		return B_BAD_ADDRESS;
3720 
3721 	ret = common_getrlimit(resource, &rl);
3722 
3723 	if (ret == 0) {
3724 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
3725 		if (ret < 0)
3726 			return ret;
3727 
3728 		return 0;
3729 	}
3730 
3731 	return ret;
3732 }
3733 
3734 
3735 int
3736 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
3737 {
3738 	struct rlimit resourceLimit;
3739 
3740 	if (userResourceLimit == NULL)
3741 		return EINVAL;
3742 
3743 	if (!IS_USER_ADDRESS(userResourceLimit)
3744 		|| user_memcpy(&resourceLimit, userResourceLimit,
3745 			sizeof(struct rlimit)) < B_OK)
3746 		return B_BAD_ADDRESS;
3747 
3748 	return common_setrlimit(resource, &resourceLimit);
3749 }
3750