xref: /haiku/src/system/kernel/thread.cpp (revision 23338ed551920aae841646afa77530c41efb42c8)
1 /*
2  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*! Threading routines */
10 
11 
12 #include <thread.h>
13 
14 #include <errno.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <sys/resource.h>
19 
20 #include <OS.h>
21 
22 #include <util/AutoLock.h>
23 #include <util/khash.h>
24 
25 #include <arch/debug.h>
26 #include <boot/kernel_args.h>
27 #include <condition_variable.h>
28 #include <cpu.h>
29 #include <int.h>
30 #include <kimage.h>
31 #include <kscheduler.h>
32 #include <ksignal.h>
33 #include <Notifications.h>
34 #include <real_time_clock.h>
35 #include <smp.h>
36 #include <syscalls.h>
37 #include <syscall_restart.h>
38 #include <team.h>
39 #include <tls.h>
40 #include <user_runtime.h>
41 #include <user_thread.h>
42 #include <vfs.h>
43 #include <vm.h>
44 #include <vm_address_space.h>
45 #include <wait_for_objects.h>
46 
47 
48 //#define TRACE_THREAD
49 #ifdef TRACE_THREAD
50 #	define TRACE(x) dprintf x
51 #else
52 #	define TRACE(x) ;
53 #endif
54 
55 
56 #define THREAD_MAX_MESSAGE_SIZE		65536
57 
58 
59 struct thread_key {
60 	thread_id id;
61 };
62 
63 // global thread spinlock
64 spinlock gThreadSpinlock = B_SPINLOCK_INITIALIZER;
65 
66 // thread list
67 static struct thread sIdleThreads[B_MAX_CPU_COUNT];
68 static hash_table *sThreadHash = NULL;
69 static thread_id sNextThreadID = 1;
70 
71 // some arbitrarily chosen limits - should probably depend on the available
72 // memory (the limit is not yet enforced)
73 static int32 sMaxThreads = 4096;
74 static int32 sUsedThreads = 0;
75 
76 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
77 	struct thread*	thread;
78 	team_id			teamID;
79 	sem_id			deathSem;
80 
81 	UndertakerEntry(struct thread* thread, team_id teamID, sem_id deathSem)
82 		:
83 		thread(thread),
84 		teamID(teamID),
85 		deathSem(deathSem)
86 	{
87 	}
88 };
89 
90 
91 class ThreadNotificationService : public DefaultNotificationService {
92 public:
93 	ThreadNotificationService()
94 		: DefaultNotificationService("threads")
95 	{
96 	}
97 
98 	void Notify(uint32 eventCode, struct thread* thread)
99 	{
100 		char eventBuffer[128];
101 		KMessage event;
102 		event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
103 		event.AddInt32("event", eventCode);
104 		event.AddInt32("thread", thread->id);
105 		event.AddPointer("threadStruct", thread);
106 
107 		DefaultNotificationService::Notify(event, eventCode);
108 	}
109 };
110 
111 
112 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
113 static ConditionVariable sUndertakerCondition;
114 static ThreadNotificationService sNotificationService;
115 
116 
117 // The dead queue is used as a pool from which to retrieve and reuse previously
118 // allocated thread structs when creating a new thread. It should go away once
119 // the slab allocator is in place.
120 static struct thread_queue dead_q;
121 
122 static void thread_kthread_entry(void);
123 static void thread_kthread_exit(void);
124 
125 
126 /*!
127 	Inserts a thread into a team.
128 	You must hold the team lock when you call this function.
129 */
130 static void
131 insert_thread_into_team(struct team *team, struct thread *thread)
132 {
133 	thread->team_next = team->thread_list;
134 	team->thread_list = thread;
135 	team->num_threads++;
136 
137 	if (team->num_threads == 1) {
138 		// this was the first thread
139 		team->main_thread = thread;
140 	}
141 	thread->team = team;
142 }
143 
144 
145 /*!
146 	Removes a thread from a team.
147 	You must hold the team lock when you call this function.
148 */
149 static void
150 remove_thread_from_team(struct team *team, struct thread *thread)
151 {
152 	struct thread *temp, *last = NULL;
153 
154 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
155 		if (temp == thread) {
156 			if (last == NULL)
157 				team->thread_list = temp->team_next;
158 			else
159 				last->team_next = temp->team_next;
160 
161 			team->num_threads--;
162 			break;
163 		}
164 		last = temp;
165 	}
166 }
167 
168 
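/*!	Hash table callbacks for sThreadHash: thread_struct_compare() matches a
	thread struct against a thread_key, and thread_struct_hash() computes the
	hash bucket from the thread's ID (or from the key, if no thread is given).
*/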
169 static int
170 thread_struct_compare(void *_t, const void *_key)
171 {
172 	struct thread *thread = (struct thread*)_t;
173 	const struct thread_key *key = (const struct thread_key*)_key;
174 
175 	if (thread->id == key->id)
176 		return 0;
177 
178 	return 1;
179 }
180 
181 
182 static uint32
183 thread_struct_hash(void *_t, const void *_key, uint32 range)
184 {
185 	struct thread *thread = (struct thread*)_t;
186 	const struct thread_key *key = (const struct thread_key*)_key;
187 
188 	if (thread != NULL)
189 		return thread->id % range;
190 
191 	return (uint32)key->id % range;
192 }
193 
194 
195 static void
196 reset_signals(struct thread *thread)
197 {
198 	thread->sig_pending = 0;
199 	thread->sig_block_mask = 0;
200 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
201 	thread->signal_stack_base = 0;
202 	thread->signal_stack_size = 0;
203 	thread->signal_stack_enabled = false;
204 }
205 
206 
207 /*!
208 	Allocates and fills in a thread structure (or reuses one from the
209 	dead queue).
210 
211 	\param threadID The ID to be assigned to the new thread. If
212 		  \code < 0 \endcode a fresh one is allocated.
213 	\param inthread If not \c NULL, this struct is initialized instead of allocating a new one.
214 */
215 
216 static struct thread *
217 create_thread_struct(struct thread *inthread, const char *name,
218 	thread_id threadID, struct cpu_ent *cpu)
219 {
220 	struct thread *thread;
221 	cpu_status state;
222 	char temp[64];
223 	bool recycled = false;
224 
225 	if (inthread == NULL) {
226 		// try to recycle one from the dead queue first
227 		state = disable_interrupts();
228 		GRAB_THREAD_LOCK();
229 		thread = thread_dequeue(&dead_q);
230 		RELEASE_THREAD_LOCK();
231 		restore_interrupts(state);
232 
233 		// if not, create a new one
234 		if (thread == NULL) {
235 			thread = (struct thread *)malloc(sizeof(struct thread));
236 			if (thread == NULL)
237 				return NULL;
238 		} else {
239 			recycled = true;
240 		}
241 	} else {
242 		thread = inthread;
243 	}
244 
245 	if (!recycled)
246 		scheduler_on_thread_create(thread);
247 
248 	if (name != NULL)
249 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
250 	else
251 		strcpy(thread->name, "unnamed thread");
252 
253 	thread->flags = 0;
254 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
255 	thread->team = NULL;
256 	thread->cpu = cpu;
257 	thread->previous_cpu = NULL;
258 	thread->pinned_to_cpu = 0;
259 	thread->keep_scheduled = 0;
260 	thread->fault_handler = 0;
261 	thread->page_faults_allowed = 1;
262 	thread->kernel_stack_area = -1;
263 	thread->kernel_stack_base = 0;
264 	thread->user_stack_area = -1;
265 	thread->user_stack_base = 0;
266 	thread->user_local_storage = 0;
267 	thread->kernel_errno = 0;
268 	thread->team_next = NULL;
269 	thread->queue_next = NULL;
270 	thread->priority = thread->next_priority = -1;
271 	thread->io_priority = -1;
272 	thread->args1 = NULL;  thread->args2 = NULL;
273 	thread->alarm.period = 0;
274 	reset_signals(thread);
275 	thread->in_kernel = true;
276 	thread->was_yielded = false;
277 	thread->user_time = 0;
278 	thread->kernel_time = 0;
279 	thread->last_time = 0;
280 	thread->exit.status = 0;
281 	thread->exit.reason = 0;
282 	thread->exit.signal = 0;
283 	list_init(&thread->exit.waiters);
284 	thread->select_infos = NULL;
285 	thread->post_interrupt_callback = NULL;
286 	thread->post_interrupt_data = NULL;
287 	thread->user_thread = NULL;
288 
289 	sprintf(temp, "thread_%ld_retcode_sem", thread->id);
290 	thread->exit.sem = create_sem(0, temp);
291 	if (thread->exit.sem < B_OK)
292 		goto err1;
293 
294 	sprintf(temp, "%s send", thread->name);
295 	thread->msg.write_sem = create_sem(1, temp);
296 	if (thread->msg.write_sem < B_OK)
297 		goto err2;
298 
299 	sprintf(temp, "%s receive", thread->name);
300 	thread->msg.read_sem = create_sem(0, temp);
301 	if (thread->msg.read_sem < B_OK)
302 		goto err3;
303 
304 	if (arch_thread_init_thread_struct(thread) < B_OK)
305 		goto err4;
306 
307 	return thread;
308 
309 err4:
310 	delete_sem(thread->msg.read_sem);
311 err3:
312 	delete_sem(thread->msg.write_sem);
313 err2:
314 	delete_sem(thread->exit.sem);
315 err1:
316 	// ToDo: put them in the dead queue instead?
317 	if (inthread == NULL) {
318 		scheduler_on_thread_destroy(thread);
319 		free(thread);
320 	}
321 	return NULL;
322 }
323 
324 
325 static void
326 delete_thread_struct(struct thread *thread)
327 {
328 	delete_sem(thread->exit.sem);
329 	delete_sem(thread->msg.write_sem);
330 	delete_sem(thread->msg.read_sem);
331 
332 	scheduler_on_thread_destroy(thread);
333 
334 	// ToDo: put them in the dead queue instead?
335 	free(thread);
336 }
337 
338 
339 /*! This function gets run by a new thread before anything else */
340 static void
341 thread_kthread_entry(void)
342 {
343 	struct thread *thread = thread_get_current_thread();
344 
345 	// The thread is new and has been scheduled the first time. Notify the user
346 	// debugger code.
347 	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
348 		user_debug_thread_scheduled(thread);
349 
350 	// This simulates the thread spinlock release that would occur if the thread
351 	// had been rescheduled from; the reschedule didn't happen because the thread is new.
352 	RELEASE_THREAD_LOCK();
353 
354 	// start tracking time
355 	thread->last_time = system_time();
356 
357 	enable_interrupts(); // this essentially simulates a return-from-interrupt
358 }
359 
360 
361 static void
362 thread_kthread_exit(void)
363 {
364 	struct thread *thread = thread_get_current_thread();
365 
366 	thread->exit.reason = THREAD_RETURN_EXIT;
367 	thread_exit();
368 }
369 
370 
371 /*!
372 	Initializes the thread and jumps to its userspace entry point.
373 	This function is called at creation time of every user thread,
374 	but not for a team's main thread.
375 */
376 static int
377 _create_user_thread_kentry(void)
378 {
379 	struct thread *thread = thread_get_current_thread();
380 
381 	// jump to the entry point in user space
382 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
383 		thread->args1, thread->args2);
384 
385 	// only get here if the above call fails
386 	return 0;
387 }
388 
389 
390 /*! Initializes the thread and calls its kernel space entry point. */
391 static int
392 _create_kernel_thread_kentry(void)
393 {
394 	struct thread *thread = thread_get_current_thread();
395 	int (*func)(void *args) = (int (*)(void *))thread->entry;
396 
397 	// call the entry function with the appropriate args
398 	return func(thread->args1);
399 }
400 
401 
402 /*!
403 	Creates a new thread in the team with the team ID given in \a attributes.
404 
405 	\param attributes The creation attributes. If \c attributes.thread is
406 		  \code < 0 \endcode a fresh thread ID is allocated.
407 */
408 static thread_id
409 create_thread(thread_creation_attributes& attributes, bool kernel)
410 {
411 	struct thread *thread, *currentThread;
412 	struct team *team;
413 	cpu_status state;
414 	char stack_name[B_OS_NAME_LENGTH];
415 	status_t status;
416 	bool abort = false;
417 	bool debugNewThread = false;
418 
419 	TRACE(("create_thread(%s, id = %ld, %s)\n", attributes.name,
420 		attributes.thread, kernel ? "kernel" : "user"));
421 
422 	thread = create_thread_struct(NULL, attributes.name, attributes.thread,
423 		NULL);
424 	if (thread == NULL)
425 		return B_NO_MEMORY;
426 
427 	thread->priority = attributes.priority == -1
428 		? B_NORMAL_PRIORITY : attributes.priority;
429 	thread->next_priority = thread->priority;
430 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
431 	thread->state = B_THREAD_SUSPENDED;
432 	thread->next_state = B_THREAD_SUSPENDED;
433 
434 	// init debug structure
435 	init_thread_debug_info(&thread->debug_info);
436 
437 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", attributes.name,
438 		thread->id);
439 	thread->kernel_stack_area = create_area(stack_name,
440 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
441 		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
442 		B_FULL_LOCK,
443 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
444 
445 	if (thread->kernel_stack_area < 0) {
446 		// we're not yet part of a team, so we can just bail out
447 		status = thread->kernel_stack_area;
448 
449 		dprintf("create_thread: error creating kernel stack: %s!\n",
450 			strerror(status));
451 
452 		delete_thread_struct(thread);
453 		return status;
454 	}
455 
456 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
457 		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
458 
459 	state = disable_interrupts();
460 	GRAB_THREAD_LOCK();
461 
462 	// If the new thread belongs to the same team as the current thread,
463 	// it may inherit some of the thread debug flags.
464 	currentThread = thread_get_current_thread();
465 	if (currentThread && currentThread->team->id == attributes.team) {
466 		// inherit all user flags...
467 		int32 debugFlags = currentThread->debug_info.flags
468 			& B_THREAD_DEBUG_USER_FLAG_MASK;
469 
470 		// ... except the syscall tracing flags, unless explicitly requested
471 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
472 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
473 				| B_THREAD_DEBUG_POST_SYSCALL);
474 		}
475 
476 		thread->debug_info.flags = debugFlags;
477 
478 		// stop the new thread, if desired
479 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
480 	}
481 
482 	// insert into global list
483 	hash_insert(sThreadHash, thread);
484 	sUsedThreads++;
485 	scheduler_on_thread_init(thread);
486 	RELEASE_THREAD_LOCK();
487 
488 	GRAB_TEAM_LOCK();
489 	// look at the team, make sure it's not being deleted
490 	team = team_get_team_struct_locked(attributes.team);
491 
492 	if (team == NULL || team->state == TEAM_STATE_DEATH)
493 		abort = true;
494 
495 	if (!abort && !kernel) {
496 		thread->user_thread = team_allocate_user_thread(team);
497 		abort = thread->user_thread == NULL;
498 	}
499 
500 	if (!abort) {
501 		// Debug the new thread, if the parent thread required that (see above),
502 		// or the respective global team debug flag is set. But only if a
503 		// debugger is installed for the team.
504 		debugNewThread |= (atomic_get(&team->debug_info.flags)
505 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
506 		if (debugNewThread
507 			&& (atomic_get(&team->debug_info.flags)
508 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
509 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
510 		}
511 
512 		insert_thread_into_team(team, thread);
513 	}
514 
515 	RELEASE_TEAM_LOCK();
516 	if (abort) {
517 		GRAB_THREAD_LOCK();
518 		hash_remove(sThreadHash, thread);
519 		RELEASE_THREAD_LOCK();
520 	}
521 	restore_interrupts(state);
522 	if (abort) {
523 		delete_area(thread->kernel_stack_area);
524 		delete_thread_struct(thread);
525 		return B_BAD_TEAM_ID;
526 	}
527 
528 	thread->args1 = attributes.args1;
529 	thread->args2 = attributes.args2;
530 	thread->entry = attributes.entry;
531 	status = thread->id;
532 
533 	// notify listeners
534 	sNotificationService.Notify(THREAD_ADDED, thread);
535 
536 	if (kernel) {
537 		// this sets up an initial kthread stack that runs the entry
538 
539 		// Note: whatever function wants to set up a user stack later for this
540 		// thread must initialize the TLS for it
541 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
542 			&thread_kthread_entry, &thread_kthread_exit);
543 	} else {
544 		// create user stack
545 
546 		// the stack will be between USER_STACK_REGION and the main thread stack
547 		// area (the user stack of the main thread is created in
548 		// team_create_team())
549 		if (attributes.stack_address == NULL) {
550 			thread->user_stack_base = USER_STACK_REGION;
551 			if (attributes.stack_size <= 0)
552 				thread->user_stack_size = USER_STACK_SIZE;
553 			else
554 				thread->user_stack_size = PAGE_ALIGN(attributes.stack_size);
555 			thread->user_stack_size += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
556 
557 			snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack",
558 				attributes.name, thread->id);
559 			thread->user_stack_area = create_area_etc(team->id, stack_name,
560 					(void **)&thread->user_stack_base, B_BASE_ADDRESS,
561 					thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
562 					B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0, 0);
563 			if (thread->user_stack_area < B_OK
564 				|| arch_thread_init_tls(thread) < B_OK) {
565 				// great, we have a fully running thread without a (usable)
566 				// stack
567 				dprintf("create_thread: unable to create proper user stack!\n");
568 				status = thread->user_stack_area;
569 				kill_thread(thread->id);
570 			}
571 		} else {
572 			thread->user_stack_base = (addr_t)attributes.stack_address;
573 			thread->user_stack_size = attributes.stack_size;
574 		}
575 
576 		user_debug_update_new_thread_flags(thread->id);
577 
578 		// copy the user entry over to the args field in the thread struct
579 		// the function this will call will immediately switch the thread into
580 		// user space.
581 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
582 			&thread_kthread_entry, &thread_kthread_exit);
583 	}
584 
585 	return status;
586 }
587 
588 
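/*!	Buries dead threads: waits for entries to be added to sUndertakerEntries,
	then deletes each thread's kernel stack area, removes the thread from the
	kernel team, releases the associated death semaphore (if any), and finally
	puts the thread structure back into the dead queue for reuse.
*/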
589 static status_t
590 undertaker(void* /*args*/)
591 {
592 	while (true) {
593 		// wait for a thread to bury
594 		InterruptsSpinLocker locker(gThreadSpinlock);
595 
596 		while (sUndertakerEntries.IsEmpty()) {
597 			ConditionVariableEntry conditionEntry;
598 			sUndertakerCondition.Add(&conditionEntry);
599 			locker.Unlock();
600 
601 			conditionEntry.Wait();
602 
603 			locker.Lock();
604 		}
605 
606 		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
607 		locker.Unlock();
608 
609 		UndertakerEntry entry = *_entry;
610 			// we need a copy, since the original entry is on the thread's stack
611 
612 		// we've got an entry
613 		struct thread* thread = entry.thread;
614 
615 		// delete the old kernel stack area
616 		delete_area(thread->kernel_stack_area);
617 
618 		// remove this thread from all of the global lists
619 		disable_interrupts();
620 		GRAB_TEAM_LOCK();
621 
622 		remove_thread_from_team(team_get_kernel_team(), thread);
623 
624 		RELEASE_TEAM_LOCK();
625 		enable_interrupts();
626 			// needed for the debugger notification below
627 
628 		if (entry.deathSem >= 0)
629 			release_sem_etc(entry.deathSem, 1, B_DO_NOT_RESCHEDULE);
630 
631 		// free the thread structure
632 		locker.Lock();
633 		thread_enqueue(thread, &dead_q);
634 			// TODO: Use the slab allocator!
635 	}
636 
637 	// we can never get here
638 	return B_OK;
639 }
640 
641 
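/*!	Returns the ID of the semaphore the given thread is currently blocked on,
	or -1 if it isn't waiting on a semaphore. The caller must hold the thread
	lock (or be running in the kernel debugger).
*/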
642 static sem_id
643 get_thread_wait_sem(struct thread* thread)
644 {
645 	if (thread->state == B_THREAD_WAITING
646 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
647 		return (sem_id)(addr_t)thread->wait.object;
648 	}
649 	return -1;
650 }
651 
652 
653 /*!
654 	Fills the thread_info structure with information from the specified
655 	thread.
656 	The thread lock must be held when called.
657 */
658 static void
659 fill_thread_info(struct thread *thread, thread_info *info, size_t size)
660 {
661 	info->thread = thread->id;
662 	info->team = thread->team->id;
663 
664 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
665 
666 	if (thread->state == B_THREAD_WAITING) {
667 		info->state = B_THREAD_WAITING;
668 
669 		switch (thread->wait.type) {
670 			case THREAD_BLOCK_TYPE_SNOOZE:
671 				info->state = B_THREAD_ASLEEP;
672 				break;
673 
674 			case THREAD_BLOCK_TYPE_SEMAPHORE:
675 			{
676 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
677 				if (sem == thread->msg.read_sem)
678 					info->state = B_THREAD_RECEIVING;
679 				break;
680 			}
681 
682 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
683 			default:
684 				break;
685 		}
686 	} else
687 		info->state = (thread_state)thread->state;
688 
689 	info->priority = thread->priority;
690 	info->user_time = thread->user_time;
691 	info->kernel_time = thread->kernel_time;
692 	info->stack_base = (void *)thread->user_stack_base;
693 	info->stack_end = (void *)(thread->user_stack_base
694 		+ thread->user_stack_size);
695 	info->sem = get_thread_wait_sem(thread);
696 }
697 
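/*!	Sends a message consisting of \a code and (optionally) \a buffer to the
	thread with the ID \a id. The caller first acquires the target's write
	semaphore, so only one message can be pending at a time, copies the data
	into a cbuf chain, and finally releases the target's read semaphore to
	wake it up.
*/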
698 static status_t
699 send_data_etc(thread_id id, int32 code, const void *buffer,
700 	size_t bufferSize, int32 flags)
701 {
702 	struct thread *target;
703 	sem_id cachedSem;
704 	cpu_status state;
705 	status_t status;
706 	cbuf *data;
707 
708 	state = disable_interrupts();
709 	GRAB_THREAD_LOCK();
710 	target = thread_get_thread_struct_locked(id);
711 	if (!target) {
712 		RELEASE_THREAD_LOCK();
713 		restore_interrupts(state);
714 		return B_BAD_THREAD_ID;
715 	}
716 	cachedSem = target->msg.write_sem;
717 	RELEASE_THREAD_LOCK();
718 	restore_interrupts(state);
719 
720 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
721 		return B_NO_MEMORY;
722 
723 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
724 	if (status == B_INTERRUPTED) {
725 		// We got interrupted by a signal
726 		return status;
727 	}
728 	if (status != B_OK) {
729 		// Any other acquisition problems may be due to thread deletion
730 		return B_BAD_THREAD_ID;
731 	}
732 
733 	if (bufferSize > 0) {
734 		data = cbuf_get_chain(bufferSize);
735 		if (data == NULL)
736 			return B_NO_MEMORY;
737 		status = cbuf_user_memcpy_to_chain(data, 0, buffer, bufferSize);
738 		if (status < B_OK) {
739 			cbuf_free_chain(data);
740 			return B_NO_MEMORY;
741 		}
742 	} else
743 		data = NULL;
744 
745 	state = disable_interrupts();
746 	GRAB_THREAD_LOCK();
747 
748 	// The target thread could have been deleted at this point
749 	target = thread_get_thread_struct_locked(id);
750 	if (target == NULL) {
751 		RELEASE_THREAD_LOCK();
752 		restore_interrupts(state);
753 		cbuf_free_chain(data);
754 		return B_BAD_THREAD_ID;
755 	}
756 
757 	// Save the message information
758 	target->msg.sender = thread_get_current_thread()->id;
759 	target->msg.code = code;
760 	target->msg.size = bufferSize;
761 	target->msg.buffer = data;
762 	cachedSem = target->msg.read_sem;
763 
764 	RELEASE_THREAD_LOCK();
765 	restore_interrupts(state);
766 
767 	release_sem(cachedSem);
768 	return B_OK;
769 }
770 
771 
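/*!	Blocks on the current thread's read semaphore until a message arrives,
	copies at most \a bufferSize bytes of it into \a buffer, stores the
	sender's ID in \a _sender, releases the write semaphore again, and
	returns the message code.
*/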
772 static int32
773 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
774 	int32 flags)
775 {
776 	struct thread *thread = thread_get_current_thread();
777 	status_t status;
778 	size_t size;
779 	int32 code;
780 
781 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
782 	if (status < B_OK) {
783 		// Actually, we're not supposed to return error codes
784 		// but since the only reason this can fail is that we
785 		// were killed, it's probably okay to do so (but also
786 		// meaningless).
787 		return status;
788 	}
789 
790 	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
791 		size = min_c(bufferSize, thread->msg.size);
792 		status = cbuf_user_memcpy_from_chain(buffer, thread->msg.buffer,
793 			0, size);
794 		if (status < B_OK) {
795 			cbuf_free_chain(thread->msg.buffer);
796 			release_sem(thread->msg.write_sem);
797 			return status;
798 		}
799 	}
800 
801 	*_sender = thread->msg.sender;
802 	code = thread->msg.code;
803 
804 	cbuf_free_chain(thread->msg.buffer);
805 	release_sem(thread->msg.write_sem);
806 
807 	return code;
808 }
809 
810 
811 static status_t
812 common_getrlimit(int resource, struct rlimit * rlp)
813 {
814 	if (!rlp)
815 		return B_BAD_ADDRESS;
816 
817 	switch (resource) {
818 		case RLIMIT_NOFILE:
819 		case RLIMIT_NOVMON:
820 			return vfs_getrlimit(resource, rlp);
821 
822 		case RLIMIT_CORE:
823 			rlp->rlim_cur = 0;
824 			rlp->rlim_max = 0;
825 			return B_OK;
826 
827 		case RLIMIT_STACK:
828 		{
829 			struct thread *thread = thread_get_current_thread();
830 			if (!thread)
831 				return B_ERROR;
832 			rlp->rlim_cur = thread->user_stack_size;
833 			rlp->rlim_max = thread->user_stack_size;
834 			return B_OK;
835 		}
836 
837 		default:
838 			return EINVAL;
839 	}
840 
841 	return B_OK;
842 }
843 
844 
845 static status_t
846 common_setrlimit(int resource, const struct rlimit * rlp)
847 {
848 	if (!rlp)
849 		return B_BAD_ADDRESS;
850 
851 	switch (resource) {
852 		case RLIMIT_NOFILE:
853 		case RLIMIT_NOVMON:
854 			return vfs_setrlimit(resource, rlp);
855 
856 		case RLIMIT_CORE:
857 			// We don't support core files, so only allow setting the limits to 0/0.
858 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
859 				return EINVAL;
860 			return B_OK;
861 
862 		default:
863 			return EINVAL;
864 	}
865 
866 	return B_OK;
867 }
868 
869 
870 //	#pragma mark - debugger calls
871 
872 
873 static int
874 make_thread_unreal(int argc, char **argv)
875 {
876 	struct thread *thread;
877 	struct hash_iterator i;
878 	int32 id = -1;
879 
880 	if (argc > 2) {
881 		print_debugger_command_usage(argv[0]);
882 		return 0;
883 	}
884 
885 	if (argc > 1)
886 		id = strtoul(argv[1], NULL, 0);
887 
888 	hash_open(sThreadHash, &i);
889 
890 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
891 		if (id != -1 && thread->id != id)
892 			continue;
893 
894 		if (thread->priority > B_DISPLAY_PRIORITY) {
895 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
896 			kprintf("thread %ld made unreal\n", thread->id);
897 		}
898 	}
899 
900 	hash_close(sThreadHash, &i, false);
901 	return 0;
902 }
903 
904 
905 static int
906 set_thread_prio(int argc, char **argv)
907 {
908 	struct thread *thread;
909 	struct hash_iterator i;
910 	int32 id;
911 	int32 prio;
912 
913 	if (argc > 3 || argc < 2) {
914 		print_debugger_command_usage(argv[0]);
915 		return 0;
916 	}
917 
918 	prio = strtoul(argv[1], NULL, 0);
919 	if (prio > THREAD_MAX_SET_PRIORITY)
920 		prio = THREAD_MAX_SET_PRIORITY;
921 	if (prio < THREAD_MIN_SET_PRIORITY)
922 		prio = THREAD_MIN_SET_PRIORITY;
923 
924 	if (argc > 2)
925 		id = strtoul(argv[2], NULL, 0);
926 	else
927 		id = thread_get_current_thread()->id;
928 
929 	hash_open(sThreadHash, &i);
930 
931 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
932 		if (thread->id != id)
933 			continue;
934 		thread->priority = thread->next_priority = prio;
935 		kprintf("thread %ld set to priority %ld\n", id, prio);
936 		break;
937 	}
938 	if (!thread)
939 		kprintf("thread %ld (%#lx) not found\n", id, id);
940 
941 	hash_close(sThreadHash, &i, false);
942 	return 0;
943 }
944 
945 
946 static int
947 make_thread_suspended(int argc, char **argv)
948 {
949 	struct thread *thread;
950 	struct hash_iterator i;
951 	int32 id;
952 
953 	if (argc > 2) {
954 		print_debugger_command_usage(argv[0]);
955 		return 0;
956 	}
957 
958 	if (argc == 1)
959 		id = thread_get_current_thread()->id;
960 	else
961 		id = strtoul(argv[1], NULL, 0);
962 
963 	hash_open(sThreadHash, &i);
964 
965 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
966 		if (thread->id != id)
967 			continue;
968 
969 		thread->next_state = B_THREAD_SUSPENDED;
970 		kprintf("thread %ld suspended\n", id);
971 		break;
972 	}
973 	if (!thread)
974 		kprintf("thread %ld (%#lx) not found\n", id, id);
975 
976 	hash_close(sThreadHash, &i, false);
977 	return 0;
978 }
979 
980 
981 static int
982 make_thread_resumed(int argc, char **argv)
983 {
984 	struct thread *thread;
985 	struct hash_iterator i;
986 	int32 id;
987 
988 	if (argc != 2) {
989 		print_debugger_command_usage(argv[0]);
990 		return 0;
991 	}
992 
993 	// force user to enter a thread id, as using
994 	// the current thread is usually not intended
995 	id = strtoul(argv[1], NULL, 0);
996 
997 	hash_open(sThreadHash, &i);
998 
999 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1000 		if (thread->id != id)
1001 			continue;
1002 
1003 		if (thread->state == B_THREAD_SUSPENDED) {
1004 			scheduler_enqueue_in_run_queue(thread);
1005 			kprintf("thread %ld resumed\n", thread->id);
1006 		}
1007 		break;
1008 	}
1009 	if (!thread)
1010 		kprintf("thread %ld (%#lx) not found\n", id, id);
1011 
1012 	hash_close(sThreadHash, &i, false);
1013 	return 0;
1014 }
1015 
1016 
1017 static int
1018 drop_into_debugger(int argc, char **argv)
1019 {
1020 	status_t err;
1021 	int32 id;
1022 
1023 	if (argc > 2) {
1024 		print_debugger_command_usage(argv[0]);
1025 		return 0;
1026 	}
1027 
1028 	if (argc == 1)
1029 		id = thread_get_current_thread()->id;
1030 	else
1031 		id = strtoul(argv[1], NULL, 0);
1032 
1033 	err = _user_debug_thread(id);
1034 	if (err)
1035 		kprintf("drop failed\n");
1036 	else
1037 		kprintf("thread %ld dropped into user debugger\n", id);
1038 
1039 	return 0;
1040 }
1041 
1042 
1043 static const char *
1044 state_to_text(struct thread *thread, int32 state)
1045 {
1046 	switch (state) {
1047 		case B_THREAD_READY:
1048 			return "ready";
1049 
1050 		case B_THREAD_RUNNING:
1051 			return "running";
1052 
1053 		case B_THREAD_WAITING:
1054 		{
1055 			if (thread != NULL) {
1056 				switch (thread->wait.type) {
1057 					case THREAD_BLOCK_TYPE_SNOOZE:
1058 						return "zzz";
1059 
1060 					case THREAD_BLOCK_TYPE_SEMAPHORE:
1061 					{
1062 						sem_id sem = (sem_id)(addr_t)thread->wait.object;
1063 						if (sem == thread->msg.read_sem)
1064 							return "receive";
1065 						break;
1066 					}
1067 				}
1068 			}
1069 
1070 			return "waiting";
1071 		}
1072 
1073 		case B_THREAD_SUSPENDED:
1074 			return "suspended";
1075 
1076 		case THREAD_STATE_FREE_ON_RESCHED:
1077 			return "death";
1078 
1079 		default:
1080 			return "UNKNOWN";
1081 	}
1082 }
1083 
1084 
1085 static void
1086 print_thread_list_table_head()
1087 {
1088 	kprintf("thread         id  state     wait for   object  cpu pri  stack    "
1089 		"  team  name\n");
1090 }
1091 
1092 
1093 static void
1094 _dump_thread_info(struct thread *thread, bool shortInfo)
1095 {
1096 	if (shortInfo) {
1097 		kprintf("%p %6ld  %-10s", thread, thread->id, state_to_text(thread,
1098 			thread->state));
1099 
1100 		// does it block on a semaphore or a condition variable?
1101 		if (thread->state == B_THREAD_WAITING) {
1102 			switch (thread->wait.type) {
1103 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1104 				{
1105 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1106 					if (sem == thread->msg.read_sem)
1107 						kprintf("                    ");
1108 					else
1109 						kprintf("sem  %12ld   ", sem);
1110 					break;
1111 				}
1112 
1113 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1114 					kprintf("cvar   %p   ", thread->wait.object);
1115 					break;
1116 
1117 				case THREAD_BLOCK_TYPE_SNOOZE:
1118 					kprintf("                    ");
1119 					break;
1120 
1121 				case THREAD_BLOCK_TYPE_SIGNAL:
1122 					kprintf("signal              ");
1123 					break;
1124 
1125 				case THREAD_BLOCK_TYPE_MUTEX:
1126 					kprintf("mutex  %p   ", thread->wait.object);
1127 					break;
1128 
1129 				case THREAD_BLOCK_TYPE_RW_LOCK:
1130 					kprintf("rwlock %p   ", thread->wait.object);
1131 					break;
1132 
1133 				case THREAD_BLOCK_TYPE_OTHER:
1134 					kprintf("other               ");
1135 					break;
1136 
1137 				default:
1138 					kprintf("???    %p   ", thread->wait.object);
1139 					break;
1140 			}
1141 		} else
1142 			kprintf("        -           ");
1143 
1144 		// on which CPU does it run?
1145 		if (thread->cpu)
1146 			kprintf("%2d", thread->cpu->cpu_num);
1147 		else
1148 			kprintf(" -");
1149 
1150 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1151 			(void *)thread->kernel_stack_base, thread->team->id,
1152 			thread->name != NULL ? thread->name : "<NULL>");
1153 
1154 		return;
1155 	}
1156 
1157 	// print the long info
1158 
1159 	struct death_entry *death = NULL;
1160 
1161 	kprintf("THREAD: %p\n", thread);
1162 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1163 	kprintf("name:               \"%s\"\n", thread->name);
1164 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1165 		thread->all_next, thread->team_next, thread->queue_next);
1166 	kprintf("priority:           %ld (next %ld, I/O: %ld)\n", thread->priority,
1167 		thread->next_priority, thread->io_priority);
1168 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1169 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1170 	kprintf("cpu:                %p ", thread->cpu);
1171 	if (thread->cpu)
1172 		kprintf("(%d)\n", thread->cpu->cpu_num);
1173 	else
1174 		kprintf("\n");
1175 	kprintf("sig_pending:        %#lx (blocked: %#lx)\n", thread->sig_pending,
1176 		thread->sig_block_mask);
1177 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1178 
1179 	if (thread->state == B_THREAD_WAITING) {
1180 		kprintf("waiting for:        ");
1181 
1182 		switch (thread->wait.type) {
1183 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1184 			{
1185 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1186 				if (sem == thread->msg.read_sem)
1187 					kprintf("data\n");
1188 				else
1189 					kprintf("semaphore %ld\n", sem);
1190 				break;
1191 			}
1192 
1193 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1194 				kprintf("condition variable %p\n", thread->wait.object);
1195 				break;
1196 
1197 			case THREAD_BLOCK_TYPE_SNOOZE:
1198 				kprintf("snooze()\n");
1199 				break;
1200 
1201 			case THREAD_BLOCK_TYPE_SIGNAL:
1202 				kprintf("signal\n");
1203 				break;
1204 
1205 			case THREAD_BLOCK_TYPE_MUTEX:
1206 				kprintf("mutex %p\n", thread->wait.object);
1207 				break;
1208 
1209 			case THREAD_BLOCK_TYPE_RW_LOCK:
1210 				kprintf("rwlock %p\n", thread->wait.object);
1211 				break;
1212 
1213 			case THREAD_BLOCK_TYPE_OTHER:
1214 				kprintf("other (%s)\n", (char*)thread->wait.object);
1215 				break;
1216 
1217 			default:
1218 				kprintf("unknown (%p)\n", thread->wait.object);
1219 				break;
1220 		}
1221 	}
1222 
1223 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1224 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1225 	kprintf("entry:              %p\n", (void *)thread->entry);
1226 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1227 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1228 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1229 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1230 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1231 	kprintf("  exit.waiters:\n");
1232 	while ((death = (struct death_entry*)list_get_next_item(
1233 			&thread->exit.waiters, death)) != NULL) {
1234 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1235 	}
1236 
1237 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1238 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1239 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1240 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1241 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1242 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1243 		strerror(thread->kernel_errno));
1244 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1245 	kprintf("user_time:          %Ld\n", thread->user_time);
1246 	kprintf("flags:              0x%lx\n", thread->flags);
1247 	kprintf("architecture dependent section:\n");
1248 	arch_thread_dump_info(&thread->arch_info);
1249 }
1250 
1251 
1252 static int
1253 dump_thread_info(int argc, char **argv)
1254 {
1255 	bool shortInfo = false;
1256 	int argi = 1;
1257 	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
1258 		shortInfo = true;
1259 		print_thread_list_table_head();
1260 		argi++;
1261 	}
1262 
1263 	if (argi == argc) {
1264 		_dump_thread_info(thread_get_current_thread(), shortInfo);
1265 		return 0;
1266 	}
1267 
1268 	for (; argi < argc; argi++) {
1269 		const char *name = argv[argi];
1270 		int32 id = strtoul(name, NULL, 0);
1271 
1272 		if (IS_KERNEL_ADDRESS(id)) {
1273 			// semi-hack
1274 			_dump_thread_info((struct thread *)id, shortInfo);
1275 			continue;
1276 		}
1277 
1278 		// walk through the thread list, trying to match name or id
1279 		bool found = false;
1280 		struct hash_iterator i;
1281 		hash_open(sThreadHash, &i);
1282 		struct thread *thread;
1283 		while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1284 			if (!strcmp(name, thread->name) || thread->id == id) {
1285 				_dump_thread_info(thread, shortInfo);
1286 				found = true;
1287 				break;
1288 			}
1289 		}
1290 		hash_close(sThreadHash, &i, false);
1291 
1292 		if (!found)
1293 			kprintf("thread \"%s\" (%ld) doesn't exist!\n", name, id);
1294 	}
1295 
1296 	return 0;
1297 }
1298 
1299 
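/*!	Kernel debugger command that prints a one-line summary for every thread
	matching the given filter: by state ("ready", "running", "waiting",
	"realtime"), by a function being called ("calling"), by semaphore, or by
	team.
*/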
1300 static int
1301 dump_thread_list(int argc, char **argv)
1302 {
1303 	struct thread *thread;
1304 	struct hash_iterator i;
1305 	bool realTimeOnly = false;
1306 	bool calling = false;
1307 	const char *callSymbol = NULL;
1308 	addr_t callStart = 0;
1309 	addr_t callEnd = 0;
1310 	int32 requiredState = 0;
1311 	team_id team = -1;
1312 	sem_id sem = -1;
1313 
1314 	if (!strcmp(argv[0], "realtime"))
1315 		realTimeOnly = true;
1316 	else if (!strcmp(argv[0], "ready"))
1317 		requiredState = B_THREAD_READY;
1318 	else if (!strcmp(argv[0], "running"))
1319 		requiredState = B_THREAD_RUNNING;
1320 	else if (!strcmp(argv[0], "waiting")) {
1321 		requiredState = B_THREAD_WAITING;
1322 
1323 		if (argc > 1) {
1324 			sem = strtoul(argv[1], NULL, 0);
1325 			if (sem == 0)
1326 				kprintf("ignoring invalid semaphore argument.\n");
1327 		}
1328 	} else if (!strcmp(argv[0], "calling")) {
1329 		if (argc < 2) {
1330 			kprintf("Need to give a symbol name or start and end arguments.\n");
1331 			return 0;
1332 		} else if (argc == 3) {
1333 			callStart = parse_expression(argv[1]);
1334 			callEnd = parse_expression(argv[2]);
1335 		} else
1336 			callSymbol = argv[1];
1337 
1338 		calling = true;
1339 	} else if (argc > 1) {
1340 		team = strtoul(argv[1], NULL, 0);
1341 		if (team == 0)
1342 			kprintf("ignoring invalid team argument.\n");
1343 	}
1344 
1345 	print_thread_list_table_head();
1346 
1347 	hash_open(sThreadHash, &i);
1348 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1349 		// filter out threads not matching the search criteria
1350 		if ((requiredState && thread->state != requiredState)
1351 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1352 					callStart, callEnd))
1353 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1354 			|| (team > 0 && thread->team->id != team)
1355 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1356 			continue;
1357 
1358 		_dump_thread_info(thread, true);
1359 	}
1360 	hash_close(sThreadHash, &i, false);
1361 	return 0;
1362 }
1363 
1364 
1365 //	#pragma mark - private kernel API
1366 
1367 
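/*!	Terminates the calling thread: deletes its user stack, moves it into the
	kernel team (deleting its own team, if it was the main thread), notifies
	waiters, listeners, and the debugger, and finally hands the thread over to
	the undertaker. This function does not return.
*/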
1368 void
1369 thread_exit(void)
1370 {
1371 	cpu_status state;
1372 	struct thread *thread = thread_get_current_thread();
1373 	struct team *team = thread->team;
1374 	thread_id parentID = -1;
1375 	bool deleteTeam = false;
1376 	sem_id cachedDeathSem = -1;
1377 	status_t status;
1378 	struct thread_debug_info debugInfo;
1379 	team_id teamID = team->id;
1380 
1381 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1382 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1383 			? "due to signal" : "normally", thread->exit.status));
1384 
1385 	if (!are_interrupts_enabled())
1386 		panic("thread_exit() called with interrupts disabled!\n");
1387 
1388 	// boost our priority to get this over with
1389 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1390 
1391 	// Cancel previously installed alarm timer, if any
1392 	cancel_timer(&thread->alarm);
1393 
1394 	// delete the user stack area first, we won't need it anymore
1395 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1396 		area_id area = thread->user_stack_area;
1397 		thread->user_stack_area = -1;
1398 		vm_delete_area(team->id, area, true);
1399 	}
1400 
1401 	struct job_control_entry *death = NULL;
1402 	struct death_entry* threadDeathEntry = NULL;
1403 	ConditionVariableEntry waitForDebuggerEntry;
1404 	bool waitForDebugger = false;
1405 
1406 	if (team != team_get_kernel_team()) {
1407 		user_debug_thread_exiting(thread);
1408 
1409 		if (team->main_thread == thread) {
1410 			// this is the team's main thread, so the team will be deleted as well
1411 			deleteTeam = true;
1412 		} else {
1413 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1414 			team_free_user_thread(thread);
1415 		}
1416 
1417 		// remove this thread from the current team and put it into the
1418 		// kernel team until it dies
1419 		state = disable_interrupts();
1420 		GRAB_TEAM_LOCK();
1421 		GRAB_THREAD_LOCK();
1422 			// removing the thread and putting its death entry to the parent
1423 			// team needs to be an atomic operation
1424 
1425 		// remember how long this thread lasted
1426 		team->dead_threads_kernel_time += thread->kernel_time;
1427 		team->dead_threads_user_time += thread->user_time;
1428 
1429 		remove_thread_from_team(team, thread);
1430 		insert_thread_into_team(team_get_kernel_team(), thread);
1431 
1432 		cachedDeathSem = team->death_sem;
1433 
1434 		if (deleteTeam) {
1435 			// If a debugger change is in progress for the team, we'll have to
1436 			// wait for it to finish further below.
1437 			GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1438 			if (team->debug_info.debugger_changed_condition != NULL) {
1439 				team->debug_info.debugger_changed_condition->Add(
1440 					&waitForDebuggerEntry);
1441 				waitForDebugger = true;
1442 			}
1443 			RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1444 
1445 			struct team *parent = team->parent;
1446 
1447 			// remember who our parent was so we can send a signal
1448 			parentID = parent->id;
1449 
1450 			// Set the team job control state to "dead" and detach the job
1451 			// control entry from our team struct.
1452 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1453 			death = team->job_control_entry;
1454 			team->job_control_entry = NULL;
1455 
1456 			if (death != NULL) {
1457 				death->InitDeadState();
1458 
1459 				// team_set_job_control_state() already moved our entry
1460 				// into the parent's list. We just check the soft limit of
1461 				// death entries.
1462 				if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
1463 					death = parent->dead_children->entries.RemoveHead();
1464 					parent->dead_children->count--;
1465 				} else
1466 					death = NULL;
1467 
1468 				RELEASE_THREAD_LOCK();
1469 			} else
1470 				RELEASE_THREAD_LOCK();
1471 
1472 			team_remove_team(team);
1473 
1474 			send_signal_etc(parentID, SIGCHLD,
1475 				SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
1476 		} else {
1477 			// The thread is not the main thread. We store a thread death
1478 			// entry for it, unless someone is already waiting for it.
1479 			if (threadDeathEntry != NULL
1480 				&& list_is_empty(&thread->exit.waiters)) {
1481 				threadDeathEntry->thread = thread->id;
1482 				threadDeathEntry->status = thread->exit.status;
1483 				threadDeathEntry->reason = thread->exit.reason;
1484 				threadDeathEntry->signal = thread->exit.signal;
1485 
1486 				// add the entry -- remove an old one, if we hit the limit
1487 				list_add_item(&team->dead_threads, threadDeathEntry);
1488 				team->dead_threads_count++;
1489 				threadDeathEntry = NULL;
1490 
1491 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1492 					threadDeathEntry = (death_entry*)list_remove_head_item(
1493 						&team->dead_threads);
1494 					team->dead_threads_count--;
1495 				}
1496 			}
1497 
1498 			RELEASE_THREAD_LOCK();
1499 		}
1500 
1501 		RELEASE_TEAM_LOCK();
1502 
1503 		// swap address spaces, to make sure we're running on the kernel's pgdir
1504 		vm_swap_address_space(team->address_space, vm_kernel_address_space());
1505 		restore_interrupts(state);
1506 
1507 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1508 	}
1509 
1510 	if (threadDeathEntry != NULL)
1511 		free(threadDeathEntry);
1512 
1513 	// delete the team if we're its main thread
1514 	if (deleteTeam) {
1515 		// wait for a debugger change to be finished first
1516 		if (waitForDebugger)
1517 			waitForDebuggerEntry.Wait();
1518 
1519 		team_delete_team(team);
1520 
1521 		// we need to delete any death entry that made it to here
1522 		if (death != NULL)
1523 			delete death;
1524 
1525 		cachedDeathSem = -1;
1526 	}
1527 
1528 	state = disable_interrupts();
1529 	GRAB_THREAD_LOCK();
1530 
1531 	// remove thread from hash, so it's no longer accessible
1532 	hash_remove(sThreadHash, thread);
1533 	sUsedThreads--;
1534 
1535 	// Stop debugging for this thread
1536 	debugInfo = thread->debug_info;
1537 	clear_thread_debug_info(&thread->debug_info, true);
1538 
1539 	// Remove the select infos. We notify them a little later.
1540 	select_info* selectInfos = thread->select_infos;
1541 	thread->select_infos = NULL;
1542 
1543 	RELEASE_THREAD_LOCK();
1544 	restore_interrupts(state);
1545 
1546 	destroy_thread_debug_info(&debugInfo);
1547 
1548 	// notify select infos
1549 	select_info* info = selectInfos;
1550 	while (info != NULL) {
1551 		select_sync* sync = info->sync;
1552 
1553 		notify_select_events(info, B_EVENT_INVALID);
1554 		info = info->next;
1555 		put_select_sync(sync);
1556 	}
1557 
1558 	// notify listeners
1559 	sNotificationService.Notify(THREAD_REMOVED, thread);
1560 
1561 	// shutdown the thread messaging
1562 
1563 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1564 	if (status == B_WOULD_BLOCK) {
1565 		// there is data waiting for us, so let us eat it
1566 		thread_id sender;
1567 
1568 		delete_sem(thread->msg.write_sem);
1569 			// first, let's remove all possibly waiting writers
1570 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1571 	} else {
1572 		// we probably own the semaphore here, and we're the last to do so
1573 		delete_sem(thread->msg.write_sem);
1574 	}
1575 	// now we can safely remove the msg.read_sem
1576 	delete_sem(thread->msg.read_sem);
1577 
1578 	// fill all death entries and delete the sem that others will use to wait on us
1579 	{
1580 		sem_id cachedExitSem = thread->exit.sem;
1581 		cpu_status state;
1582 
1583 		state = disable_interrupts();
1584 		GRAB_THREAD_LOCK();
1585 
1586 		// make sure no one will grab this semaphore again
1587 		thread->exit.sem = -1;
1588 
1589 		// fill all death entries
1590 		death_entry* entry = NULL;
1591 		while ((entry = (struct death_entry*)list_get_next_item(
1592 				&thread->exit.waiters, entry)) != NULL) {
1593 			entry->status = thread->exit.status;
1594 			entry->reason = thread->exit.reason;
1595 			entry->signal = thread->exit.signal;
1596 		}
1597 
1598 		RELEASE_THREAD_LOCK();
1599 		restore_interrupts(state);
1600 
1601 		delete_sem(cachedExitSem);
1602 	}
1603 
1604 	// notify the debugger
1605 	if (teamID != team_get_kernel_team_id())
1606 		user_debug_thread_deleted(teamID, thread->id);
1607 
1608 	// enqueue in the undertaker list and reschedule for the last time
1609 	UndertakerEntry undertakerEntry(thread, teamID, cachedDeathSem);
1610 
1611 	disable_interrupts();
1612 	GRAB_THREAD_LOCK();
1613 
1614 	sUndertakerEntries.Add(&undertakerEntry);
1615 	sUndertakerCondition.NotifyOne(true);
1616 
1617 	thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
1618 	scheduler_reschedule();
1619 
1620 	panic("never can get here\n");
1621 }
1622 
1623 
1624 struct thread *
1625 thread_get_thread_struct(thread_id id)
1626 {
1627 	struct thread *thread;
1628 	cpu_status state;
1629 
1630 	state = disable_interrupts();
1631 	GRAB_THREAD_LOCK();
1632 
1633 	thread = thread_get_thread_struct_locked(id);
1634 
1635 	RELEASE_THREAD_LOCK();
1636 	restore_interrupts(state);
1637 
1638 	return thread;
1639 }
1640 
1641 
1642 struct thread *
1643 thread_get_thread_struct_locked(thread_id id)
1644 {
1645 	struct thread_key key;
1646 
1647 	key.id = id;
1648 
1649 	return (struct thread*)hash_lookup(sThreadHash, &key);
1650 }
1651 
1652 
1653 /*!
1654 	Called in the interrupt handler code when a thread enters
1655 	the kernel for any reason.
1656 	Only tracks time for now.
1657 	Interrupts are disabled.
1658 */
1659 void
1660 thread_at_kernel_entry(bigtime_t now)
1661 {
1662 	struct thread *thread = thread_get_current_thread();
1663 
1664 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1665 
1666 	// track user time
1667 	thread->user_time += now - thread->last_time;
1668 	thread->last_time = now;
1669 
1670 	thread->in_kernel = true;
1671 }
1672 
1673 
1674 /*!
1675 	Called whenever a thread exits kernel space to user space.
1676 	Tracks time, handles signals, ...
1677 	Interrupts must be enabled. When the function returns, interrupts will be
1678 	disabled.
1679 */
1680 void
1681 thread_at_kernel_exit(void)
1682 {
1683 	struct thread *thread = thread_get_current_thread();
1684 
1685 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1686 
1687 	while (handle_signals(thread)) {
1688 		InterruptsSpinLocker _(gThreadSpinlock);
1689 		scheduler_reschedule();
1690 	}
1691 
1692 	disable_interrupts();
1693 
1694 	thread->in_kernel = false;
1695 
1696 	// track kernel time
1697 	bigtime_t now = system_time();
1698 	thread->kernel_time += now - thread->last_time;
1699 	thread->last_time = now;
1700 }
1701 
1702 
1703 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1704 	and no debugging shall be done.
1705 	Interrupts must be disabled.
1706 */
1707 void
1708 thread_at_kernel_exit_no_signals(void)
1709 {
1710 	struct thread *thread = thread_get_current_thread();
1711 
1712 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1713 
1714 	thread->in_kernel = false;
1715 
1716 	// track kernel time
1717 	bigtime_t now = system_time();
1718 	thread->kernel_time += now - thread->last_time;
1719 	thread->last_time = now;
1720 }
1721 
1722 
1723 void
1724 thread_reset_for_exec(void)
1725 {
1726 	struct thread *thread = thread_get_current_thread();
1727 
1728 	cancel_timer(&thread->alarm);
1729 	reset_signals(thread);
1730 }
1731 
1732 
1733 /*! Inserts a thread at the tail of a queue */
1734 void
1735 thread_enqueue(struct thread *thread, struct thread_queue *queue)
1736 {
1737 	thread->queue_next = NULL;
1738 	if (queue->head == NULL) {
1739 		queue->head = thread;
1740 		queue->tail = thread;
1741 	} else {
1742 		queue->tail->queue_next = thread;
1743 		queue->tail = thread;
1744 	}
1745 }
1746 
1747 
1748 struct thread *
1749 thread_lookat_queue(struct thread_queue *queue)
1750 {
1751 	return queue->head;
1752 }
1753 
1754 
1755 struct thread *
1756 thread_dequeue(struct thread_queue *queue)
1757 {
1758 	struct thread *thread = queue->head;
1759 
1760 	if (thread != NULL) {
1761 		queue->head = thread->queue_next;
1762 		if (queue->tail == thread)
1763 			queue->tail = NULL;
1764 	}
1765 	return thread;
1766 }
1767 
1768 
1769 struct thread *
1770 thread_dequeue_id(struct thread_queue *q, thread_id id)
1771 {
1772 	struct thread *thread;
1773 	struct thread *last = NULL;
1774 
1775 	thread = q->head;
1776 	while (thread != NULL) {
1777 		if (thread->id == id) {
1778 			if (last == NULL)
1779 				q->head = thread->queue_next;
1780 			else
1781 				last->queue_next = thread->queue_next;
1782 
1783 			if (q->tail == thread)
1784 				q->tail = last;
1785 			break;
1786 		}
1787 		last = thread;
1788 		thread = thread->queue_next;
1789 	}
1790 	return thread;
1791 }
1792 
1793 
1794 struct thread*
1795 thread_iterate_through_threads(thread_iterator_callback callback, void* cookie)
1796 {
1797 	struct hash_iterator iterator;
1798 	hash_open(sThreadHash, &iterator);
1799 
1800 	struct thread* thread;
1801 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
1802 			!= NULL) {
1803 		if (callback(thread, cookie))
1804 			break;
1805 	}
1806 
1807 	hash_close(sThreadHash, &iterator, false);
1808 
1809 	return thread;
1810 }
1811 
1812 
1813 thread_id
1814 allocate_thread_id(void)
1815 {
1816 	return atomic_add(&sNextThreadID, 1);
1817 }
1818 
1819 
1820 thread_id
1821 peek_next_thread_id(void)
1822 {
1823 	return atomic_get(&sNextThreadID);
1824 }
1825 
1826 
1827 /*!	Yield the CPU to other threads.
1828 	If \a force is \c true, the thread will almost certainly be unscheduled.
1829 	If \c false, the thread may well keep running: if no other thread is ready,
1830 	or if it has a higher priority than the other ready threads, it still has a
1831 	good chance to continue.
1832 */
1833 void
1834 thread_yield(bool force)
1835 {
1836 	if (force) {
1837 		// snooze for roughly 3 thread quantums
1838 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1839 #if 0
1840 		cpu_status state;
1841 
1842 		struct thread *thread = thread_get_current_thread();
1843 		if (thread == NULL)
1844 			return;
1845 
1846 		state = disable_interrupts();
1847 		GRAB_THREAD_LOCK();
1848 
1849 		// mark the thread as yielded, so it will not be scheduled next
1850 		//thread->was_yielded = true;
1851 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1852 		scheduler_reschedule();
1853 
1854 		RELEASE_THREAD_LOCK();
1855 		restore_interrupts(state);
1856 #endif
1857 	} else {
1858 		struct thread *thread = thread_get_current_thread();
1859 		if (thread == NULL)
1860 			return;
1861 
1862 		// Don't force the thread off the CPU, just reschedule.
1863 		InterruptsSpinLocker _(gThreadSpinlock);
1864 		scheduler_reschedule();
1865 	}
1866 }
1867 
1868 
1869 /*!
1870 	Kernel private thread creation function.
1871 
1872 	\param threadID The ID to be assigned to the new thread. If
1873 		  \code < 0 \endcode a fresh one is allocated.
1874 */
1875 thread_id
1876 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1877 	void *arg, team_id team, thread_id threadID)
1878 {
1879 	thread_creation_attributes attributes;
1880 	attributes.entry = (thread_entry_func)function;
1881 	attributes.name = name;
1882 	attributes.priority = priority;
1883 	attributes.args1 = arg;
1884 	attributes.args2 = NULL;
1885 	attributes.stack_address = NULL;
1886 	attributes.stack_size = 0;
1887 	attributes.team = team;
1888 	attributes.thread = threadID;
1889 
1890 	return create_thread(attributes, true);
1891 }
1892 
1893 
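/*!	Waits for the thread with the ID \a id to exit and stores its exit status
	in \a _returnCode. If the thread is already gone, the death entries of the
	caller's team are consulted instead. The thread is resumed first, so that
	waiting on a suspended thread doesn't block forever.
*/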
1894 status_t
1895 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1896 	status_t *_returnCode)
1897 {
1898 	sem_id exitSem = B_BAD_THREAD_ID;
1899 	struct death_entry death;
1900 	job_control_entry* freeDeath = NULL;
1901 	struct thread *thread;
1902 	cpu_status state;
1903 	status_t status = B_OK;
1904 
1905 	if (id < B_OK)
1906 		return B_BAD_THREAD_ID;
1907 
1908 	// we need to resume the thread we're waiting for first
1909 
1910 	state = disable_interrupts();
1911 	GRAB_THREAD_LOCK();
1912 
1913 	thread = thread_get_thread_struct_locked(id);
1914 	if (thread != NULL) {
1915 		// remember the semaphore we have to wait on and place our death entry
1916 		exitSem = thread->exit.sem;
1917 		list_add_link_to_head(&thread->exit.waiters, &death);
1918 	}
1919 
1920 	death_entry* threadDeathEntry = NULL;
1921 
1922 	RELEASE_THREAD_LOCK();
1923 
1924 	if (thread == NULL) {
1925 		// we couldn't find this thread - maybe it's already gone, and we'll
1926 		// find its death entry in our team
1927 		GRAB_TEAM_LOCK();
1928 
1929 		struct team* team = thread_get_current_thread()->team;
1930 
1931 		// check the child death entries first (i.e. main threads of child
1932 		// teams)
1933 		bool deleteEntry;
1934 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1935 		if (freeDeath != NULL) {
1936 			death.status = freeDeath->status;
1937 			if (!deleteEntry)
1938 				freeDeath = NULL;
1939 		} else {
1940 			// check the thread death entries of the team (non-main threads)
1941 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1942 					&team->dead_threads, threadDeathEntry)) != NULL) {
1943 				if (threadDeathEntry->thread == id) {
1944 					list_remove_item(&team->dead_threads, threadDeathEntry);
1945 					team->dead_threads_count--;
1946 					death.status = threadDeathEntry->status;
1947 					break;
1948 				}
1949 			}
1950 
1951 			if (threadDeathEntry == NULL)
1952 				status = B_BAD_THREAD_ID;
1953 		}
1954 
1955 		RELEASE_TEAM_LOCK();
1956 	}
1957 
1958 	restore_interrupts(state);
1959 
1960 	if (thread == NULL && status == B_OK) {
1961 		// we found the thread's death entry in our team
1962 		if (_returnCode)
1963 			*_returnCode = death.status;
1964 
1965 		delete freeDeath;
1966 		free(threadDeathEntry);
1967 		return B_OK;
1968 	}
1969 
1970 	// we need to wait for the death of the thread
1971 
1972 	if (exitSem < B_OK)
1973 		return B_BAD_THREAD_ID;
1974 
1975 	resume_thread(id);
1976 		// make sure we don't wait forever on a suspended thread
1977 
1978 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1979 
1980 	if (status == B_OK) {
1981 		// this should never happen as the thread deletes the semaphore on exit
1982 		panic("unexpectedly acquired exit_sem for thread %ld\n", id);
1983 	} else if (status == B_BAD_SEM_ID) {
1984 		// this is the way the thread normally exits
1985 		status = B_OK;
1986 
1987 		if (_returnCode)
1988 			*_returnCode = death.status;
1989 	} else {
1990 		// We were probably interrupted; we need to remove our death entry now.
1991 		state = disable_interrupts();
1992 		GRAB_THREAD_LOCK();
1993 
1994 		thread = thread_get_thread_struct_locked(id);
1995 		if (thread != NULL)
1996 			list_remove_link(&death);
1997 
1998 		RELEASE_THREAD_LOCK();
1999 		restore_interrupts(state);
2000 
2001 		// If the thread is already gone, we need to wait for its exit semaphore
2002 		// to make sure our death entry stays valid - it won't take long
2003 		if (thread == NULL)
2004 			acquire_sem(exitSem);
2005 	}
2006 
2007 	return status;
2008 }
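
/*	Usage sketch (illustrative only): waiting on a thread with a relative
	timeout instead of blocking indefinitely; "worker" is a placeholder
	thread ID.

		status_t returnCode;
		status_t error = wait_for_thread_etc(worker, B_RELATIVE_TIMEOUT,
			1000000, &returnCode);
		if (error == B_TIMED_OUT)
			; // the thread has not exited yet
*/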
2009 
2010 
2011 status_t
2012 select_thread(int32 id, struct select_info* info, bool kernel)
2013 {
2014 	InterruptsSpinLocker locker(gThreadSpinlock);
2015 
2016 	// get thread
2017 	struct thread* thread = thread_get_thread_struct_locked(id);
2018 	if (thread == NULL)
2019 		return B_BAD_THREAD_ID;
2020 
2021 	// We support only B_EVENT_INVALID at the moment.
2022 	info->selected_events &= B_EVENT_INVALID;
2023 
2024 	// add info to list
2025 	if (info->selected_events != 0) {
2026 		info->next = thread->select_infos;
2027 		thread->select_infos = info;
2028 
2029 		// we need a sync reference
2030 		atomic_add(&info->sync->ref_count, 1);
2031 	}
2032 
2033 	return B_OK;
2034 }
2035 
2036 
2037 status_t
2038 deselect_thread(int32 id, struct select_info* info, bool kernel)
2039 {
2040 	InterruptsSpinLocker locker(gThreadSpinlock);
2041 
2042 	// get thread
2043 	struct thread* thread = thread_get_thread_struct_locked(id);
2044 	if (thread == NULL)
2045 		return B_BAD_THREAD_ID;
2046 
2047 	// remove info from list
2048 	select_info** infoLocation = &thread->select_infos;
2049 	while (*infoLocation != NULL && *infoLocation != info)
2050 		infoLocation = &(*infoLocation)->next;
2051 
2052 	if (*infoLocation != info)
2053 		return B_OK;
2054 
2055 	*infoLocation = info->next;
2056 
2057 	locker.Unlock();
2058 
2059 	// surrender sync reference
2060 	put_select_sync(info->sync);
2061 
2062 	return B_OK;
2063 }
2064 
2065 
2066 int32
2067 thread_max_threads(void)
2068 {
2069 	return sMaxThreads;
2070 }
2071 
2072 
2073 int32
2074 thread_used_threads(void)
2075 {
2076 	return sUsedThreads;
2077 }
2078 
2079 
2080 const char*
2081 thread_state_to_text(struct thread* thread, int32 state)
2082 {
2083 	return state_to_text(thread, state);
2084 }
2085 
2086 
2087 int32
2088 thread_get_io_priority(thread_id id)
2089 {
2090 	// take a shortcut, if it is the current thread
2091 	struct thread* thread = thread_get_current_thread();
2092 	int32 priority;
2093 	if (id == thread->id) {
2094 		priority = thread->io_priority;
2095 		return priority < 0 ? thread->priority : priority;
2096 	}
2097 
2098 	// not the current thread -- get it
2099 	InterruptsSpinLocker locker(gThreadSpinlock);
2100 
2101 	thread = thread_get_thread_struct_locked(id);
2102 	if (thread == NULL)
2103 		return B_BAD_THREAD_ID;
2104 
2105 	priority = thread->io_priority;
2106 	return priority < 0 ? thread->priority : priority;
2107 }
2108 
2109 
2110 void
2111 thread_set_io_priority(int32 priority)
2112 {
2113 	struct thread* thread = thread_get_current_thread();
2114 	thread->io_priority = priority;
2115 }
2116 
2117 
2118 status_t
2119 thread_init(kernel_args *args)
2120 {
2121 	uint32 i;
2122 
2123 	TRACE(("thread_init: entry\n"));
2124 
2125 	// create the thread hash table
2126 	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
2127 		&thread_struct_compare, &thread_struct_hash);
2128 
2129 	// zero out the dead thread structure queue
2130 	memset(&dead_q, 0, sizeof(dead_q));
2131 
2132 	if (arch_thread_init(args) < B_OK)
2133 		panic("arch_thread_init() failed!\n");
2134 
2135 	// skip all thread IDs including B_SYSTEM_TEAM, which is reserved
2136 	sNextThreadID = B_SYSTEM_TEAM + 1;
2137 
2138 	// create an idle thread for each cpu
2139 
2140 	for (i = 0; i < args->num_cpus; i++) {
2141 		struct thread *thread;
2142 		area_info info;
2143 		char name[64];
2144 
2145 		sprintf(name, "idle thread %lu", i + 1);
2146 		thread = create_thread_struct(&sIdleThreads[i], name,
2147 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
2148 		if (thread == NULL) {
2149 			panic("error creating idle thread struct\n");
2150 			return B_NO_MEMORY;
2151 		}
2152 
2153 		thread->team = team_get_kernel_team();
2154 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
2155 		thread->state = B_THREAD_RUNNING;
2156 		thread->next_state = B_THREAD_READY;
2157 		sprintf(name, "idle thread %lu kstack", i + 1);
2158 		thread->kernel_stack_area = find_area(name);
2159 		thread->entry = NULL;
2160 
2161 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2162 			panic("error finding idle kstack area\n");
2163 
2164 		thread->kernel_stack_base = (addr_t)info.address;
2165 		thread->kernel_stack_top = thread->kernel_stack_base + info.size;
2166 
2167 		hash_insert(sThreadHash, thread);
2168 		insert_thread_into_team(thread->team, thread);
2169 	}
2170 	sUsedThreads = args->num_cpus;
2171 
2172 	// init the notification service
2173 	new(&sNotificationService) ThreadNotificationService();
2174 
2175 	// start the undertaker thread
2176 	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2177 	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2178 
2179 	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2180 		B_DISPLAY_PRIORITY, NULL);
2181 	if (undertakerThread < 0)
2182 		panic("Failed to create undertaker thread!");
2183 	send_signal_etc(undertakerThread, SIGCONT, B_DO_NOT_RESCHEDULE);
2184 
2185 	// set up some debugger commands
2186 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2187 		"[ <team> ]\n"
2188 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2189 		"all threads of the specified team.\n"
2190 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2191 	add_debugger_command_etc("ready", &dump_thread_list,
2192 		"List all ready threads",
2193 		"\n"
2194 		"Prints a list of all threads in ready state.\n", 0);
2195 	add_debugger_command_etc("running", &dump_thread_list,
2196 		"List all running threads",
2197 		"\n"
2198 		"Prints a list of all threads in running state.\n", 0);
2199 	add_debugger_command_etc("waiting", &dump_thread_list,
2200 		"List all waiting threads (optionally for a specific semaphore)",
2201 		"[ <sem> ]\n"
2202 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2203 		"specified, only the threads waiting on that semaphore are listed.\n"
2204 		"  <sem>  - ID of the semaphore.\n", 0);
2205 	add_debugger_command_etc("realtime", &dump_thread_list,
2206 		"List all realtime threads",
2207 		"\n"
2208 		"Prints a list of all threads with realtime priority.\n", 0);
2209 	add_debugger_command_etc("thread", &dump_thread_info,
2210 		"Dump info about a particular thread",
2211 		"[ -s ] ( <id> | <address> | <name> )*\n"
2212 		"Prints information about the specified thread. If no argument is\n"
2213 		"given the current thread is selected.\n"
2214 		"  -s         - Print info in compact table form (like \"threads\").\n"
2215 		"  <id>       - The ID of the thread.\n"
2216 		"  <address>  - The address of the thread structure.\n"
2217 		"  <name>     - The thread's name.\n", 0);
2218 	add_debugger_command_etc("calling", &dump_thread_list,
2219 		"Show all threads that have a specific address in their call chain",
2220 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2221 	add_debugger_command_etc("unreal", &make_thread_unreal,
2222 		"Set realtime priority threads to normal priority",
2223 		"[ <id> ]\n"
2224 		"Sets the priority of all realtime threads or, if given, the one\n"
2225 		"with the specified ID to \"normal\" priority.\n"
2226 		"  <id>  - The ID of the thread.\n", 0);
2227 	add_debugger_command_etc("suspend", &make_thread_suspended,
2228 		"Suspend a thread",
2229 		"[ <id> ]\n"
2230 		"Suspends the thread with the given ID. If no ID argument is given\n"
2231 		"the current thread is selected.\n"
2232 		"  <id>  - The ID of the thread.\n", 0);
2233 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2234 		"<id>\n"
2235 		"Resumes the specified thread, if it is currently suspended.\n"
2236 		"  <id>  - The ID of the thread.\n", 0);
2237 	add_debugger_command_etc("drop", &drop_into_debugger,
2238 		"Drop a thread into the userland debugger",
2239 		"<id>\n"
2240 		"Drops the specified (userland) thread into the userland debugger\n"
2241 		"after leaving the kernel debugger.\n"
2242 		"  <id>  - The ID of the thread.\n", 0);
2243 	add_debugger_command_etc("priority", &set_thread_prio,
2244 		"Set a thread's priority",
2245 		"<priority> [ <id> ]\n"
2246 		"Sets the priority of the thread with the specified ID to the given\n"
2247 		"priority. If no thread ID is given, the current thread is selected.\n"
2248 		"  <priority>  - The thread's new priority (0 - 120)\n"
2249 		"  <id>        - The ID of the thread.\n", 0);
2250 
2251 	return B_OK;
2252 }
2253 
2254 
2255 status_t
2256 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2257 {
2258 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2259 	// so that get_current_cpu and friends will work, which is crucial for
2260 	// a lot of low level routines
2261 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2262 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2263 	return B_OK;
2264 }
2265 
2266 
2267 //	#pragma mark - thread blocking API
2268 
2269 
2270 static status_t
2271 thread_block_timeout(timer* timer)
2272 {
2273 	// The timer has been installed with B_TIMER_ACQUIRE_THREAD_LOCK, so
2274 	// we're holding the thread lock already. This makes things comfortably
2275 	// easy.
2276 
2277 	struct thread* thread = (struct thread*)timer->user_data;
2278 	// the scheduler will tell us whether to reschedule or not via
2279 	// thread_unblock_locked's return
2280 	if (thread_unblock_locked(thread, B_TIMED_OUT))
2281 		return B_INVOKE_SCHEDULER;
2282 
2283 	return B_HANDLED_INTERRUPT;
2284 }
2285 
2286 
2287 status_t
2288 thread_block()
2289 {
2290 	InterruptsSpinLocker _(gThreadSpinlock);
2291 	return thread_block_locked(thread_get_current_thread());
2292 }
2293 
2294 
2295 bool
2296 thread_unblock(thread_id threadID, status_t status)
2297 {
2298 	InterruptsSpinLocker _(gThreadSpinlock);
2299 
2300 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2301 	if (thread == NULL)
2302 		return false;
2303 	return thread_unblock_locked(thread, status);
2304 }
2305 
2306 
2307 status_t
2308 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2309 {
2310 	InterruptsSpinLocker _(gThreadSpinlock);
2311 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
2312 }
2313 
2314 
2315 status_t
2316 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
2317 {
2318 	struct thread* thread = thread_get_current_thread();
2319 
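	// A wait status of 1 is the "still blocked" marker set by
	// thread_prepare_to_block(); any other value means this thread has
	// already been unblocked and wait.status holds the final result.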
2320 	if (thread->wait.status != 1)
2321 		return thread->wait.status;
2322 
2323 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2324 		&& timeout != B_INFINITE_TIMEOUT;
2325 
2326 	if (useTimer) {
2327 		// Timer flags: absolute/relative + "acquire thread lock". The latter
2328 		// avoids nasty race conditions and deadlock problems that could
2329 		// otherwise occur between our cancel_timer() and a concurrently
2330 		// executing thread_block_timeout().
2331 		uint32 timerFlags;
2332 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2333 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2334 		} else {
2335 			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2336 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2337 				timeout -= rtc_boot_time();
2338 		}
2339 		timerFlags |= B_TIMER_ACQUIRE_THREAD_LOCK;
2340 
2341 		// install the timer
2342 		thread->wait.unblock_timer.user_data = thread;
2343 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2344 			timerFlags);
2345 	}
2346 
2347 	// block
2348 	status_t error = thread_block_locked(thread);
2349 
2350 	// cancel timer, if it didn't fire
2351 	if (error != B_TIMED_OUT && useTimer)
2352 		cancel_timer(&thread->wait.unblock_timer);
2353 
2354 	return error;
2355 }
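
/*	Usage sketch (illustrative only): the usual pattern for this blocking API,
	as also used by snooze_etc() and _user_block_thread() below -- prepare to
	block and then block with the thread lock held, while some other thread
	wakes us with thread_unblock(). The wait type and string are placeholders.

		struct thread* thread = thread_get_current_thread();

		InterruptsSpinLocker locker(gThreadSpinlock);
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "example wait");
		status_t status = thread_block_locked(thread);

		// in the waker, with the thread lock held:
		//	thread_unblock_locked(blockedThread, B_OK);
*/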
2356 
2357 
2358 /*!	Thread spinlock must be held.
2359 */
2360 static status_t
2361 user_unblock_thread(thread_id threadID, status_t status)
2362 {
2363 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2364 	if (thread == NULL)
2365 		return B_BAD_THREAD_ID;
2366 	if (thread->user_thread == NULL)
2367 		return B_NOT_ALLOWED;
2368 
2369 	if (thread->user_thread->wait_status > 0) {
2370 		thread->user_thread->wait_status = status;
2371 		thread_unblock_locked(thread, status);
2372 	}
2373 
2374 	return B_OK;
2375 }
2376 
2377 
2378 //	#pragma mark - public kernel API
2379 
2380 
2381 void
2382 exit_thread(status_t returnValue)
2383 {
2384 	struct thread *thread = thread_get_current_thread();
2385 
2386 	thread->exit.status = returnValue;
2387 	thread->exit.reason = THREAD_RETURN_EXIT;
2388 
2389 	// if called from a kernel thread, we don't deliver the signal,
2390 	// we just exit directly to keep the user space behaviour of
2391 	// this function
2392 	if (thread->team != team_get_kernel_team())
2393 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2394 	else
2395 		thread_exit();
2396 }
2397 
2398 
2399 status_t
2400 kill_thread(thread_id id)
2401 {
2402 	if (id <= 0)
2403 		return B_BAD_VALUE;
2404 
2405 	return send_signal(id, SIGKILLTHR);
2406 }
2407 
2408 
2409 status_t
2410 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2411 {
2412 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2413 }
2414 
2415 
2416 int32
2417 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2418 {
2419 	return receive_data_etc(sender, buffer, bufferSize, 0);
2420 }
2421 
2422 
2423 bool
2424 has_data(thread_id thread)
2425 {
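	// TODO: The thread argument is ignored -- this checks the current
	// thread's message read semaphore, not the given thread's.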
2426 	int32 count;
2427 
2428 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
2429 			&count) != B_OK)
2430 		return false;
2431 
2432 	return count != 0;
2433 }
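
/*	Usage sketch (illustrative only): passing a small message between two
	threads with the data API above; "target" and the 'data' code are
	placeholders.

		int32 value = 42;
		send_data(target, 'data', &value, sizeof(value));

		// in the target thread:
		thread_id sender;
		int32 received;
		int32 code = receive_data(&sender, &received, sizeof(received));
*/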
2434 
2435 
2436 status_t
2437 _get_thread_info(thread_id id, thread_info *info, size_t size)
2438 {
2439 	status_t status = B_OK;
2440 	struct thread *thread;
2441 	cpu_status state;
2442 
2443 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2444 		return B_BAD_VALUE;
2445 
2446 	state = disable_interrupts();
2447 	GRAB_THREAD_LOCK();
2448 
2449 	thread = thread_get_thread_struct_locked(id);
2450 	if (thread == NULL) {
2451 		status = B_BAD_VALUE;
2452 		goto err;
2453 	}
2454 
2455 	fill_thread_info(thread, info, size);
2456 
2457 err:
2458 	RELEASE_THREAD_LOCK();
2459 	restore_interrupts(state);
2460 
2461 	return status;
2462 }
2463 
2464 
2465 status_t
2466 _get_next_thread_info(team_id team, int32 *_cookie, thread_info *info,
2467 	size_t size)
2468 {
2469 	status_t status = B_BAD_VALUE;
2470 	struct thread *thread = NULL;
2471 	cpu_status state;
2472 	int slot;
2473 	thread_id lastThreadID;
2474 
2475 	if (info == NULL || size != sizeof(thread_info) || team < B_OK)
2476 		return B_BAD_VALUE;
2477 
2478 	if (team == B_CURRENT_TEAM)
2479 		team = team_get_current_team_id();
2480 	else if (!team_is_valid(team))
2481 		return B_BAD_VALUE;
2482 
2483 	slot = *_cookie;
2484 
2485 	state = disable_interrupts();
2486 	GRAB_THREAD_LOCK();
2487 
2488 	lastThreadID = peek_next_thread_id();
2489 	if (slot >= lastThreadID)
2490 		goto err;
2491 
2492 	while (slot < lastThreadID
2493 		&& (!(thread = thread_get_thread_struct_locked(slot))
2494 			|| thread->team->id != team))
2495 		slot++;
2496 
2497 	if (thread != NULL && thread->team->id == team) {
2498 		fill_thread_info(thread, info, size);
2499 
2500 		*_cookie = slot + 1;
2501 		status = B_OK;
2502 	}
2503 
2504 err:
2505 	RELEASE_THREAD_LOCK();
2506 	restore_interrupts(state);
2507 
2508 	return status;
2509 }
2510 
2511 
2512 thread_id
2513 find_thread(const char *name)
2514 {
2515 	struct hash_iterator iterator;
2516 	struct thread *thread;
2517 	cpu_status state;
2518 
2519 	if (name == NULL)
2520 		return thread_get_current_thread_id();
2521 
2522 	state = disable_interrupts();
2523 	GRAB_THREAD_LOCK();
2524 
2525 	// ToDo: this might not be in the same order as find_thread() in BeOS,
2526 	//		which could theoretically be problematic.
2527 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2528 	//		cheap either - although this function is probably used very rarely.
2529 
2530 	hash_open(sThreadHash, &iterator);
2531 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
2532 			!= NULL) {
2533 		// Search through hash
2534 		if (thread->name != NULL && !strcmp(thread->name, name)) {
2535 			thread_id id = thread->id;
2536 
2537 			RELEASE_THREAD_LOCK();
2538 			restore_interrupts(state);
2539 			return id;
2540 		}
2541 	}
2542 
2543 	RELEASE_THREAD_LOCK();
2544 	restore_interrupts(state);
2545 
2546 	return B_NAME_NOT_FOUND;
2547 }
2548 
2549 
2550 status_t
2551 rename_thread(thread_id id, const char *name)
2552 {
2553 	struct thread *thread = thread_get_current_thread();
2554 	status_t status = B_BAD_THREAD_ID;
2555 	cpu_status state;
2556 
2557 	if (name == NULL)
2558 		return B_BAD_VALUE;
2559 
2560 	state = disable_interrupts();
2561 	GRAB_THREAD_LOCK();
2562 
2563 	if (thread->id != id)
2564 		thread = thread_get_thread_struct_locked(id);
2565 
2566 	if (thread != NULL) {
2567 		if (thread->team == thread_get_current_thread()->team) {
2568 			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2569 			status = B_OK;
2570 		} else
2571 			status = B_NOT_ALLOWED;
2572 	}
2573 
2574 	RELEASE_THREAD_LOCK();
2575 	restore_interrupts(state);
2576 
2577 	return status;
2578 }
2579 
2580 
2581 status_t
2582 set_thread_priority(thread_id id, int32 priority)
2583 {
2584 	struct thread *thread;
2585 	int32 oldPriority;
2586 
2587 	// make sure the passed in priority is within bounds
2588 	if (priority > THREAD_MAX_SET_PRIORITY)
2589 		priority = THREAD_MAX_SET_PRIORITY;
2590 	if (priority < THREAD_MIN_SET_PRIORITY)
2591 		priority = THREAD_MIN_SET_PRIORITY;
2592 
2593 	thread = thread_get_current_thread();
2594 	if (thread->id == id) {
2595 		if (thread_is_idle_thread(thread))
2596 			return B_NOT_ALLOWED;
2597 
2598 		// It's our own thread, so we know we aren't in the run queue, and we
2599 		// can manipulate our structure directly
2600 		oldPriority = thread->priority;
2601 			// Note that this might not return the correct value if we are
2602 			// preempted here, and another thread changes our priority before
2603 			// the next line is executed.
2604 		thread->priority = thread->next_priority = priority;
2605 	} else {
2606 		InterruptsSpinLocker _(gThreadSpinlock);
2607 
2608 		thread = thread_get_thread_struct_locked(id);
2609 		if (thread == NULL)
2610 			return B_BAD_THREAD_ID;
2611 
2612 		if (thread_is_idle_thread(thread))
2613 			return B_NOT_ALLOWED;
2614 
2615 		oldPriority = thread->priority;
2616 		scheduler_set_thread_priority(thread, priority);
2617 	}
2618 
2619 	return oldPriority;
2620 }
2621 
2622 
2623 status_t
2624 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2625 {
2626 	status_t status;
2627 
2628 	if (timebase != B_SYSTEM_TIMEBASE)
2629 		return B_BAD_VALUE;
2630 
2631 	InterruptsSpinLocker _(gThreadSpinlock);
2632 	struct thread* thread = thread_get_current_thread();
2633 
2634 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, NULL);
2635 	status = thread_block_with_timeout_locked(flags, timeout);
2636 
2637 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2638 		return B_OK;
2639 
2640 	return status;
2641 }
2642 
2643 
2644 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2645 status_t
2646 snooze(bigtime_t timeout)
2647 {
2648 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2649 }
2650 
2651 
2652 /*!
2653 	snooze_until() for internal kernel use only; doesn't interrupt on
2654 	signals.
2655 */
2656 status_t
2657 snooze_until(bigtime_t timeout, int timebase)
2658 {
2659 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2660 }
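
/*	Usage sketch (illustrative only): snooze_until() expects an absolute
	wake-up time in the given time base, e.g. one second from now:

		snooze_until(system_time() + 1000000, B_SYSTEM_TIMEBASE);
*/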
2661 
2662 
2663 status_t
2664 wait_for_thread(thread_id thread, status_t *_returnCode)
2665 {
2666 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2667 }
2668 
2669 
2670 status_t
2671 suspend_thread(thread_id id)
2672 {
2673 	if (id <= 0)
2674 		return B_BAD_VALUE;
2675 
2676 	return send_signal(id, SIGSTOP);
2677 }
2678 
2679 
2680 status_t
2681 resume_thread(thread_id id)
2682 {
2683 	if (id <= 0)
2684 		return B_BAD_VALUE;
2685 
2686 	return send_signal_etc(id, SIGCONT, SIGNAL_FLAG_DONT_RESTART_SYSCALL);
2687 		// This retains compatibility to BeOS which documents the
2688 		// combination of suspend_thread() and resume_thread() to
2689 		// interrupt threads waiting on semaphores.
2690 }
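
/*	Usage sketch (illustrative only): per the note above, a suspend/resume
	pair can be used to knock another thread out of an interruptible wait
	(such as a semaphore acquisition), which then fails with B_INTERRUPTED;
	"blockedThread" is a placeholder.

		suspend_thread(blockedThread);
		resume_thread(blockedThread);
*/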
2691 
2692 
2693 thread_id
2694 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2695 	void *arg)
2696 {
2697 	thread_creation_attributes attributes;
2698 	attributes.entry = (thread_entry_func)function;
2699 	attributes.name = name;
2700 	attributes.priority = priority;
2701 	attributes.args1 = arg;
2702 	attributes.args2 = NULL;
2703 	attributes.stack_address = NULL;
2704 	attributes.stack_size = 0;
2705 	attributes.team = team_get_kernel_team()->id;
2706 	attributes.thread = -1;
2707 
2708 	return create_thread(attributes, true);
2709 }
2710 
2711 
2712 int
2713 getrlimit(int resource, struct rlimit * rlp)
2714 {
2715 	status_t error = common_getrlimit(resource, rlp);
2716 	if (error != B_OK) {
2717 		errno = error;
2718 		return -1;
2719 	}
2720 
2721 	return 0;
2722 }
2723 
2724 
2725 int
2726 setrlimit(int resource, const struct rlimit * rlp)
2727 {
2728 	status_t error = common_setrlimit(resource, rlp);
2729 	if (error != B_OK) {
2730 		errno = error;
2731 		return -1;
2732 	}
2733 
2734 	return 0;
2735 }
2736 
2737 
2738 //	#pragma mark - syscalls
2739 
2740 
2741 void
2742 _user_exit_thread(status_t returnValue)
2743 {
2744 	exit_thread(returnValue);
2745 }
2746 
2747 
2748 status_t
2749 _user_kill_thread(thread_id thread)
2750 {
2751 	return kill_thread(thread);
2752 }
2753 
2754 
2755 status_t
2756 _user_resume_thread(thread_id thread)
2757 {
2758 	return resume_thread(thread);
2759 }
2760 
2761 
2762 status_t
2763 _user_suspend_thread(thread_id thread)
2764 {
2765 	return suspend_thread(thread);
2766 }
2767 
2768 
2769 status_t
2770 _user_rename_thread(thread_id thread, const char *userName)
2771 {
2772 	char name[B_OS_NAME_LENGTH];
2773 
2774 	if (!IS_USER_ADDRESS(userName)
2775 		|| userName == NULL
2776 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2777 		return B_BAD_ADDRESS;
2778 
2779 	return rename_thread(thread, name);
2780 }
2781 
2782 
2783 int32
2784 _user_set_thread_priority(thread_id thread, int32 newPriority)
2785 {
2786 	return set_thread_priority(thread, newPriority);
2787 }
2788 
2789 
2790 thread_id
2791 _user_spawn_thread(thread_creation_attributes* userAttributes)
2792 {
2793 	thread_creation_attributes attributes;
2794 	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
2795 		|| user_memcpy(&attributes, userAttributes,
2796 				sizeof(attributes)) != B_OK) {
2797 		return B_BAD_ADDRESS;
2798 	}
2799 
2800 	if (attributes.stack_size != 0
2801 		&& (attributes.stack_size < MIN_USER_STACK_SIZE
2802 			|| attributes.stack_size > MAX_USER_STACK_SIZE)) {
2803 		return B_BAD_VALUE;
2804 	}
2805 
2806 	char name[B_OS_NAME_LENGTH];
2807 	thread_id threadID;
2808 
2809 	if (!IS_USER_ADDRESS(attributes.entry) || attributes.entry == NULL
2810 		|| (attributes.stack_address != NULL
2811 			&& !IS_USER_ADDRESS(attributes.stack_address))
2812 		|| (attributes.name != NULL && (!IS_USER_ADDRESS(attributes.name)
2813 			|| user_strlcpy(name, attributes.name, B_OS_NAME_LENGTH) < 0)))
2814 		return B_BAD_ADDRESS;
2815 
2816 	attributes.name = attributes.name != NULL ? name : "user thread";
2817 	attributes.team = thread_get_current_thread()->team->id;
2818 	attributes.thread = -1;
2819 
2820 	threadID = create_thread(attributes, false);
2821 
2822 	if (threadID >= 0)
2823 		user_debug_thread_created(threadID);
2824 
2825 	return threadID;
2826 }
2827 
2828 
2829 status_t
2830 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2831 {
2832 	// NOTE: We only know the system timebase at the moment.
2833 	syscall_restart_handle_timeout_pre(flags, timeout);
2834 
2835 	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2836 
2837 	return syscall_restart_handle_timeout_post(error, timeout);
2838 }
2839 
2840 
2841 void
2842 _user_thread_yield(void)
2843 {
2844 	thread_yield(true);
2845 }
2846 
2847 
2848 status_t
2849 _user_get_thread_info(thread_id id, thread_info *userInfo)
2850 {
2851 	thread_info info;
2852 	status_t status;
2853 
2854 	if (!IS_USER_ADDRESS(userInfo))
2855 		return B_BAD_ADDRESS;
2856 
2857 	status = _get_thread_info(id, &info, sizeof(thread_info));
2858 
2859 	if (status >= B_OK
2860 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2861 		return B_BAD_ADDRESS;
2862 
2863 	return status;
2864 }
2865 
2866 
2867 status_t
2868 _user_get_next_thread_info(team_id team, int32 *userCookie,
2869 	thread_info *userInfo)
2870 {
2871 	status_t status;
2872 	thread_info info;
2873 	int32 cookie;
2874 
2875 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2876 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2877 		return B_BAD_ADDRESS;
2878 
2879 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2880 	if (status < B_OK)
2881 		return status;
2882 
2883 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2884 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2885 		return B_BAD_ADDRESS;
2886 
2887 	return status;
2888 }
2889 
2890 
2891 thread_id
2892 _user_find_thread(const char *userName)
2893 {
2894 	char name[B_OS_NAME_LENGTH];
2895 
2896 	if (userName == NULL)
2897 		return find_thread(NULL);
2898 
2899 	if (!IS_USER_ADDRESS(userName)
2900 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2901 		return B_BAD_ADDRESS;
2902 
2903 	return find_thread(name);
2904 }
2905 
2906 
2907 status_t
2908 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2909 {
2910 	status_t returnCode;
2911 	status_t status;
2912 
2913 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2914 		return B_BAD_ADDRESS;
2915 
2916 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2917 
2918 	if (status == B_OK && userReturnCode != NULL
2919 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
2920 		return B_BAD_ADDRESS;
2921 	}
2922 
2923 	return syscall_restart_handle_post(status);
2924 }
2925 
2926 
2927 bool
2928 _user_has_data(thread_id thread)
2929 {
2930 	return has_data(thread);
2931 }
2932 
2933 
2934 status_t
2935 _user_send_data(thread_id thread, int32 code, const void *buffer,
2936 	size_t bufferSize)
2937 {
2938 	if (!IS_USER_ADDRESS(buffer))
2939 		return B_BAD_ADDRESS;
2940 
2941 	return send_data_etc(thread, code, buffer, bufferSize,
2942 		B_KILL_CAN_INTERRUPT);
2943 		// supports userland buffers
2944 }
2945 
2946 
2947 status_t
2948 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2949 {
2950 	thread_id sender;
2951 	status_t code;
2952 
2953 	if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
2954 		|| !IS_USER_ADDRESS(buffer))
2955 		return B_BAD_ADDRESS;
2956 
2957 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2958 		// supports userland buffers
2959 
2960 	if (_userSender != NULL)
2961 		if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
2962 			return B_BAD_ADDRESS;
2963 
2964 	return code;
2965 }
2966 
2967 
2968 status_t
2969 _user_block_thread(uint32 flags, bigtime_t timeout)
2970 {
2971 	syscall_restart_handle_timeout_pre(flags, timeout);
2972 	flags |= B_CAN_INTERRUPT;
2973 
2974 	struct thread* thread = thread_get_current_thread();
2975 
2976 	InterruptsSpinLocker locker(gThreadSpinlock);
2977 
2978 	// check, if already done
2979 	if (thread->user_thread->wait_status <= 0)
2980 		return thread->user_thread->wait_status;
2981 
2982 	// nope, so wait
2983 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");
2984 	status_t status = thread_block_with_timeout_locked(flags, timeout);
2985 	thread->user_thread->wait_status = status;
2986 
2987 	return syscall_restart_handle_timeout_post(status, timeout);
2988 }
2989 
2990 
2991 status_t
2992 _user_unblock_thread(thread_id threadID, status_t status)
2993 {
2994 	InterruptsSpinLocker locker(gThreadSpinlock);
2995 	return user_unblock_thread(threadID, status);
2996 }
2997 
2998 
2999 status_t
3000 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
3001 {
3002 	enum {
3003 		MAX_USER_THREADS_TO_UNBLOCK	= 128
3004 	};
3005 
3006 	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
3007 		return B_BAD_ADDRESS;
3008 	if (count > MAX_USER_THREADS_TO_UNBLOCK)
3009 		return B_BAD_VALUE;
3010 
3011 	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
3012 	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
3013 		return B_BAD_ADDRESS;
3014 
3015 	for (uint32 i = 0; i < count; i++)
3016 		user_unblock_thread(threads[i], status);
3017 
3018 	return B_OK;
3019 }
3020 
3021 
3022 // TODO: the following two functions don't belong here
3023 
3024 
3025 int
3026 _user_getrlimit(int resource, struct rlimit *urlp)
3027 {
3028 	struct rlimit rl;
3029 	int ret;
3030 
3031 	if (urlp == NULL)
3032 		return EINVAL;
3033 
3034 	if (!IS_USER_ADDRESS(urlp))
3035 		return B_BAD_ADDRESS;
3036 
3037 	ret = common_getrlimit(resource, &rl);
3038 
3039 	if (ret == 0) {
3040 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
3041 		if (ret < 0)
3042 			return ret;
3043 
3044 		return 0;
3045 	}
3046 
3047 	return ret;
3048 }
3049 
3050 
3051 int
3052 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
3053 {
3054 	struct rlimit resourceLimit;
3055 
3056 	if (userResourceLimit == NULL)
3057 		return EINVAL;
3058 
3059 	if (!IS_USER_ADDRESS(userResourceLimit)
3060 		|| user_memcpy(&resourceLimit, userResourceLimit,
3061 			sizeof(struct rlimit)) < B_OK)
3062 		return B_BAD_ADDRESS;
3063 
3064 	return common_setrlimit(resource, &resourceLimit);
3065 }
3066