xref: /haiku/src/system/kernel/thread.cpp (revision ddac407426cd3b3d0b4589d7a161b300b3539a2a)
1 /*
2  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*! Threading routines */
10 
11 
12 #include <thread.h>
13 
14 #include <errno.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <sys/resource.h>
19 
20 #include <OS.h>
21 
22 #include <util/AutoLock.h>
23 #include <util/khash.h>
24 
25 #include <arch/debug.h>
26 #include <boot/kernel_args.h>
27 #include <condition_variable.h>
28 #include <cpu.h>
29 #include <int.h>
30 #include <kimage.h>
31 #include <kscheduler.h>
32 #include <ksignal.h>
33 #include <Notifications.h>
34 #include <real_time_clock.h>
35 #include <smp.h>
36 #include <syscalls.h>
37 #include <syscall_restart.h>
38 #include <team.h>
39 #include <tls.h>
40 #include <user_runtime.h>
41 #include <user_thread.h>
42 #include <vfs.h>
43 #include <vm.h>
44 #include <vm_address_space.h>
45 #include <wait_for_objects.h>
46 
47 
48 //#define TRACE_THREAD
49 #ifdef TRACE_THREAD
50 #	define TRACE(x) dprintf x
51 #else
52 #	define TRACE(x) ;
53 #endif
54 
55 
56 #define THREAD_MAX_MESSAGE_SIZE		65536
57 
58 
59 struct thread_key {
60 	thread_id id;
61 };
62 
63 // global
64 spinlock gThreadSpinlock = B_SPINLOCK_INITIALIZER;
65 
66 // thread list
67 static struct thread sIdleThreads[B_MAX_CPU_COUNT];
68 static hash_table *sThreadHash = NULL;
69 static thread_id sNextThreadID = 1;
70 
71 // some arbitrarily chosen limits - should probably depend on the available
72 // memory (the limit is not yet enforced)
73 static int32 sMaxThreads = 4096;
74 static int32 sUsedThreads = 0;
75 
76 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
77 	struct thread*	thread;
78 	team_id			teamID;
79 	sem_id			deathSem;
80 
81 	UndertakerEntry(struct thread* thread, team_id teamID, sem_id deathSem)
82 		:
83 		thread(thread),
84 		teamID(teamID),
85 		deathSem(deathSem)
86 	{
87 	}
88 };
89 
90 
91 class ThreadNotificationService : public DefaultNotificationService {
92 public:
93 	ThreadNotificationService()
94 		: DefaultNotificationService("threads")
95 	{
96 	}
97 
98 	void Notify(uint32 eventCode, struct thread* thread)
99 	{
100 		char eventBuffer[128];
101 		KMessage event;
102 		event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
103 		event.AddInt32("event", eventCode);
104 		event.AddInt32("thread", thread->id);
105 		event.AddPointer("threadStruct", thread);
106 
107 		DefaultNotificationService::Notify(event, eventCode);
108 	}
109 };
110 
111 
112 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
113 static ConditionVariable sUndertakerCondition;
114 static ThreadNotificationService sNotificationService;
115 
116 
117 // The dead queue is used as a pool from which to retrieve and reuse previously
118 // allocated thread structs when creating a new thread. It should be gone once
119 // the slab allocator is in.
120 static struct thread_queue dead_q;
121 
122 static void thread_kthread_entry(void);
123 static void thread_kthread_exit(void);
124 
125 
126 /*!
127 	Inserts a thread into a team.
128 	You must hold the team lock when you call this function.
129 */
130 static void
131 insert_thread_into_team(struct team *team, struct thread *thread)
132 {
133 	thread->team_next = team->thread_list;
134 	team->thread_list = thread;
135 	team->num_threads++;
136 
137 	if (team->num_threads == 1) {
138 		// this was the first thread
139 		team->main_thread = thread;
140 	}
141 	thread->team = team;
142 }
143 
144 
145 /*!
146 	Removes a thread from a team.
147 	You must hold the team lock when you call this function.
148 */
149 static void
150 remove_thread_from_team(struct team *team, struct thread *thread)
151 {
152 	struct thread *temp, *last = NULL;
153 
154 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
155 		if (temp == thread) {
156 			if (last == NULL)
157 				team->thread_list = temp->team_next;
158 			else
159 				last->team_next = temp->team_next;
160 
161 			team->num_threads--;
162 			break;
163 		}
164 		last = temp;
165 	}
166 }
167 
168 
169 static int
170 thread_struct_compare(void *_t, const void *_key)
171 {
172 	struct thread *thread = (struct thread*)_t;
173 	const struct thread_key *key = (const struct thread_key*)_key;
174 
175 	if (thread->id == key->id)
176 		return 0;
177 
178 	return 1;
179 }
180 
181 
182 static uint32
183 thread_struct_hash(void *_t, const void *_key, uint32 range)
184 {
185 	struct thread *thread = (struct thread*)_t;
186 	const struct thread_key *key = (const struct thread_key*)_key;
187 
188 	if (thread != NULL)
189 		return thread->id % range;
190 
191 	return (uint32)key->id % range;
192 }
193 
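/*!	Illustrative sketch: these two callbacks plug into the generic util/khash
	hash table. Assuming the usual khash initialization signature, the global
	thread table is set up roughly like this (in thread_init()):

	\code
	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
		&thread_struct_compare, &thread_struct_hash);
	\endcode

	Lookups pass a thread_key, which is what the \c key parameter of both
	callbacks refers to.
*/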
194 
195 static void
196 reset_signals(struct thread *thread)
197 {
198 	thread->sig_pending = 0;
199 	thread->sig_block_mask = 0;
200 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
201 	thread->signal_stack_base = 0;
202 	thread->signal_stack_size = 0;
203 	thread->signal_stack_enabled = false;
204 }
205 
206 
207 /*!
208 	Allocates and fills in a thread structure (or reuses one from the
209 	dead queue).
210 
211 	\param threadID The ID to be assigned to the new thread. If
212 		  \code < 0 \endcode a fresh one is allocated.
213 	\param inthread If non-NULL, this thread struct is initialized in place
214 		  instead of allocating or recycling one.
214 */
215 
216 static struct thread *
217 create_thread_struct(struct thread *inthread, const char *name,
218 	thread_id threadID, struct cpu_ent *cpu)
219 {
220 	struct thread *thread;
221 	cpu_status state;
222 	char temp[64];
223 	bool recycled = false;
224 
225 	if (inthread == NULL) {
226 		// try to recycle one from the dead queue first
227 		state = disable_interrupts();
228 		GRAB_THREAD_LOCK();
229 		thread = thread_dequeue(&dead_q);
230 		RELEASE_THREAD_LOCK();
231 		restore_interrupts(state);
232 
233 		// if not, create a new one
234 		if (thread == NULL) {
235 			thread = (struct thread *)malloc(sizeof(struct thread));
236 			if (thread == NULL)
237 				return NULL;
238 		} else {
239 			recycled = true;
240 		}
241 	} else {
242 		thread = inthread;
243 	}
244 
245 	if (!recycled)
246 		scheduler_on_thread_create(thread);
247 
248 	if (name != NULL)
249 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
250 	else
251 		strcpy(thread->name, "unnamed thread");
252 
253 	thread->flags = 0;
254 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
255 	thread->team = NULL;
256 	thread->cpu = cpu;
257 	thread->previous_cpu = NULL;
258 	thread->pinned_to_cpu = 0;
259 	thread->keep_scheduled = 0;
260 	thread->fault_handler = 0;
261 	thread->page_faults_allowed = 1;
262 	thread->kernel_stack_area = -1;
263 	thread->kernel_stack_base = 0;
264 	thread->user_stack_area = -1;
265 	thread->user_stack_base = 0;
266 	thread->user_local_storage = 0;
267 	thread->kernel_errno = 0;
268 	thread->team_next = NULL;
269 	thread->queue_next = NULL;
270 	thread->priority = thread->next_priority = -1;
271 	thread->io_priority = -1;
272 	thread->args1 = NULL;  thread->args2 = NULL;
273 	thread->alarm.period = 0;
274 	reset_signals(thread);
275 	thread->in_kernel = true;
276 	thread->was_yielded = false;
277 	thread->user_time = 0;
278 	thread->kernel_time = 0;
279 	thread->last_time = 0;
280 	thread->exit.status = 0;
281 	thread->exit.reason = 0;
282 	thread->exit.signal = 0;
283 	list_init(&thread->exit.waiters);
284 	thread->select_infos = NULL;
285 	thread->post_interrupt_callback = NULL;
286 	thread->post_interrupt_data = NULL;
287 	thread->user_thread = NULL;
288 
289 	sprintf(temp, "thread_%ld_retcode_sem", thread->id);
290 	thread->exit.sem = create_sem(0, temp);
291 	if (thread->exit.sem < B_OK)
292 		goto err1;
293 
294 	sprintf(temp, "%s send", thread->name);
295 	thread->msg.write_sem = create_sem(1, temp);
296 	if (thread->msg.write_sem < B_OK)
297 		goto err2;
298 
299 	sprintf(temp, "%s receive", thread->name);
300 	thread->msg.read_sem = create_sem(0, temp);
301 	if (thread->msg.read_sem < B_OK)
302 		goto err3;
303 
304 	if (arch_thread_init_thread_struct(thread) < B_OK)
305 		goto err4;
306 
307 	return thread;
308 
309 err4:
310 	delete_sem(thread->msg.read_sem);
311 err3:
312 	delete_sem(thread->msg.write_sem);
313 err2:
314 	delete_sem(thread->exit.sem);
315 err1:
316 	// ToDo: put them in the dead queue instead?
317 	if (inthread == NULL) {
318 		scheduler_on_thread_destroy(thread);
319 		free(thread);
320 	}
321 	return NULL;
322 }
323 
324 
325 static void
326 delete_thread_struct(struct thread *thread)
327 {
328 	delete_sem(thread->exit.sem);
329 	delete_sem(thread->msg.write_sem);
330 	delete_sem(thread->msg.read_sem);
331 
332 	scheduler_on_thread_destroy(thread);
333 
334 	// ToDo: put them in the dead queue instead?
335 	free(thread);
336 }
337 
338 
339 /*! This function gets run by a new thread before anything else */
340 static void
341 thread_kthread_entry(void)
342 {
343 	struct thread *thread = thread_get_current_thread();
344 
345 	// The thread is new and has been scheduled the first time. Notify the user
346 	// debugger code.
347 	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
348 		user_debug_thread_scheduled(thread);
349 
350 	// Simulate the thread spinlock release that would occur if the thread
351 	// had been rescheduled from. That reschedule didn't happen (the thread is new).
352 	RELEASE_THREAD_LOCK();
353 
354 	// start tracking time
355 	thread->last_time = system_time();
356 
357 	enable_interrupts(); // this essentially simulates a return-from-interrupt
358 }
359 
360 
361 static void
362 thread_kthread_exit(void)
363 {
364 	struct thread *thread = thread_get_current_thread();
365 
366 	thread->exit.reason = THREAD_RETURN_EXIT;
367 	thread_exit();
368 }
369 
370 
371 /*!
372 	Initializes the thread and jumps to its userspace entry point.
373 	This function is called at creation time of every user thread,
374 	but not for a team's main thread.
375 */
376 static int
377 _create_user_thread_kentry(void)
378 {
379 	struct thread *thread = thread_get_current_thread();
380 
381 	// jump to the entry point in user space
382 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
383 		thread->args1, thread->args2);
384 
385 	// only get here if the above call fails
386 	return 0;
387 }
388 
389 
390 /*! Initializes the thread and calls its kernel space entry point. */
391 static int
392 _create_kernel_thread_kentry(void)
393 {
394 	struct thread *thread = thread_get_current_thread();
395 	int (*func)(void *args) = (int (*)(void *))thread->entry;
396 
397 	// call the entry function with the appropriate args
398 	return func(thread->args1);
399 }
400 
401 
402 /*!
403 	Creates a new thread in the team with the specified team ID.
404 
405 	\param attributes The creation attributes. If attributes.thread is
406 		  \code < 0 \endcode, a fresh thread ID is allocated.
407 */
408 static thread_id
409 create_thread(thread_creation_attributes& attributes, bool kernel)
410 {
411 	struct thread *thread, *currentThread;
412 	struct team *team;
413 	cpu_status state;
414 	char stack_name[B_OS_NAME_LENGTH];
415 	status_t status;
416 	bool abort = false;
417 	bool debugNewThread = false;
418 
419 	TRACE(("create_thread(%s, id = %ld, %s)\n", attributes.name,
420 		attributes.thread, kernel ? "kernel" : "user"));
421 
422 	thread = create_thread_struct(NULL, attributes.name, attributes.thread,
423 		NULL);
424 	if (thread == NULL)
425 		return B_NO_MEMORY;
426 
427 	thread->priority = attributes.priority == -1
428 		? B_NORMAL_PRIORITY : attributes.priority;
429 	thread->next_priority = thread->priority;
430 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
431 	thread->state = B_THREAD_SUSPENDED;
432 	thread->next_state = B_THREAD_SUSPENDED;
433 
434 	// init debug structure
435 	init_thread_debug_info(&thread->debug_info);
436 
437 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", attributes.name,
438 		thread->id);
439 	thread->kernel_stack_area = create_area(stack_name,
440 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
441 		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
442 		B_FULL_LOCK,
443 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
444 
445 	if (thread->kernel_stack_area < 0) {
446 		// we're not yet part of a team, so we can just bail out
447 		status = thread->kernel_stack_area;
448 
449 		dprintf("create_thread: error creating kernel stack: %s!\n",
450 			strerror(status));
451 
452 		delete_thread_struct(thread);
453 		return status;
454 	}
455 
456 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
457 		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
458 
459 	state = disable_interrupts();
460 	GRAB_THREAD_LOCK();
461 
462 	// If the new thread belongs to the same team as the current thread,
463 	// it may inherit some of the thread debug flags.
464 	currentThread = thread_get_current_thread();
465 	if (currentThread && currentThread->team->id == attributes.team) {
466 		// inherit all user flags...
467 		int32 debugFlags = currentThread->debug_info.flags
468 			& B_THREAD_DEBUG_USER_FLAG_MASK;
469 
470 		// ... except the syscall tracing flags, unless explicitly requested
471 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
472 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
473 				| B_THREAD_DEBUG_POST_SYSCALL);
474 		}
475 
476 		thread->debug_info.flags = debugFlags;
477 
478 		// stop the new thread, if desired
479 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
480 	}
481 
482 	// insert into global list
483 	hash_insert(sThreadHash, thread);
484 	sUsedThreads++;
485 	scheduler_on_thread_init(thread);
486 	RELEASE_THREAD_LOCK();
487 
488 	GRAB_TEAM_LOCK();
489 	// look at the team, make sure it's not being deleted
490 	team = team_get_team_struct_locked(attributes.team);
491 
492 	if (team == NULL || team->state == TEAM_STATE_DEATH)
493 		abort = true;
494 
495 	if (!abort && !kernel) {
496 		thread->user_thread = team_allocate_user_thread(team);
497 		abort = thread->user_thread == NULL;
498 	}
499 
500 	if (!abort) {
501 		// Debug the new thread if the parent thread requested that (see above)
502 		// or the respective global team debug flag is set. But only if a
503 		// debugger is installed for the team.
504 		debugNewThread |= (atomic_get(&team->debug_info.flags)
505 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
506 		if (debugNewThread
507 			&& (atomic_get(&team->debug_info.flags)
508 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
509 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
510 		}
511 
512 		insert_thread_into_team(team, thread);
513 	}
514 
515 	RELEASE_TEAM_LOCK();
516 	if (abort) {
517 		GRAB_THREAD_LOCK();
518 		hash_remove(sThreadHash, thread);
519 		RELEASE_THREAD_LOCK();
520 	}
521 	restore_interrupts(state);
522 	if (abort) {
523 		delete_area(thread->kernel_stack_area);
524 		delete_thread_struct(thread);
525 		return B_BAD_TEAM_ID;
526 	}
527 
528 	thread->args1 = attributes.args1;
529 	thread->args2 = attributes.args2;
530 	thread->entry = attributes.entry;
531 	status = thread->id;
532 
533 	// notify listeners
534 	sNotificationService.Notify(THREAD_ADDED, thread);
535 
536 	if (kernel) {
537 		// this sets up an initial kthread stack that runs the entry
538 
539 		// Note: whatever function wants to set up a user stack later for this
540 		// thread must initialize the TLS for it
541 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
542 			&thread_kthread_entry, &thread_kthread_exit);
543 	} else {
544 		// create user stack
545 
546 		// the stack will be between USER_STACK_REGION and the main thread stack
547 		// area (the user stack of the main thread is created in
548 		// team_create_team())
549 		if (attributes.stack_address == NULL) {
550 			thread->user_stack_base = USER_STACK_REGION;
551 			if (attributes.stack_size <= 0)
552 				thread->user_stack_size = USER_STACK_SIZE;
553 			else
554 				thread->user_stack_size = PAGE_ALIGN(attributes.stack_size);
555 			thread->user_stack_size += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
556 
557 			snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack",
558 				attributes.name, thread->id);
559 			thread->user_stack_area = create_area_etc(team->id, stack_name,
560 					(void **)&thread->user_stack_base, B_BASE_ADDRESS,
561 					thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
562 					B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0);
563 			if (thread->user_stack_area < B_OK
564 				|| arch_thread_init_tls(thread) < B_OK) {
565 				// great, we have a fully running thread without a (usable)
566 				// stack
567 				dprintf("create_thread: unable to create proper user stack!\n");
568 				status = thread->user_stack_area;
569 				kill_thread(thread->id);
570 			}
571 		} else {
572 			thread->user_stack_base = (addr_t)attributes.stack_address;
573 			thread->user_stack_size = attributes.stack_size;
574 		}
575 
576 		user_debug_update_new_thread_flags(thread->id);
577 
578 		// Set up the kernel stack; the entry function it runs will immediately
579 		// switch the thread into user space (the user entry point and args are
580 		// already stored in the thread struct).
581 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
582 			&thread_kthread_entry, &thread_kthread_exit);
583 	}
584 
585 	return status;
586 }
587 
588 
589 static status_t
590 undertaker(void* /*args*/)
591 {
592 	while (true) {
593 		// wait for a thread to bury
594 		InterruptsSpinLocker locker(gThreadSpinlock);
595 
596 		while (sUndertakerEntries.IsEmpty()) {
597 			ConditionVariableEntry conditionEntry;
598 			sUndertakerCondition.Add(&conditionEntry);
599 			locker.Unlock();
600 
601 			conditionEntry.Wait();
602 
603 			locker.Lock();
604 		}
605 
606 		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
607 		locker.Unlock();
608 
609 		UndertakerEntry entry = *_entry;
610 			// we need a copy, since the original entry is on the thread's stack
611 
612 		// we've got an entry
613 		struct thread* thread = entry.thread;
614 
615 		// delete the old kernel stack area
616 		delete_area(thread->kernel_stack_area);
617 
618 		// remove this thread from all of the global lists
619 		disable_interrupts();
620 		GRAB_TEAM_LOCK();
621 
622 		remove_thread_from_team(team_get_kernel_team(), thread);
623 
624 		RELEASE_TEAM_LOCK();
625 		enable_interrupts();
626 			// needed for the debugger notification below
627 
628 		if (entry.deathSem >= 0)
629 			release_sem_etc(entry.deathSem, 1, B_DO_NOT_RESCHEDULE);
630 
631 		// free the thread structure
632 		locker.Lock();
633 		thread_enqueue(thread, &dead_q);
634 			// TODO: Use the slab allocator!
635 	}
636 
637 	// never can get here
638 	return B_OK;
639 }
640 
641 
642 static sem_id
643 get_thread_wait_sem(struct thread* thread)
644 {
645 	if (thread->state == B_THREAD_WAITING
646 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
647 		return (sem_id)(addr_t)thread->wait.object;
648 	}
649 	return -1;
650 }
651 
652 
653 /*!
654 	Fills the thread_info structure with information from the specified
655 	thread.
656 	The thread lock must be held when called.
657 */
658 static void
659 fill_thread_info(struct thread *thread, thread_info *info, size_t size)
660 {
661 	info->thread = thread->id;
662 	info->team = thread->team->id;
663 
664 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
665 
666 	if (thread->state == B_THREAD_WAITING) {
667 		info->state = B_THREAD_WAITING;
668 
669 		switch (thread->wait.type) {
670 			case THREAD_BLOCK_TYPE_SNOOZE:
671 				info->state = B_THREAD_ASLEEP;
672 				break;
673 
674 			case THREAD_BLOCK_TYPE_SEMAPHORE:
675 			{
676 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
677 				if (sem == thread->msg.read_sem)
678 					info->state = B_THREAD_RECEIVING;
679 				break;
680 			}
681 
682 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
683 			default:
684 				break;
685 		}
686 	} else
687 		info->state = (thread_state)thread->state;
688 
689 	info->priority = thread->priority;
690 	info->user_time = thread->user_time;
691 	info->kernel_time = thread->kernel_time;
692 	info->stack_base = (void *)thread->user_stack_base;
693 	info->stack_end = (void *)(thread->user_stack_base
694 		+ thread->user_stack_size);
695 	info->sem = get_thread_wait_sem(thread);
696 }
697 
698 static status_t
699 send_data_etc(thread_id id, int32 code, const void *buffer,
700 	size_t bufferSize, int32 flags)
701 {
702 	struct thread *target;
703 	sem_id cachedSem;
704 	cpu_status state;
705 	status_t status;
706 	cbuf *data;
707 
708 	state = disable_interrupts();
709 	GRAB_THREAD_LOCK();
710 	target = thread_get_thread_struct_locked(id);
711 	if (!target) {
712 		RELEASE_THREAD_LOCK();
713 		restore_interrupts(state);
714 		return B_BAD_THREAD_ID;
715 	}
716 	cachedSem = target->msg.write_sem;
717 	RELEASE_THREAD_LOCK();
718 	restore_interrupts(state);
719 
720 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
721 		return B_NO_MEMORY;
722 
723 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
724 	if (status == B_INTERRUPTED) {
725 		// We got interrupted by a signal
726 		return status;
727 	}
728 	if (status != B_OK) {
729 		// Any other acquisition problems may be due to thread deletion
730 		return B_BAD_THREAD_ID;
731 	}
732 
733 	if (bufferSize > 0) {
734 		data = cbuf_get_chain(bufferSize);
735 		if (data == NULL)
736 			return B_NO_MEMORY;
737 		status = cbuf_user_memcpy_to_chain(data, 0, buffer, bufferSize);
738 		if (status < B_OK) {
739 			cbuf_free_chain(data);
740 			return B_NO_MEMORY;
741 		}
742 	} else
743 		data = NULL;
744 
745 	state = disable_interrupts();
746 	GRAB_THREAD_LOCK();
747 
748 	// The target thread could have been deleted at this point
749 	target = thread_get_thread_struct_locked(id);
750 	if (target == NULL) {
751 		RELEASE_THREAD_LOCK();
752 		restore_interrupts(state);
753 		cbuf_free_chain(data);
754 		return B_BAD_THREAD_ID;
755 	}
756 
757 	// Save message information
758 	target->msg.sender = thread_get_current_thread()->id;
759 	target->msg.code = code;
760 	target->msg.size = bufferSize;
761 	target->msg.buffer = data;
762 	cachedSem = target->msg.read_sem;
763 
764 	RELEASE_THREAD_LOCK();
765 	restore_interrupts(state);
766 
767 	release_sem(cachedSem);
768 	return B_OK;
769 }
770 
771 
772 static int32
773 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
774 	int32 flags)
775 {
776 	struct thread *thread = thread_get_current_thread();
777 	status_t status;
778 	size_t size;
779 	int32 code;
780 
781 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
782 	if (status < B_OK) {
783 		// Actually, we're not supposed to return error codes
784 		// but since the only reason this can fail is that we
785 		// were killed, it's probably okay to do so (but also
786 		// meaningless).
787 		return status;
788 	}
789 
790 	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
791 		size = min_c(bufferSize, thread->msg.size);
792 		status = cbuf_user_memcpy_from_chain(buffer, thread->msg.buffer,
793 			0, size);
794 		if (status < B_OK) {
795 			cbuf_free_chain(thread->msg.buffer);
796 			release_sem(thread->msg.write_sem);
797 			return status;
798 		}
799 	}
800 
801 	*_sender = thread->msg.sender;
802 	code = thread->msg.code;
803 
804 	cbuf_free_chain(thread->msg.buffer);
805 	release_sem(thread->msg.write_sem);
806 
807 	return code;
808 }
809 
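/*!	Usage sketch for the messaging helpers above; the thread and code values
	are made up, and it is assumed that the public send_data()/receive_data()
	wrappers forward here with \c B_CAN_INTERRUPT. Note that the cbuf copy
	helpers address userland buffers.

	\code
	// sender side
	int32 value = 42;
	status_t error = send_data_etc(targetThread, 'code', &value, sizeof(value),
		B_CAN_INTERRUPT);

	// receiver side (runs in targetThread)
	thread_id sender;
	int32 received;
	int32 code = receive_data_etc(&sender, &received, sizeof(received),
		B_CAN_INTERRUPT);
	\endcode
*/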
810 
811 static status_t
812 common_getrlimit(int resource, struct rlimit * rlp)
813 {
814 	if (!rlp)
815 		return B_BAD_ADDRESS;
816 
817 	switch (resource) {
818 		case RLIMIT_NOFILE:
819 		case RLIMIT_NOVMON:
820 			return vfs_getrlimit(resource, rlp);
821 
822 		case RLIMIT_CORE:
823 			rlp->rlim_cur = 0;
824 			rlp->rlim_max = 0;
825 			return B_OK;
826 
827 		case RLIMIT_STACK:
828 		{
829 			struct thread *thread = thread_get_current_thread();
830 			if (!thread)
831 				return B_ERROR;
832 			rlp->rlim_cur = thread->user_stack_size;
833 			rlp->rlim_max = thread->user_stack_size;
834 			return B_OK;
835 		}
836 
837 		default:
838 			return EINVAL;
839 	}
840 
841 	return B_OK;
842 }
843 
844 
845 static status_t
846 common_setrlimit(int resource, const struct rlimit * rlp)
847 {
848 	if (!rlp)
849 		return B_BAD_ADDRESS;
850 
851 	switch (resource) {
852 		case RLIMIT_NOFILE:
853 		case RLIMIT_NOVMON:
854 			return vfs_setrlimit(resource, rlp);
855 
856 		case RLIMIT_CORE:
857 			// We don't support core files, so only allow setting them to 0/0.
858 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
859 				return EINVAL;
860 			return B_OK;
861 
862 		default:
863 			return EINVAL;
864 	}
865 
866 	return B_OK;
867 }
868 
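/*!	Usage sketch for the rlimit backends above (hypothetical caller, shown for
	illustration only): querying the calling thread's stack limit.

	\code
	struct rlimit rl;
	if (common_getrlimit(RLIMIT_STACK, &rl) == B_OK) {
		// rl.rlim_cur and rl.rlim_max both hold the user stack size
	}
	\endcode
*/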
869 
870 //	#pragma mark - debugger calls
871 
872 
873 static int
874 make_thread_unreal(int argc, char **argv)
875 {
876 	struct thread *thread;
877 	struct hash_iterator i;
878 	int32 id = -1;
879 
880 	if (argc > 2) {
881 		print_debugger_command_usage(argv[0]);
882 		return 0;
883 	}
884 
885 	if (argc > 1)
886 		id = strtoul(argv[1], NULL, 0);
887 
888 	hash_open(sThreadHash, &i);
889 
890 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
891 		if (id != -1 && thread->id != id)
892 			continue;
893 
894 		if (thread->priority > B_DISPLAY_PRIORITY) {
895 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
896 			kprintf("thread %ld made unreal\n", thread->id);
897 		}
898 	}
899 
900 	hash_close(sThreadHash, &i, false);
901 	return 0;
902 }
903 
904 
905 static int
906 set_thread_prio(int argc, char **argv)
907 {
908 	struct thread *thread;
909 	struct hash_iterator i;
910 	int32 id;
911 	int32 prio;
912 
913 	if (argc > 3 || argc < 2) {
914 		print_debugger_command_usage(argv[0]);
915 		return 0;
916 	}
917 
918 	prio = strtoul(argv[1], NULL, 0);
919 	if (prio > THREAD_MAX_SET_PRIORITY)
920 		prio = THREAD_MAX_SET_PRIORITY;
921 	if (prio < THREAD_MIN_SET_PRIORITY)
922 		prio = THREAD_MIN_SET_PRIORITY;
923 
924 	if (argc > 2)
925 		id = strtoul(argv[2], NULL, 0);
926 	else
927 		id = thread_get_current_thread()->id;
928 
929 	hash_open(sThreadHash, &i);
930 
931 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
932 		if (thread->id != id)
933 			continue;
934 		thread->priority = thread->next_priority = prio;
935 		kprintf("thread %ld set to priority %ld\n", id, prio);
936 		break;
937 	}
938 	if (!thread)
939 		kprintf("thread %ld (%#lx) not found\n", id, id);
940 
941 	hash_close(sThreadHash, &i, false);
942 	return 0;
943 }
944 
945 
946 static int
947 make_thread_suspended(int argc, char **argv)
948 {
949 	struct thread *thread;
950 	struct hash_iterator i;
951 	int32 id;
952 
953 	if (argc > 2) {
954 		print_debugger_command_usage(argv[0]);
955 		return 0;
956 	}
957 
958 	if (argc == 1)
959 		id = thread_get_current_thread()->id;
960 	else
961 		id = strtoul(argv[1], NULL, 0);
962 
963 	hash_open(sThreadHash, &i);
964 
965 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
966 		if (thread->id != id)
967 			continue;
968 
969 		thread->next_state = B_THREAD_SUSPENDED;
970 		kprintf("thread %ld suspended\n", id);
971 		break;
972 	}
973 	if (!thread)
974 		kprintf("thread %ld (%#lx) not found\n", id, id);
975 
976 	hash_close(sThreadHash, &i, false);
977 	return 0;
978 }
979 
980 
981 static int
982 make_thread_resumed(int argc, char **argv)
983 {
984 	struct thread *thread;
985 	struct hash_iterator i;
986 	int32 id;
987 
988 	if (argc != 2) {
989 		print_debugger_command_usage(argv[0]);
990 		return 0;
991 	}
992 
993 	// force user to enter a thread id, as using
994 	// the current thread is usually not intended
995 	id = strtoul(argv[1], NULL, 0);
996 
997 	hash_open(sThreadHash, &i);
998 
999 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1000 		if (thread->id != id)
1001 			continue;
1002 
1003 		if (thread->state == B_THREAD_SUSPENDED) {
1004 			scheduler_enqueue_in_run_queue(thread);
1005 			kprintf("thread %ld resumed\n", thread->id);
1006 		}
1007 		break;
1008 	}
1009 	if (!thread)
1010 		kprintf("thread %ld (%#lx) not found\n", id, id);
1011 
1012 	hash_close(sThreadHash, &i, false);
1013 	return 0;
1014 }
1015 
1016 
1017 static int
1018 drop_into_debugger(int argc, char **argv)
1019 {
1020 	status_t err;
1021 	int32 id;
1022 
1023 	if (argc > 2) {
1024 		print_debugger_command_usage(argv[0]);
1025 		return 0;
1026 	}
1027 
1028 	if (argc == 1)
1029 		id = thread_get_current_thread()->id;
1030 	else
1031 		id = strtoul(argv[1], NULL, 0);
1032 
1033 	err = _user_debug_thread(id);
1034 	if (err)
1035 		kprintf("drop failed\n");
1036 	else
1037 		kprintf("thread %ld dropped into user debugger\n", id);
1038 
1039 	return 0;
1040 }
1041 
1042 
1043 static const char *
1044 state_to_text(struct thread *thread, int32 state)
1045 {
1046 	switch (state) {
1047 		case B_THREAD_READY:
1048 			return "ready";
1049 
1050 		case B_THREAD_RUNNING:
1051 			return "running";
1052 
1053 		case B_THREAD_WAITING:
1054 		{
1055 			if (thread != NULL) {
1056 				switch (thread->wait.type) {
1057 					case THREAD_BLOCK_TYPE_SNOOZE:
1058 						return "zzz";
1059 
1060 					case THREAD_BLOCK_TYPE_SEMAPHORE:
1061 					{
1062 						sem_id sem = (sem_id)(addr_t)thread->wait.object;
1063 						if (sem == thread->msg.read_sem)
1064 							return "receive";
1065 						break;
1066 					}
1067 				}
1068 			}
1069 
1070 			return "waiting";
1071 		}
1072 
1073 		case B_THREAD_SUSPENDED:
1074 			return "suspended";
1075 
1076 		case THREAD_STATE_FREE_ON_RESCHED:
1077 			return "death";
1078 
1079 		default:
1080 			return "UNKNOWN";
1081 	}
1082 }
1083 
1084 
1085 static void
1086 print_thread_list_table_head()
1087 {
1088 	kprintf("thread         id  state     wait for   object  cpu pri  stack    "
1089 		"  team  name\n");
1090 }
1091 
1092 
1093 static void
1094 _dump_thread_info(struct thread *thread, bool shortInfo)
1095 {
1096 	if (shortInfo) {
1097 		kprintf("%p %6ld  %-10s", thread, thread->id, state_to_text(thread,
1098 			thread->state));
1099 
1100 		// does it block on a semaphore or a condition variable?
1101 		if (thread->state == B_THREAD_WAITING) {
1102 			switch (thread->wait.type) {
1103 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1104 				{
1105 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1106 					if (sem == thread->msg.read_sem)
1107 						kprintf("                    ");
1108 					else
1109 						kprintf("sem  %12ld   ", sem);
1110 					break;
1111 				}
1112 
1113 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1114 					kprintf("cvar   %p   ", thread->wait.object);
1115 					break;
1116 
1117 				case THREAD_BLOCK_TYPE_SNOOZE:
1118 					kprintf("                    ");
1119 					break;
1120 
1121 				case THREAD_BLOCK_TYPE_SIGNAL:
1122 					kprintf("signal              ");
1123 					break;
1124 
1125 				case THREAD_BLOCK_TYPE_MUTEX:
1126 					kprintf("mutex  %p   ", thread->wait.object);
1127 					break;
1128 
1129 				case THREAD_BLOCK_TYPE_RW_LOCK:
1130 					kprintf("rwlock %p   ", thread->wait.object);
1131 					break;
1132 
1133 				case THREAD_BLOCK_TYPE_OTHER:
1134 					kprintf("other               ");
1135 					break;
1136 
1137 				default:
1138 					kprintf("???    %p   ", thread->wait.object);
1139 					break;
1140 			}
1141 		} else
1142 			kprintf("        -           ");
1143 
1144 		// on which CPU does it run?
1145 		if (thread->cpu)
1146 			kprintf("%2d", thread->cpu->cpu_num);
1147 		else
1148 			kprintf(" -");
1149 
1150 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1151 			(void *)thread->kernel_stack_base, thread->team->id,
1152 			thread->name != NULL ? thread->name : "<NULL>");
1153 
1154 		return;
1155 	}
1156 
1157 	// print the long info
1158 
1159 	struct death_entry *death = NULL;
1160 
1161 	kprintf("THREAD: %p\n", thread);
1162 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1163 	kprintf("name:               \"%s\"\n", thread->name);
1164 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1165 		thread->all_next, thread->team_next, thread->queue_next);
1166 	kprintf("priority:           %ld (next %ld, I/O: %ld)\n", thread->priority,
1167 		thread->next_priority, thread->io_priority);
1168 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1169 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1170 	kprintf("cpu:                %p ", thread->cpu);
1171 	if (thread->cpu)
1172 		kprintf("(%d)\n", thread->cpu->cpu_num);
1173 	else
1174 		kprintf("\n");
1175 	kprintf("sig_pending:        %#lx (blocked: %#lx)\n", thread->sig_pending,
1176 		thread->sig_block_mask);
1177 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1178 
1179 	if (thread->state == B_THREAD_WAITING) {
1180 		kprintf("waiting for:        ");
1181 
1182 		switch (thread->wait.type) {
1183 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1184 			{
1185 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1186 				if (sem == thread->msg.read_sem)
1187 					kprintf("data\n");
1188 				else
1189 					kprintf("semaphore %ld\n", sem);
1190 				break;
1191 			}
1192 
1193 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1194 				kprintf("condition variable %p\n", thread->wait.object);
1195 				break;
1196 
1197 			case THREAD_BLOCK_TYPE_SNOOZE:
1198 				kprintf("snooze()\n");
1199 				break;
1200 
1201 			case THREAD_BLOCK_TYPE_SIGNAL:
1202 				kprintf("signal\n");
1203 				break;
1204 
1205 			case THREAD_BLOCK_TYPE_MUTEX:
1206 				kprintf("mutex %p\n", thread->wait.object);
1207 				break;
1208 
1209 			case THREAD_BLOCK_TYPE_RW_LOCK:
1210 				kprintf("rwlock %p\n", thread->wait.object);
1211 				break;
1212 
1213 			case THREAD_BLOCK_TYPE_OTHER:
1214 				kprintf("other (%s)\n", (char*)thread->wait.object);
1215 				break;
1216 
1217 			default:
1218 				kprintf("unknown (%p)\n", thread->wait.object);
1219 				break;
1220 		}
1221 	}
1222 
1223 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1224 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1225 	kprintf("entry:              %p\n", (void *)thread->entry);
1226 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1227 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1228 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1229 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1230 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1231 	kprintf("  exit.waiters:\n");
1232 	while ((death = (struct death_entry*)list_get_next_item(
1233 			&thread->exit.waiters, death)) != NULL) {
1234 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1235 	}
1236 
1237 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1238 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1239 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1240 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1241 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1242 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1243 		strerror(thread->kernel_errno));
1244 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1245 	kprintf("user_time:          %Ld\n", thread->user_time);
1246 	kprintf("flags:              0x%lx\n", thread->flags);
1247 	kprintf("architecture dependent section:\n");
1248 	arch_thread_dump_info(&thread->arch_info);
1249 }
1250 
1251 
1252 static int
1253 dump_thread_info(int argc, char **argv)
1254 {
1255 	bool shortInfo = false;
1256 	int argi = 1;
1257 	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
1258 		shortInfo = true;
1259 		print_thread_list_table_head();
1260 		argi++;
1261 	}
1262 
1263 	if (argi == argc) {
1264 		_dump_thread_info(thread_get_current_thread(), shortInfo);
1265 		return 0;
1266 	}
1267 
1268 	for (; argi < argc; argi++) {
1269 		const char *name = argv[argi];
1270 		int32 id = strtoul(name, NULL, 0);
1271 
1272 		if (IS_KERNEL_ADDRESS(id)) {
1273 			// semi-hack
1274 			_dump_thread_info((struct thread *)id, shortInfo);
1275 			continue;
1276 		}
1277 
1278 		// walk through the thread list, trying to match name or id
1279 		bool found = false;
1280 		struct hash_iterator i;
1281 		hash_open(sThreadHash, &i);
1282 		struct thread *thread;
1283 		while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1284 			if (!strcmp(name, thread->name) || thread->id == id) {
1285 				_dump_thread_info(thread, shortInfo);
1286 				found = true;
1287 				break;
1288 			}
1289 		}
1290 		hash_close(sThreadHash, &i, false);
1291 
1292 		if (!found)
1293 			kprintf("thread \"%s\" (%ld) doesn't exist!\n", name, id);
1294 	}
1295 
1296 	return 0;
1297 }
1298 
1299 
1300 static int
1301 dump_thread_list(int argc, char **argv)
1302 {
1303 	struct thread *thread;
1304 	struct hash_iterator i;
1305 	bool realTimeOnly = false;
1306 	bool calling = false;
1307 	const char *callSymbol = NULL;
1308 	addr_t callStart = 0;
1309 	addr_t callEnd = 0;
1310 	int32 requiredState = 0;
1311 	team_id team = -1;
1312 	sem_id sem = -1;
1313 
1314 	if (!strcmp(argv[0], "realtime"))
1315 		realTimeOnly = true;
1316 	else if (!strcmp(argv[0], "ready"))
1317 		requiredState = B_THREAD_READY;
1318 	else if (!strcmp(argv[0], "running"))
1319 		requiredState = B_THREAD_RUNNING;
1320 	else if (!strcmp(argv[0], "waiting")) {
1321 		requiredState = B_THREAD_WAITING;
1322 
1323 		if (argc > 1) {
1324 			sem = strtoul(argv[1], NULL, 0);
1325 			if (sem == 0)
1326 				kprintf("ignoring invalid semaphore argument.\n");
1327 		}
1328 	} else if (!strcmp(argv[0], "calling")) {
1329 		if (argc < 2) {
1330 			kprintf("Need to give a symbol name or start and end arguments.\n");
1331 			return 0;
1332 		} else if (argc == 3) {
1333 			callStart = parse_expression(argv[1]);
1334 			callEnd = parse_expression(argv[2]);
1335 		} else
1336 			callSymbol = argv[1];
1337 
1338 		calling = true;
1339 	} else if (argc > 1) {
1340 		team = strtoul(argv[1], NULL, 0);
1341 		if (team == 0)
1342 			kprintf("ignoring invalid team argument.\n");
1343 	}
1344 
1345 	print_thread_list_table_head();
1346 
1347 	hash_open(sThreadHash, &i);
1348 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1349 		// filter out threads not matching the search criteria
1350 		if ((requiredState && thread->state != requiredState)
1351 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1352 					callStart, callEnd))
1353 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1354 			|| (team > 0 && thread->team->id != team)
1355 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1356 			continue;
1357 
1358 		_dump_thread_info(thread, true);
1359 	}
1360 	hash_close(sThreadHash, &i, false);
1361 	return 0;
1362 }
1363 
1364 
1365 //	#pragma mark - private kernel API
1366 
1367 
1368 void
1369 thread_exit(void)
1370 {
1371 	cpu_status state;
1372 	struct thread *thread = thread_get_current_thread();
1373 	struct team *team = thread->team;
1374 	thread_id parentID = -1;
1375 	bool deleteTeam = false;
1376 	sem_id cachedDeathSem = -1;
1377 	status_t status;
1378 	struct thread_debug_info debugInfo;
1379 	team_id teamID = team->id;
1380 
1381 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1382 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1383 			? "due to signal" : "normally", thread->exit.status));
1384 
1385 	if (!are_interrupts_enabled())
1386 		panic("thread_exit() called with interrupts disabled!\n");
1387 
1388 	// boost our priority to get this over with
1389 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1390 
1391 	// Cancel previously installed alarm timer, if any
1392 	cancel_timer(&thread->alarm);
1393 
1394 	// delete the user stack area first, we won't need it anymore
1395 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1396 		area_id area = thread->user_stack_area;
1397 		thread->user_stack_area = -1;
1398 		vm_delete_area(team->id, area, true);
1399 	}
1400 
1401 	struct job_control_entry *death = NULL;
1402 	struct death_entry* threadDeathEntry = NULL;
1403 	ConditionVariableEntry waitForDebuggerEntry;
1404 	bool waitForDebugger = false;
1405 
1406 	if (team != team_get_kernel_team()) {
1407 		user_debug_thread_exiting(thread);
1408 
1409 		if (team->main_thread == thread) {
1410 			// this was the main thread in this team, so we will delete the team as well
1411 			deleteTeam = true;
1412 		} else {
1413 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1414 			team_free_user_thread(thread);
1415 		}
1416 
1417 		// remove this thread from the current team and add it to the kernel
1418 		// team; the thread stays there until it dies
1419 		state = disable_interrupts();
1420 		GRAB_TEAM_LOCK();
1421 		GRAB_THREAD_LOCK();
1422 			// removing the thread and putting its death entry to the parent
1423 			// team needs to be an atomic operation
1424 
1425 		// remember how long this thread lasted
1426 		team->dead_threads_kernel_time += thread->kernel_time;
1427 		team->dead_threads_user_time += thread->user_time;
1428 
1429 		remove_thread_from_team(team, thread);
1430 		insert_thread_into_team(team_get_kernel_team(), thread);
1431 
1432 		cachedDeathSem = team->death_sem;
1433 
1434 		if (deleteTeam) {
1435 			// If a debugger change is in progress for the team, we'll have to
1436 			// wait until it is done later.
1437 			GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1438 			if (team->debug_info.debugger_changed_condition != NULL) {
1439 				team->debug_info.debugger_changed_condition->Add(
1440 					&waitForDebuggerEntry);
1441 				waitForDebugger = true;
1442 			}
1443 			RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1444 
1445 			struct team *parent = team->parent;
1446 
1447 			// remember who our parent was so we can send a signal
1448 			parentID = parent->id;
1449 
1450 			// Set the team job control state to "dead" and detach the job
1451 			// control entry from our team struct.
1452 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1453 			death = team->job_control_entry;
1454 			team->job_control_entry = NULL;
1455 
1456 			if (death != NULL) {
1457 				death->InitDeadState();
1458 
1459 				// team_set_job_control_state() already moved our entry
1460 				// into the parent's list. We just check the soft limit of
1461 				// death entries.
1462 				if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
1463 					death = parent->dead_children->entries.RemoveHead();
1464 					parent->dead_children->count--;
1465 				} else
1466 					death = NULL;
1467 
1468 				RELEASE_THREAD_LOCK();
1469 			} else
1470 				RELEASE_THREAD_LOCK();
1471 
1472 			team_remove_team(team);
1473 
1474 			send_signal_etc(parentID, SIGCHLD,
1475 				SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
1476 		} else {
1477 			// The thread is not the main thread. We store a thread death
1478 			// entry for it, unless someone is already waiting for it.
1479 			if (threadDeathEntry != NULL
1480 				&& list_is_empty(&thread->exit.waiters)) {
1481 				threadDeathEntry->thread = thread->id;
1482 				threadDeathEntry->status = thread->exit.status;
1483 				threadDeathEntry->reason = thread->exit.reason;
1484 				threadDeathEntry->signal = thread->exit.signal;
1485 
1486 				// add entry -- remove an old one, if we hit the limit
1487 				list_add_item(&team->dead_threads, threadDeathEntry);
1488 				team->dead_threads_count++;
1489 				threadDeathEntry = NULL;
1490 
1491 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1492 					threadDeathEntry = (death_entry*)list_remove_head_item(
1493 						&team->dead_threads);
1494 					team->dead_threads_count--;
1495 				}
1496 			}
1497 
1498 			RELEASE_THREAD_LOCK();
1499 		}
1500 
1501 		RELEASE_TEAM_LOCK();
1502 
1503 		// swap address spaces, to make sure we're running on the kernel's pgdir
1504 		vm_swap_address_space(team->address_space, vm_kernel_address_space());
1505 		restore_interrupts(state);
1506 
1507 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1508 	}
1509 
1510 	if (threadDeathEntry != NULL)
1511 		free(threadDeathEntry);
1512 
1513 	// delete the team if we're its main thread
1514 	if (deleteTeam) {
1515 		// wait for a debugger change to be finished first
1516 		if (waitForDebugger)
1517 			waitForDebuggerEntry.Wait();
1518 
1519 		team_delete_team(team);
1520 
1521 		// we need to delete any death entry that made it to here
1522 		if (death != NULL)
1523 			delete death;
1524 
1525 		cachedDeathSem = -1;
1526 	}
1527 
1528 	state = disable_interrupts();
1529 	GRAB_THREAD_LOCK();
1530 
1531 	// remove thread from hash, so it's no longer accessible
1532 	hash_remove(sThreadHash, thread);
1533 	sUsedThreads--;
1534 
1535 	// Stop debugging for this thread
1536 	debugInfo = thread->debug_info;
1537 	clear_thread_debug_info(&thread->debug_info, true);
1538 
1539 	// Remove the select infos. We notify them a little later.
1540 	select_info* selectInfos = thread->select_infos;
1541 	thread->select_infos = NULL;
1542 
1543 	RELEASE_THREAD_LOCK();
1544 	restore_interrupts(state);
1545 
1546 	destroy_thread_debug_info(&debugInfo);
1547 
1548 	// notify select infos
1549 	select_info* info = selectInfos;
1550 	while (info != NULL) {
1551 		select_sync* sync = info->sync;
1552 
1553 		notify_select_events(info, B_EVENT_INVALID);
1554 		info = info->next;
1555 		put_select_sync(sync);
1556 	}
1557 
1558 	// notify listeners
1559 	sNotificationService.Notify(THREAD_REMOVED, thread);
1560 
1561 	// shutdown the thread messaging
1562 
1563 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1564 	if (status == B_WOULD_BLOCK) {
1565 		// there is data waiting for us, so let us eat it
1566 		thread_id sender;
1567 
1568 		delete_sem(thread->msg.write_sem);
1569 			// first, let's remove all possibly waiting writers
1570 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1571 	} else {
1572 		// we probably own the semaphore here, and we're the last to do so
1573 		delete_sem(thread->msg.write_sem);
1574 	}
1575 	// now we can safely remove the msg.read_sem
1576 	delete_sem(thread->msg.read_sem);
1577 
1578 	// fill all death entries and delete the sem that others will use to wait on us
1579 	{
1580 		sem_id cachedExitSem = thread->exit.sem;
1581 		cpu_status state;
1582 
1583 		state = disable_interrupts();
1584 		GRAB_THREAD_LOCK();
1585 
1586 		// make sure no one will grab this semaphore again
1587 		thread->exit.sem = -1;
1588 
1589 		// fill all death entries
1590 		death_entry* entry = NULL;
1591 		while ((entry = (struct death_entry*)list_get_next_item(
1592 				&thread->exit.waiters, entry)) != NULL) {
1593 			entry->status = thread->exit.status;
1594 			entry->reason = thread->exit.reason;
1595 			entry->signal = thread->exit.signal;
1596 		}
1597 
1598 		RELEASE_THREAD_LOCK();
1599 		restore_interrupts(state);
1600 
1601 		delete_sem(cachedExitSem);
1602 	}
1603 
1604 	// notify the debugger
1605 	if (teamID != team_get_kernel_team_id())
1606 		user_debug_thread_deleted(teamID, thread->id);
1607 
1608 	// enqueue in the undertaker list and reschedule for the last time
1609 	UndertakerEntry undertakerEntry(thread, teamID, cachedDeathSem);
1610 
1611 	disable_interrupts();
1612 	GRAB_THREAD_LOCK();
1613 
1614 	sUndertakerEntries.Add(&undertakerEntry);
1615 	sUndertakerCondition.NotifyOne(true);
1616 
1617 	thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
1618 	scheduler_reschedule();
1619 
1620 	panic("never can get here\n");
1621 }
1622 
1623 
1624 struct thread *
1625 thread_get_thread_struct(thread_id id)
1626 {
1627 	struct thread *thread;
1628 	cpu_status state;
1629 
1630 	state = disable_interrupts();
1631 	GRAB_THREAD_LOCK();
1632 
1633 	thread = thread_get_thread_struct_locked(id);
1634 
1635 	RELEASE_THREAD_LOCK();
1636 	restore_interrupts(state);
1637 
1638 	return thread;
1639 }
1640 
1641 
1642 struct thread *
1643 thread_get_thread_struct_locked(thread_id id)
1644 {
1645 	struct thread_key key;
1646 
1647 	key.id = id;
1648 
1649 	return (struct thread*)hash_lookup(sThreadHash, &key);
1650 }
1651 
1652 
1653 /*!
1654 	Called in the interrupt handler code when a thread enters
1655 	the kernel for any reason.
1656 	Only tracks time for now.
1657 	Interrupts are disabled.
1658 */
1659 void
1660 thread_at_kernel_entry(bigtime_t now)
1661 {
1662 	struct thread *thread = thread_get_current_thread();
1663 
1664 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1665 
1666 	// track user time
1667 	thread->user_time += now - thread->last_time;
1668 	thread->last_time = now;
1669 
1670 	thread->in_kernel = true;
1671 }
1672 
1673 
1674 /*!
1675 	Called whenever a thread exits kernel space to user space.
1676 	Tracks time, handles signals, ...
1677 	Interrupts must be enabled. When the function returns, interrupts will be
1678 	disabled.
1679 */
1680 void
1681 thread_at_kernel_exit(void)
1682 {
1683 	struct thread *thread = thread_get_current_thread();
1684 
1685 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1686 
1687 	while (handle_signals(thread)) {
1688 		InterruptsSpinLocker _(gThreadSpinlock);
1689 		scheduler_reschedule();
1690 	}
1691 
1692 	disable_interrupts();
1693 
1694 	thread->in_kernel = false;
1695 
1696 	// track kernel time
1697 	bigtime_t now = system_time();
1698 	thread->kernel_time += now - thread->last_time;
1699 	thread->last_time = now;
1700 }
1701 
1702 
1703 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1704 	and no debugging shall be done.
1705 	Interrupts must be disabled.
1706 */
1707 void
1708 thread_at_kernel_exit_no_signals(void)
1709 {
1710 	struct thread *thread = thread_get_current_thread();
1711 
1712 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1713 
1714 	thread->in_kernel = false;
1715 
1716 	// track kernel time
1717 	bigtime_t now = system_time();
1718 	thread->kernel_time += now - thread->last_time;
1719 	thread->last_time = now;
1720 }
1721 
1722 
1723 void
1724 thread_reset_for_exec(void)
1725 {
1726 	struct thread *thread = thread_get_current_thread();
1727 
1728 	cancel_timer(&thread->alarm);
1729 	reset_signals(thread);
1730 }
1731 
1732 
1733 /*! Insert a thread to the tail of a queue */
1734 void
1735 thread_enqueue(struct thread *thread, struct thread_queue *queue)
1736 {
1737 	thread->queue_next = NULL;
1738 	if (queue->head == NULL) {
1739 		queue->head = thread;
1740 		queue->tail = thread;
1741 	} else {
1742 		queue->tail->queue_next = thread;
1743 		queue->tail = thread;
1744 	}
1745 }
1746 
1747 
1748 struct thread *
1749 thread_lookat_queue(struct thread_queue *queue)
1750 {
1751 	return queue->head;
1752 }
1753 
1754 
1755 struct thread *
1756 thread_dequeue(struct thread_queue *queue)
1757 {
1758 	struct thread *thread = queue->head;
1759 
1760 	if (thread != NULL) {
1761 		queue->head = thread->queue_next;
1762 		if (queue->tail == thread)
1763 			queue->tail = NULL;
1764 	}
1765 	return thread;
1766 }
1767 
1768 
1769 struct thread *
1770 thread_dequeue_id(struct thread_queue *q, thread_id id)
1771 {
1772 	struct thread *thread;
1773 	struct thread *last = NULL;
1774 
1775 	thread = q->head;
1776 	while (thread != NULL) {
1777 		if (thread->id == id) {
1778 			if (last == NULL)
1779 				q->head = thread->queue_next;
1780 			else
1781 				last->queue_next = thread->queue_next;
1782 
1783 			if (q->tail == thread)
1784 				q->tail = last;
1785 			break;
1786 		}
1787 		last = thread;
1788 		thread = thread->queue_next;
1789 	}
1790 	return thread;
1791 }
1792 
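/*!	Usage sketch for the simple FIFO queue helpers above, as they are used for
	the dead queue in this file (the thread lock must be held):

	\code
	InterruptsSpinLocker locker(gThreadSpinlock);
	thread_enqueue(thread, &dead_q);
		// bury a thread struct for later reuse
	struct thread *recycled = thread_dequeue(&dead_q);
		// returns NULL if the queue is empty
	\endcode
*/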
1793 
1794 struct thread*
1795 thread_iterate_through_threads(thread_iterator_callback callback, void* cookie)
1796 {
1797 	struct hash_iterator iterator;
1798 	hash_open(sThreadHash, &iterator);
1799 
1800 	struct thread* thread;
1801 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
1802 			!= NULL) {
1803 		if (callback(thread, cookie))
1804 			break;
1805 	}
1806 
1807 	hash_close(sThreadHash, &iterator, false);
1808 
1809 	return thread;
1810 }
1811 
1812 
1813 thread_id
1814 allocate_thread_id(void)
1815 {
1816 	return atomic_add(&sNextThreadID, 1);
1817 }
1818 
1819 
1820 thread_id
1821 peek_next_thread_id(void)
1822 {
1823 	return atomic_get(&sNextThreadID);
1824 }
1825 
1826 
1827 /*!	Yield the CPU to other threads.
1828 	If \a force is \c true, the thread will almost certainly be unscheduled.
1829 	If \c false, it will keep running if no other thread is ready; even if
1830 	other threads are ready, it still has a good chance to continue as long
1831 	as its priority is higher than theirs.
1832 */
1833 void
1834 thread_yield(bool force)
1835 {
1836 	if (force) {
1837 		// snooze for roughly 3 thread quantums
1838 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1839 #if 0
1840 		cpu_status state;
1841 
1842 		struct thread *thread = thread_get_current_thread();
1843 		if (thread == NULL)
1844 			return;
1845 
1846 		state = disable_interrupts();
1847 		GRAB_THREAD_LOCK();
1848 
1849 		// mark the thread as yielded, so it will not be scheduled next
1850 		//thread->was_yielded = true;
1851 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1852 		scheduler_reschedule();
1853 
1854 		RELEASE_THREAD_LOCK();
1855 		restore_interrupts(state);
1856 #endif
1857 	} else {
1858 		struct thread *thread = thread_get_current_thread();
1859 		if (thread == NULL)
1860 			return;
1861 
1862 		// Don't force the thread off the CPU, just reschedule.
1863 		InterruptsSpinLocker _(gThreadSpinlock);
1864 		scheduler_reschedule();
1865 	}
1866 }
1867 
1868 
1869 /*!
1870 	Kernel private thread creation function.
1871 
1872 	\param threadID The ID to be assigned to the new thread. If
1873 		  \code < 0 \endcode a fresh one is allocated.
1874 */
1875 thread_id
1876 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1877 	void *arg, team_id team, thread_id threadID)
1878 {
1879 	thread_creation_attributes attributes;
1880 	attributes.entry = (thread_entry_func)function;
1881 	attributes.name = name;
1882 	attributes.priority = priority;
1883 	attributes.args1 = arg;
1884 	attributes.args2 = NULL;
1885 	attributes.stack_address = NULL;
1886 	attributes.stack_size = 0;
1887 	attributes.team = team;
1888 	attributes.thread = threadID;
1889 
1890 	return create_thread(attributes, true);
1891 }
1892 
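/*!	Usage sketch: spawning a kernel thread via the function above. The worker
	function name is made up for illustration; passing -1 as thread ID requests
	a fresh one.

	\code
	static status_t
	my_worker(void *data)
	{
		// ... do the actual work ...
		return B_OK;
	}

	thread_id worker = spawn_kernel_thread_etc(&my_worker, "my worker",
		B_NORMAL_PRIORITY, NULL, team_get_kernel_team_id(), -1);
	if (worker >= B_OK)
		resume_thread(worker);
	\endcode
*/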
1893 
1894 status_t
1895 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1896 	status_t *_returnCode)
1897 {
1898 	sem_id exitSem = B_BAD_THREAD_ID;
1899 	struct death_entry death;
1900 	job_control_entry* freeDeath = NULL;
1901 	struct thread *thread;
1902 	cpu_status state;
1903 	status_t status = B_OK;
1904 
1905 	if (id < B_OK)
1906 		return B_BAD_THREAD_ID;
1907 
1908 	// we need to resume the thread we're waiting for first
1909 
1910 	state = disable_interrupts();
1911 	GRAB_THREAD_LOCK();
1912 
1913 	thread = thread_get_thread_struct_locked(id);
1914 	if (thread != NULL) {
1915 		// remember the semaphore we have to wait on and place our death entry
1916 		exitSem = thread->exit.sem;
1917 		list_add_link_to_head(&thread->exit.waiters, &death);
1918 	}
1919 
1920 	death_entry* threadDeathEntry = NULL;
1921 
1922 	RELEASE_THREAD_LOCK();
1923 
1924 	if (thread == NULL) {
1925 		// we couldn't find this thread - maybe it's already gone, and we'll
1926 		// find its death entry in our team
1927 		GRAB_TEAM_LOCK();
1928 
1929 		struct team* team = thread_get_current_thread()->team;
1930 
1931 		// check the child death entries first (i.e. main threads of child
1932 		// teams)
1933 		bool deleteEntry;
1934 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1935 		if (freeDeath != NULL) {
1936 			death.status = freeDeath->status;
1937 			if (!deleteEntry)
1938 				freeDeath = NULL;
1939 		} else {
1940 			// check the thread death entries of the team (non-main threads)
1941 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1942 					&team->dead_threads, threadDeathEntry)) != NULL) {
1943 				if (threadDeathEntry->thread == id) {
1944 					list_remove_item(&team->dead_threads, threadDeathEntry);
1945 					team->dead_threads_count--;
1946 					death.status = threadDeathEntry->status;
1947 					break;
1948 				}
1949 			}
1950 
1951 			if (threadDeathEntry == NULL)
1952 				status = B_BAD_THREAD_ID;
1953 		}
1954 
1955 		RELEASE_TEAM_LOCK();
1956 	}
1957 
1958 	restore_interrupts(state);
1959 
1960 	if (thread == NULL && status == B_OK) {
1961 		// we found the thread's death entry in our team
1962 		if (_returnCode)
1963 			*_returnCode = death.status;
1964 
1965 		delete freeDeath;
1966 		free(threadDeathEntry);
1967 		return B_OK;
1968 	}
1969 
1970 	// we need to wait for the death of the thread
1971 
1972 	if (exitSem < B_OK)
1973 		return B_BAD_THREAD_ID;
1974 
1975 	resume_thread(id);
1976 		// make sure we don't wait forever on a suspended thread
1977 
1978 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1979 
1980 	if (status == B_OK) {
1981 		// this should never happen as the thread deletes the semaphore on exit
1982 		panic("unexpectedly acquired exit_sem for thread %ld\n", id);
1983 	} else if (status == B_BAD_SEM_ID) {
1984 		// this is the way the thread normally exits
1985 		status = B_OK;
1986 
1987 		if (_returnCode)
1988 			*_returnCode = death.status;
1989 	} else {
1990 		// We were probably interrupted; we need to remove our death entry now.
1991 		state = disable_interrupts();
1992 		GRAB_THREAD_LOCK();
1993 
1994 		thread = thread_get_thread_struct_locked(id);
1995 		if (thread != NULL)
1996 			list_remove_link(&death);
1997 
1998 		RELEASE_THREAD_LOCK();
1999 		restore_interrupts(state);
2000 
2001 		// If the thread is already gone, we need to wait for its exit semaphore
2002 		// to make sure our death entry stays valid - it won't take long
2003 		if (thread == NULL)
2004 			acquire_sem(exitSem);
2005 	}
2006 
2007 	return status;
2008 }
2009 
2010 
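/*!	Part of the wait_for_objects() support: registers the given select info
	with the thread, so that the selected events can be notified for it.
	Only \c B_EVENT_INVALID is supported at the moment; deselect_thread()
	removes the registration again.
*/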
2011 status_t
2012 select_thread(int32 id, struct select_info* info, bool kernel)
2013 {
2014 	InterruptsSpinLocker locker(gThreadSpinlock);
2015 
2016 	// get thread
2017 	struct thread* thread = thread_get_thread_struct_locked(id);
2018 	if (thread == NULL)
2019 		return B_BAD_THREAD_ID;
2020 
2021 	// We support only B_EVENT_INVALID at the moment.
2022 	info->selected_events &= B_EVENT_INVALID;
2023 
2024 	// add info to list
2025 	if (info->selected_events != 0) {
2026 		info->next = thread->select_infos;
2027 		thread->select_infos = info;
2028 
2029 		// we need a sync reference
2030 		atomic_add(&info->sync->ref_count, 1);
2031 	}
2032 
2033 	return B_OK;
2034 }
2035 
2036 
2037 status_t
2038 deselect_thread(int32 id, struct select_info* info, bool kernel)
2039 {
2040 	InterruptsSpinLocker locker(gThreadSpinlock);
2041 
2042 	// get thread
2043 	struct thread* thread = thread_get_thread_struct_locked(id);
2044 	if (thread == NULL)
2045 		return B_BAD_THREAD_ID;
2046 
2047 	// remove info from list
2048 	select_info** infoLocation = &thread->select_infos;
2049 	while (*infoLocation != NULL && *infoLocation != info)
2050 		infoLocation = &(*infoLocation)->next;
2051 
2052 	if (*infoLocation != info)
2053 		return B_OK;
2054 
2055 	*infoLocation = info->next;
2056 
2057 	locker.Unlock();
2058 
2059 	// surrender sync reference
2060 	put_select_sync(info->sync);
2061 
2062 	return B_OK;
2063 }
2064 
2065 
2066 int32
2067 thread_max_threads(void)
2068 {
2069 	return sMaxThreads;
2070 }
2071 
2072 
2073 int32
2074 thread_used_threads(void)
2075 {
2076 	return sUsedThreads;
2077 }
2078 
2079 
2080 const char*
2081 thread_state_to_text(struct thread* thread, int32 state)
2082 {
2083 	return state_to_text(thread, state);
2084 }
2085 
2086 
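/*!	Returns the I/O priority of the thread with the given ID, or, if no I/O
	priority has been set (i.e. it is negative), the thread's scheduling
	priority. Returns \c B_BAD_THREAD_ID if no thread with that ID exists.
*/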
2087 int32
2088 thread_get_io_priority(thread_id id)
2089 {
2090 	// take a shortcut, if it is the current thread
2091 	// take a shortcut if it is the current thread
2092 	int32 priority;
2093 	if (id == thread->id) {
2094 		int32 priority = thread->io_priority;
2095 		priority = thread->io_priority;
2096 	}
2097 
2098 	// not the current thread -- get it
2099 	InterruptsSpinLocker locker(gThreadSpinlock);
2100 
2101 	thread = thread_get_thread_struct_locked(id);
2102 	if (thread == NULL)
2103 		return B_BAD_THREAD_ID;
2104 
2105 	priority = thread->io_priority;
2106 	return priority < 0 ? thread->priority : priority;
2107 }
2108 
2109 
2110 void
2111 thread_set_io_priority(int32 priority)
2112 {
2113 	struct thread* thread = thread_get_current_thread();
2114 	thread->io_priority = priority;
2115 }
2116 
2117 
2118 status_t
2119 thread_init(kernel_args *args)
2120 {
2121 	uint32 i;
2122 
2123 	TRACE(("thread_init: entry\n"));
2124 
2125 	// create the thread hash table
2126 	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
2127 		&thread_struct_compare, &thread_struct_hash);
2128 
2129 	// zero out the dead thread structure q
2130 	// zero out the dead thread structure queue
2131 
2132 	if (arch_thread_init(args) < B_OK)
2133 		panic("arch_thread_init() failed!\n");
2134 
2135 	// skip all thread IDs including B_SYSTEM_TEAM, which is reserved
2136 	sNextThreadID = B_SYSTEM_TEAM + 1;
2137 
2138 	// create an idle thread for each cpu
2139 
2140 	for (i = 0; i < args->num_cpus; i++) {
2141 		struct thread *thread;
2142 		area_info info;
2143 		char name[64];
2144 
2145 		sprintf(name, "idle thread %lu", i + 1);
2146 		thread = create_thread_struct(&sIdleThreads[i], name,
2147 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
2148 		if (thread == NULL) {
2149 			panic("error creating idle thread struct\n");
2150 			return B_NO_MEMORY;
2151 		}
2152 
2153 		thread->team = team_get_kernel_team();
2154 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
2155 		thread->state = B_THREAD_RUNNING;
2156 		thread->next_state = B_THREAD_READY;
2157 		sprintf(name, "idle thread %lu kstack", i + 1);
2158 		thread->kernel_stack_area = find_area(name);
2159 		thread->entry = NULL;
2160 
2161 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2162 			panic("error finding idle kstack area\n");
2163 
2164 		thread->kernel_stack_base = (addr_t)info.address;
2165 		thread->kernel_stack_top = thread->kernel_stack_base + info.size;
2166 
2167 		hash_insert(sThreadHash, thread);
2168 		insert_thread_into_team(thread->team, thread);
2169 	}
2170 	sUsedThreads = args->num_cpus;
2171 
2172 	// init the notification service
2173 	new(&sNotificationService) ThreadNotificationService();
2174 
2175 	// start the undertaker thread
2176 	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2177 	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2178 
2179 	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2180 		B_DISPLAY_PRIORITY, NULL);
2181 	if (undertakerThread < 0)
2182 		panic("Failed to create undertaker thread!");
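	// The undertaker was spawned in the suspended state; SIGCONT wakes it up
	// without forcing an immediate reschedule.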
2183 	send_signal_etc(undertakerThread, SIGCONT, B_DO_NOT_RESCHEDULE);
2184 
2185 	// set up some debugger commands
2186 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2187 		"[ <team> ]\n"
2188 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2189 		"all threads of the specified team.\n"
2190 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2191 	add_debugger_command_etc("ready", &dump_thread_list,
2192 		"List all ready threads",
2193 		"\n"
2194 		"Prints a list of all threads in ready state.\n", 0);
2195 	add_debugger_command_etc("running", &dump_thread_list,
2196 		"List all running threads",
2197 		"\n"
2198 		"Prints a list of all threads in running state.\n", 0);
2199 	add_debugger_command_etc("waiting", &dump_thread_list,
2200 		"List all waiting threads (optionally for a specific semaphore)",
2201 		"[ <sem> ]\n"
2202 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2203 		"specified, only the threads waiting on that semaphore are listed.\n"
2204 		"  <sem>  - ID of the semaphore.\n", 0);
2205 	add_debugger_command_etc("realtime", &dump_thread_list,
2206 		"List all realtime threads",
2207 		"\n"
2208 		"Prints a list of all threads with realtime priority.\n", 0);
2209 	add_debugger_command_etc("thread", &dump_thread_info,
2210 		"Dump info about a particular thread",
2211 		"[ -s ] ( <id> | <address> | <name> )*\n"
2212 		"Prints information about the specified thread. If no argument is\n"
2213 		"given the current thread is selected.\n"
2214 		"  -s         - Print info in compact table form (like \"threads\").\n"
2215 		"  <id>       - The ID of the thread.\n"
2216 		"  <address>  - The address of the thread structure.\n"
2217 		"  <name>     - The thread's name.\n", 0);
2218 	add_debugger_command_etc("calling", &dump_thread_list,
2219 		"Show all threads that have a specific address in their call chain",
2220 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2221 	add_debugger_command_etc("unreal", &make_thread_unreal,
2222 		"Set realtime priority threads to normal priority",
2223 		"[ <id> ]\n"
2224 		"Sets the priority of all realtime threads or, if given, the one\n"
2225 		"with the specified ID to \"normal\" priority.\n"
2226 		"  <id>  - The ID of the thread.\n", 0);
2227 	add_debugger_command_etc("suspend", &make_thread_suspended,
2228 		"Suspend a thread",
2229 		"[ <id> ]\n"
2230 		"Suspends the thread with the given ID. If no ID argument is given\n"
2231 		"the current thread is selected.\n"
2232 		"  <id>  - The ID of the thread.\n", 0);
2233 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2234 		"<id>\n"
2235 		"Resumes the specified thread, if it is currently suspended.\n"
2236 		"  <id>  - The ID of the thread.\n", 0);
2237 	add_debugger_command_etc("drop", &drop_into_debugger,
2238 		"Drop a thread into the userland debugger",
2239 		"<id>\n"
2240 		"Drops the specified (userland) thread into the userland debugger\n"
2241 		"after leaving the kernel debugger.\n"
2242 		"  <id>  - The ID of the thread.\n", 0);
2243 	add_debugger_command_etc("priority", &set_thread_prio,
2244 		"Set a thread's priority",
2245 		"<priority> [ <id> ]\n"
2246 		"Sets the priority of the thread with the specified ID to the given\n"
2247 		"priority. If no thread ID is given, the current thread is selected.\n"
2248 		"  <priority>  - The thread's new priority (0 - 120)\n"
2249 		"  <id>        - The ID of the thread.\n", 0);
2250 
2251 	return B_OK;
2252 }
2253 
2254 
2255 status_t
2256 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2257 {
2258 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2259 	// so that get_current_cpu and friends will work, which is crucial for
2260 	// a lot of low level routines
2261 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2262 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2263 	return B_OK;
2264 }
2265 
2266 
2267 //	#pragma mark - thread blocking API
2268 
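/*	The functions below implement the kernel's generic thread blocking
	mechanism. A minimal usage sketch (how the waking side learns the ID of
	the blocked thread is left open here):

		// blocking side
		InterruptsSpinLocker locker(gThreadSpinlock);
		thread_prepare_to_block(thread_get_current_thread(), B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, NULL);
		status_t error = thread_block_locked(thread_get_current_thread());

		// waking side
		thread_unblock(blockedThread, B_OK);

	thread_prepare_to_block() and the *_locked() variants must be called with
	the thread spinlock held; thread_block() and thread_block_with_timeout()
	acquire it themselves. The status passed to thread_unblock() (or
	B_TIMED_OUT for the timeout variants) is what the blocking call returns.
*/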
2269 
2270 static status_t
2271 thread_block_timeout(timer* timer)
2272 {
2273 	// The timer has been installed with B_TIMER_ACQUIRE_THREAD_LOCK, so
2274 	// we're holding the thread lock already. This makes things comfortably
2275 	// easy.
2276 
2277 	struct thread* thread = (struct thread*)timer->user_data;
2278 	if (thread_unblock_locked(thread, B_TIMED_OUT)) {
2279 		// We actually woke up the thread. If it has a higher priority than the
2280 		// currently running thread, we invoke the scheduler.
2281 		// TODO: Is this really such a good idea or should we do that only when
2282 		// the woken up thread has realtime priority?
2283 		if (thread->priority > thread_get_current_thread()->priority)
2284 			return B_INVOKE_SCHEDULER;
2285 	}
2286 
2287 	return B_HANDLED_INTERRUPT;
2288 }
2289 
2290 
2291 status_t
2292 thread_block()
2293 {
2294 	InterruptsSpinLocker _(gThreadSpinlock);
2295 	return thread_block_locked(thread_get_current_thread());
2296 }
2297 
2298 
2299 bool
2300 thread_unblock(thread_id threadID, status_t status)
2301 {
2302 	InterruptsSpinLocker _(gThreadSpinlock);
2303 
2304 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2305 	if (thread == NULL)
2306 		return false;
2307 	return thread_unblock_locked(thread, status);
2308 }
2309 
2310 
2311 status_t
2312 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2313 {
2314 	InterruptsSpinLocker _(gThreadSpinlock);
2315 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
2316 }
2317 
2318 
2319 status_t
2320 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
2321 {
2322 	struct thread* thread = thread_get_current_thread();
2323 
2324 	if (thread->wait.status != 1)
2325 		return thread->wait.status;
2326 
2327 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2328 		&& timeout != B_INFINITE_TIMEOUT;
2329 
2330 	if (useTimer) {
2331 		// Timer flags: absolute/relative + "acquire thread lock". The latter
2332 		// avoids nasty race conditions and deadlock problems that could
2333 		// otherwise occur between our cancel_timer() and a concurrently
2334 		// executing thread_block_timeout().
2335 		uint32 timerFlags;
2336 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2337 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2338 		} else {
2339 			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2340 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2341 				timeout -= rtc_boot_time();
2342 		}
2343 		timerFlags |= B_TIMER_ACQUIRE_THREAD_LOCK;
2344 
2345 		// install the timer
2346 		thread->wait.unblock_timer.user_data = thread;
2347 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2348 			timerFlags);
2349 	}
2350 
2351 	// block
2352 	status_t error = thread_block_locked(thread);
2353 
2354 	// cancel timer, if it didn't fire
2355 	if (error != B_TIMED_OUT && useTimer)
2356 		cancel_timer(&thread->wait.unblock_timer);
2357 
2358 	return error;
2359 }
2360 
2361 
2362 /*!	Thread spinlock must be held.
2363 */
2364 static status_t
2365 user_unblock_thread(thread_id threadID, status_t status)
2366 {
2367 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2368 	if (thread == NULL)
2369 		return B_BAD_THREAD_ID;
2370 	if (thread->user_thread == NULL)
2371 		return B_NOT_ALLOWED;
2372 
2373 	if (thread->user_thread->wait_status > 0) {
2374 		thread->user_thread->wait_status = status;
2375 		thread_unblock_locked(thread, status);
2376 	}
2377 
2378 	return B_OK;
2379 }
2380 
2381 
2382 //	#pragma mark - public kernel API
2383 
2384 
2385 void
2386 exit_thread(status_t returnValue)
2387 {
2388 	struct thread *thread = thread_get_current_thread();
2389 
2390 	thread->exit.status = returnValue;
2391 	thread->exit.reason = THREAD_RETURN_EXIT;
2392 
2393 	// if called from a kernel thread, we don't deliver the signal,
2394 	// we just exit directly to keep the user space behaviour of
2395 	// this function
2396 	if (thread->team != team_get_kernel_team())
2397 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2398 	else
2399 		thread_exit();
2400 }
2401 
2402 
2403 status_t
2404 kill_thread(thread_id id)
2405 {
2406 	if (id <= 0)
2407 		return B_BAD_VALUE;
2408 
2409 	return send_signal(id, SIGKILLTHR);
2410 }
2411 
2412 
2413 status_t
2414 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2415 {
2416 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2417 }
2418 
2419 
2420 int32
2421 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2422 {
2423 	return receive_data_etc(sender, buffer, bufferSize, 0);
2424 }
2425 
2426 
2427 bool
2428 has_data(thread_id thread)
2429 {
2430 	int32 count;
2431 
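	// TODO: The "thread" argument is currently ignored; only the calling
	// thread's message queue is checked.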
2432 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
2433 			&count) != B_OK)
2434 		return false;
2435 
2436 	return count != 0;
2437 }
2438 
2439 
2440 status_t
2441 _get_thread_info(thread_id id, thread_info *info, size_t size)
2442 {
2443 	status_t status = B_OK;
2444 	struct thread *thread;
2445 	cpu_status state;
2446 
2447 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2448 		return B_BAD_VALUE;
2449 
2450 	state = disable_interrupts();
2451 	GRAB_THREAD_LOCK();
2452 
2453 	thread = thread_get_thread_struct_locked(id);
2454 	if (thread == NULL) {
2455 		status = B_BAD_VALUE;
2456 		goto err;
2457 	}
2458 
2459 	fill_thread_info(thread, info, size);
2460 
2461 err:
2462 	RELEASE_THREAD_LOCK();
2463 	restore_interrupts(state);
2464 
2465 	return status;
2466 }
2467 
2468 
2469 status_t
2470 _get_next_thread_info(team_id team, int32 *_cookie, thread_info *info,
2471 	size_t size)
2472 {
2473 	status_t status = B_BAD_VALUE;
2474 	struct thread *thread = NULL;
2475 	cpu_status state;
2476 	int slot;
2477 	thread_id lastThreadID;
2478 
2479 	if (info == NULL || size != sizeof(thread_info) || team < B_OK)
2480 		return B_BAD_VALUE;
2481 
2482 	if (team == B_CURRENT_TEAM)
2483 		team = team_get_current_team_id();
2484 	else if (!team_is_valid(team))
2485 		return B_BAD_VALUE;
2486 
2487 	slot = *_cookie;
2488 
2489 	state = disable_interrupts();
2490 	GRAB_THREAD_LOCK();
2491 
2492 	lastThreadID = peek_next_thread_id();
2493 	if (slot >= lastThreadID)
2494 		goto err;
2495 
2496 	while (slot < lastThreadID
2497 		&& (!(thread = thread_get_thread_struct_locked(slot))
2498 			|| thread->team->id != team))
2499 		slot++;
2500 
2501 	if (thread != NULL && thread->team->id == team) {
2502 		fill_thread_info(thread, info, size);
2503 
2504 		*_cookie = slot + 1;
2505 		status = B_OK;
2506 	}
2507 
2508 err:
2509 	RELEASE_THREAD_LOCK();
2510 	restore_interrupts(state);
2511 
2512 	return status;
2513 }
2514 
2515 
2516 thread_id
2517 find_thread(const char *name)
2518 {
2519 	struct hash_iterator iterator;
2520 	struct thread *thread;
2521 	cpu_status state;
2522 
2523 	if (name == NULL)
2524 		return thread_get_current_thread_id();
2525 
2526 	state = disable_interrupts();
2527 	GRAB_THREAD_LOCK();
2528 
2529 	// ToDo: this might not be in the same order as find_thread() in BeOS
2530 	//		which could be theoretically problematic.
2531 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2532 	//		cheap either - although this function is probably used very rarely.
2533 
2534 	hash_open(sThreadHash, &iterator);
2535 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
2536 			!= NULL) {
2537 		// Search through hash
2538 		if (thread->name != NULL && !strcmp(thread->name, name)) {
2539 			thread_id id = thread->id;
2540 
2541 			RELEASE_THREAD_LOCK();
2542 			restore_interrupts(state);
2543 			return id;
2544 		}
2545 	}
2546 
2547 	RELEASE_THREAD_LOCK();
2548 	restore_interrupts(state);
2549 
2550 	return B_NAME_NOT_FOUND;
2551 }
2552 
2553 
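/*!	Renames the thread with the given ID. Only threads of the calling
	thread's team may be renamed (otherwise \c B_NOT_ALLOWED is returned);
	\c B_BAD_THREAD_ID is returned if no thread with the given ID exists.
*/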
2554 status_t
2555 rename_thread(thread_id id, const char *name)
2556 {
2557 	struct thread *thread = thread_get_current_thread();
2558 	status_t status = B_BAD_THREAD_ID;
2559 	cpu_status state;
2560 
2561 	if (name == NULL)
2562 		return B_BAD_VALUE;
2563 
2564 	state = disable_interrupts();
2565 	GRAB_THREAD_LOCK();
2566 
2567 	if (thread->id != id)
2568 		thread = thread_get_thread_struct_locked(id);
2569 
2570 	if (thread != NULL) {
2571 		if (thread->team == thread_get_current_thread()->team) {
2572 			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2573 			status = B_OK;
2574 		} else
2575 			status = B_NOT_ALLOWED;
2576 	}
2577 
2578 	RELEASE_THREAD_LOCK();
2579 	restore_interrupts(state);
2580 
2581 	return status;
2582 }
2583 
2584 
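/*!	Sets the priority of the thread with the given ID and returns the
	thread's previous priority. The new priority is clamped to the valid
	range; the priority of idle threads cannot be changed
	(\c B_NOT_ALLOWED). Returns \c B_BAD_THREAD_ID if there is no thread
	with that ID.
*/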
2585 status_t
2586 set_thread_priority(thread_id id, int32 priority)
2587 {
2588 	struct thread *thread;
2589 	int32 oldPriority;
2590 
2591 	// make sure the passed in priority is within bounds
2592 	if (priority > THREAD_MAX_SET_PRIORITY)
2593 		priority = THREAD_MAX_SET_PRIORITY;
2594 	if (priority < THREAD_MIN_SET_PRIORITY)
2595 		priority = THREAD_MIN_SET_PRIORITY;
2596 
2597 	thread = thread_get_current_thread();
2598 	if (thread->id == id) {
2599 		if (thread_is_idle_thread(thread))
2600 			return B_NOT_ALLOWED;
2601 
2602 		// It's our own thread, so we know it isn't in the run queue, and we
2603 		// can manipulate its structure directly.
2604 		oldPriority = thread->priority;
2605 			// Note that this might not return the correct value if we are
2606 			// preempted here, and another thread changes our priority before
2607 			// the next line is executed.
2608 		thread->priority = thread->next_priority = priority;
2609 	} else {
2610 		InterruptsSpinLocker _(gThreadSpinlock);
2611 
2612 		thread = thread_get_thread_struct_locked(id);
2613 		if (thread == NULL)
2614 			return B_BAD_THREAD_ID;
2615 
2616 		if (thread_is_idle_thread(thread))
2617 			return B_NOT_ALLOWED;
2618 
2619 		oldPriority = thread->priority;
2620 		scheduler_set_thread_priority(thread, priority);
2621 	}
2622 
2623 	return oldPriority;
2624 }
2625 
2626 
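/*!	Puts the calling thread to sleep for the given timeout (relative or
	absolute, depending on \a flags). Only \c B_SYSTEM_TIMEBASE is
	supported. Timing out is reported as \c B_OK; other errors (e.g.
	\c B_INTERRUPTED when \c B_CAN_INTERRUPT is set) are passed through.
*/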
2627 status_t
2628 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2629 {
2630 	status_t status;
2631 
2632 	if (timebase != B_SYSTEM_TIMEBASE)
2633 		return B_BAD_VALUE;
2634 
2635 	InterruptsSpinLocker _(gThreadSpinlock);
2636 	struct thread* thread = thread_get_current_thread();
2637 
2638 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, NULL);
2639 	status = thread_block_with_timeout_locked(flags, timeout);
2640 
2641 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2642 		return B_OK;
2643 
2644 	return status;
2645 }
2646 
2647 
2648 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2649 status_t
2650 snooze(bigtime_t timeout)
2651 {
2652 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2653 }
2654 
2655 
2656 /*!
2657 	snooze_until() for internal kernel use only; doesn't interrupt on
2658 	signals.
2659 */
2660 status_t
2661 snooze_until(bigtime_t timeout, int timebase)
2662 {
2663 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2664 }
2665 
2666 
2667 status_t
2668 wait_for_thread(thread_id thread, status_t *_returnCode)
2669 {
2670 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2671 }
2672 
2673 
2674 status_t
2675 suspend_thread(thread_id id)
2676 {
2677 	if (id <= 0)
2678 		return B_BAD_VALUE;
2679 
2680 	return send_signal(id, SIGSTOP);
2681 }
2682 
2683 
2684 status_t
2685 resume_thread(thread_id id)
2686 {
2687 	if (id <= 0)
2688 		return B_BAD_VALUE;
2689 
2690 	return send_signal_etc(id, SIGCONT, SIGNAL_FLAG_DONT_RESTART_SYSCALL);
2691 		// This retains compatibility with BeOS, which documents the
2692 		// combination of suspend_thread() and resume_thread() to
2693 		// interrupt threads waiting on semaphores.
2694 }
2695 
2696 
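/*!	Creates a new kernel thread in the kernel team. The thread is created
	in the suspended state and has to be resumed (via resume_thread(), or
	implicitly by wait_for_thread()) before it runs.

	A minimal usage sketch; the worker function "cleanup_worker" is made up
	for illustration:

	\code
	static status_t
	cleanup_worker(void* data)
	{
		// ... do the actual work ...
		return B_OK;
	}

	thread_id worker = spawn_kernel_thread(&cleanup_worker, "cleanup worker",
		B_NORMAL_PRIORITY, NULL);
	if (worker >= B_OK) {
		resume_thread(worker);

		status_t returnCode;
		wait_for_thread(worker, &returnCode);
	}
	\endcode
*/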
2697 thread_id
2698 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2699 	void *arg)
2700 {
2701 	thread_creation_attributes attributes;
2702 	attributes.entry = (thread_entry_func)function;
2703 	attributes.name = name;
2704 	attributes.priority = priority;
2705 	attributes.args1 = arg;
2706 	attributes.args2 = NULL;
2707 	attributes.stack_address = NULL;
2708 	attributes.stack_size = 0;
2709 	attributes.team = team_get_kernel_team()->id;
2710 	attributes.thread = -1;
2711 
2712 	return create_thread(attributes, true);
2713 }
2714 
2715 
2716 int
2717 getrlimit(int resource, struct rlimit * rlp)
2718 {
2719 	status_t error = common_getrlimit(resource, rlp);
2720 	if (error != B_OK) {
2721 		errno = error;
2722 		return -1;
2723 	}
2724 
2725 	return 0;
2726 }
2727 
2728 
2729 int
2730 setrlimit(int resource, const struct rlimit * rlp)
2731 {
2732 	status_t error = common_setrlimit(resource, rlp);
2733 	if (error != B_OK) {
2734 		errno = error;
2735 		return -1;
2736 	}
2737 
2738 	return 0;
2739 }
2740 
2741 
2742 //	#pragma mark - syscalls
2743 
2744 
2745 void
2746 _user_exit_thread(status_t returnValue)
2747 {
2748 	exit_thread(returnValue);
2749 }
2750 
2751 
2752 status_t
2753 _user_kill_thread(thread_id thread)
2754 {
2755 	return kill_thread(thread);
2756 }
2757 
2758 
2759 status_t
2760 _user_resume_thread(thread_id thread)
2761 {
2762 	return resume_thread(thread);
2763 }
2764 
2765 
2766 status_t
2767 _user_suspend_thread(thread_id thread)
2768 {
2769 	return suspend_thread(thread);
2770 }
2771 
2772 
2773 status_t
2774 _user_rename_thread(thread_id thread, const char *userName)
2775 {
2776 	char name[B_OS_NAME_LENGTH];
2777 
2778 	if (!IS_USER_ADDRESS(userName)
2779 		|| userName == NULL
2780 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2781 		return B_BAD_ADDRESS;
2782 
2783 	return rename_thread(thread, name);
2784 }
2785 
2786 
2787 int32
2788 _user_set_thread_priority(thread_id thread, int32 newPriority)
2789 {
2790 	return set_thread_priority(thread, newPriority);
2791 }
2792 
2793 
2794 thread_id
2795 _user_spawn_thread(thread_creation_attributes* userAttributes)
2796 {
2797 	thread_creation_attributes attributes;
2798 	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
2799 		|| user_memcpy(&attributes, userAttributes,
2800 				sizeof(attributes)) != B_OK) {
2801 		return B_BAD_ADDRESS;
2802 	}
2803 
2804 	if (attributes.stack_size != 0
2805 		&& (attributes.stack_size < MIN_USER_STACK_SIZE
2806 			|| attributes.stack_size > MAX_USER_STACK_SIZE)) {
2807 		return B_BAD_VALUE;
2808 	}
2809 
2810 	char name[B_OS_NAME_LENGTH];
2811 	thread_id threadID;
2812 
2813 	if (!IS_USER_ADDRESS(attributes.entry) || attributes.entry == NULL
2814 		|| (attributes.stack_address != NULL
2815 			&& !IS_USER_ADDRESS(attributes.stack_address))
2816 		|| (attributes.name != NULL && (!IS_USER_ADDRESS(attributes.name)
2817 			|| user_strlcpy(name, attributes.name, B_OS_NAME_LENGTH) < 0)))
2818 		return B_BAD_ADDRESS;
2819 
2820 	attributes.name = attributes.name != NULL ? name : "user thread";
2821 	attributes.team = thread_get_current_thread()->team->id;
2822 	attributes.thread = -1;
2823 
2824 	threadID = create_thread(attributes, false);
2825 
2826 	if (threadID >= 0)
2827 		user_debug_thread_created(threadID);
2828 
2829 	return threadID;
2830 }
2831 
2832 
2833 status_t
2834 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2835 {
2836 	// NOTE: We only know the system timebase at the moment.
2837 	syscall_restart_handle_timeout_pre(flags, timeout);
2838 
2839 	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2840 
2841 	return syscall_restart_handle_timeout_post(error, timeout);
2842 }
2843 
2844 
2845 void
2846 _user_thread_yield(void)
2847 {
2848 	thread_yield(true);
2849 }
2850 
2851 
2852 status_t
2853 _user_get_thread_info(thread_id id, thread_info *userInfo)
2854 {
2855 	thread_info info;
2856 	status_t status;
2857 
2858 	if (!IS_USER_ADDRESS(userInfo))
2859 		return B_BAD_ADDRESS;
2860 
2861 	status = _get_thread_info(id, &info, sizeof(thread_info));
2862 
2863 	if (status >= B_OK
2864 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2865 		return B_BAD_ADDRESS;
2866 
2867 	return status;
2868 }
2869 
2870 
2871 status_t
2872 _user_get_next_thread_info(team_id team, int32 *userCookie,
2873 	thread_info *userInfo)
2874 {
2875 	status_t status;
2876 	thread_info info;
2877 	int32 cookie;
2878 
2879 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2880 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2881 		return B_BAD_ADDRESS;
2882 
2883 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2884 	if (status < B_OK)
2885 		return status;
2886 
2887 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2888 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2889 		return B_BAD_ADDRESS;
2890 
2891 	return status;
2892 }
2893 
2894 
2895 thread_id
2896 _user_find_thread(const char *userName)
2897 {
2898 	char name[B_OS_NAME_LENGTH];
2899 
2900 	if (userName == NULL)
2901 		return find_thread(NULL);
2902 
2903 	if (!IS_USER_ADDRESS(userName)
2904 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2905 		return B_BAD_ADDRESS;
2906 
2907 	return find_thread(name);
2908 }
2909 
2910 
2911 status_t
2912 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2913 {
2914 	status_t returnCode;
2915 	status_t status;
2916 
2917 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2918 		return B_BAD_ADDRESS;
2919 
2920 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2921 
2922 	if (status == B_OK && userReturnCode != NULL
2923 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
2924 		return B_BAD_ADDRESS;
2925 	}
2926 
2927 	return syscall_restart_handle_post(status);
2928 }
2929 
2930 
2931 bool
2932 _user_has_data(thread_id thread)
2933 {
2934 	return has_data(thread);
2935 }
2936 
2937 
2938 status_t
2939 _user_send_data(thread_id thread, int32 code, const void *buffer,
2940 	size_t bufferSize)
2941 {
2942 	if (!IS_USER_ADDRESS(buffer))
2943 		return B_BAD_ADDRESS;
2944 
2945 	return send_data_etc(thread, code, buffer, bufferSize,
2946 		B_KILL_CAN_INTERRUPT);
2947 		// supports userland buffers
2948 }
2949 
2950 
2951 status_t
2952 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2953 {
2954 	thread_id sender;
2955 	status_t code;
2956 
2957 	if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
2958 		|| !IS_USER_ADDRESS(buffer))
2959 		return B_BAD_ADDRESS;
2960 
2961 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2962 		// supports userland buffers
2963 
2964 	if (_userSender != NULL)
2965 		if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
2966 			return B_BAD_ADDRESS;
2967 
2968 	return code;
2969 }
2970 
2971 
2972 status_t
2973 _user_block_thread(uint32 flags, bigtime_t timeout)
2974 {
2975 	syscall_restart_handle_timeout_pre(flags, timeout);
2976 	flags |= B_CAN_INTERRUPT;
2977 
2978 	struct thread* thread = thread_get_current_thread();
2979 
2980 	InterruptsSpinLocker locker(gThreadSpinlock);
2981 
2982 	// check if we're already done
2983 	if (thread->user_thread->wait_status <= 0)
2984 		return thread->user_thread->wait_status;
2985 
2986 	// nope, so wait
2987 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");
2988 	status_t status = thread_block_with_timeout_locked(flags, timeout);
2989 	thread->user_thread->wait_status = status;
2990 
2991 	return syscall_restart_handle_timeout_post(status, timeout);
2992 }
2993 
2994 
2995 status_t
2996 _user_unblock_thread(thread_id threadID, status_t status)
2997 {
2998 	InterruptsSpinLocker locker(gThreadSpinlock);
2999 	return user_unblock_thread(threadID, status);
3000 }
3001 
3002 
3003 status_t
3004 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
3005 {
3006 	enum {
3007 		MAX_USER_THREADS_TO_UNBLOCK	= 128
3008 	};
3009 
3010 	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
3011 		return B_BAD_ADDRESS;
3012 	if (count > MAX_USER_THREADS_TO_UNBLOCK)
3013 		return B_BAD_VALUE;
3014 
3015 	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
3016 	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
3017 		return B_BAD_ADDRESS;
3018 
3019 	for (uint32 i = 0; i < count; i++)
3020 		user_unblock_thread(threads[i], status);
3021 
3022 	return B_OK;
3023 }
3024 
3025 
3026 // TODO: the following two functions don't belong here
3027 
3028 
3029 int
3030 _user_getrlimit(int resource, struct rlimit *urlp)
3031 {
3032 	struct rlimit rl;
3033 	int ret;
3034 
3035 	if (urlp == NULL)
3036 		return EINVAL;
3037 
3038 	if (!IS_USER_ADDRESS(urlp))
3039 		return B_BAD_ADDRESS;
3040 
3041 	ret = common_getrlimit(resource, &rl);
3042 
3043 	if (ret == 0) {
3044 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
3045 		if (ret < 0)
3046 			return ret;
3047 
3048 		return 0;
3049 	}
3050 
3051 	return ret;
3052 }
3053 
3054 
3055 int
3056 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
3057 {
3058 	struct rlimit resourceLimit;
3059 
3060 	if (userResourceLimit == NULL)
3061 		return EINVAL;
3062 
3063 	if (!IS_USER_ADDRESS(userResourceLimit)
3064 		|| user_memcpy(&resourceLimit, userResourceLimit,
3065 			sizeof(struct rlimit)) < B_OK)
3066 		return B_BAD_ADDRESS;
3067 
3068 	return common_setrlimit(resource, &resourceLimit);
3069 }
3070