xref: /haiku/src/system/kernel/thread.cpp (revision a1163de83ea633463a79de234b8742ee106531b2)
1 /*
2  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*! Threading routines */
10 
11 
12 #include <thread.h>
13 
14 #include <errno.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <sys/resource.h>
19 
20 #include <OS.h>
21 
22 #include <util/AutoLock.h>
23 #include <util/khash.h>
24 
25 #include <arch/debug.h>
26 #include <boot/kernel_args.h>
27 #include <condition_variable.h>
28 #include <cpu.h>
29 #include <int.h>
30 #include <kimage.h>
31 #include <kscheduler.h>
32 #include <ksignal.h>
33 #include <Notifications.h>
34 #include <real_time_clock.h>
35 #include <smp.h>
36 #include <syscalls.h>
37 #include <syscall_restart.h>
38 #include <team.h>
39 #include <tls.h>
40 #include <user_runtime.h>
41 #include <user_thread.h>
42 #include <vfs.h>
43 #include <vm.h>
44 #include <vm_address_space.h>
45 #include <wait_for_objects.h>
46 
47 
48 //#define TRACE_THREAD
49 #ifdef TRACE_THREAD
50 #	define TRACE(x) dprintf x
51 #else
52 #	define TRACE(x) ;
53 #endif
54 
55 
56 #define THREAD_MAX_MESSAGE_SIZE		65536
57 
58 
59 struct thread_key {
60 	thread_id id;
61 };
62 
63 // global
64 spinlock gThreadSpinlock = B_SPINLOCK_INITIALIZER;
65 
66 // thread list
67 static struct thread sIdleThreads[B_MAX_CPU_COUNT];
68 static hash_table *sThreadHash = NULL;
69 static thread_id sNextThreadID = 1;
70 
71 // some arbitrarily chosen limits - should probably depend on the available
72 // memory (the limit is not yet enforced)
73 static int32 sMaxThreads = 4096;
74 static int32 sUsedThreads = 0;
75 
76 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
77 	struct thread*	thread;
78 	team_id			teamID;
79 	sem_id			deathSem;
80 
81 	UndertakerEntry(struct thread* thread, team_id teamID, sem_id deathSem)
82 		:
83 		thread(thread),
84 		teamID(teamID),
85 		deathSem(deathSem)
86 	{
87 	}
88 };
89 
90 
91 class ThreadNotificationService : public DefaultNotificationService {
92 public:
93 	ThreadNotificationService()
94 		: DefaultNotificationService("threads")
95 	{
96 	}
97 
98 	void Notify(uint32 eventCode, struct thread* thread)
99 	{
100 		char eventBuffer[128];
101 		KMessage event;
102 		event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
103 		event.AddInt32("event", eventCode);
104 		event.AddInt32("thread", thread->id);
105 		event.AddPointer("threadStruct", thread);
106 
107 		DefaultNotificationService::Notify(event, eventCode);
108 	}
109 };
110 
111 
112 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
113 static ConditionVariable sUndertakerCondition;
114 static ThreadNotificationService sNotificationService;
115 
116 
117 // The dead queue is used as a pool from which to retrieve and reuse previously
118 // allocated thread structs when creating a new thread. It should be gone once
119 // the slab allocator is in.
120 static struct thread_queue dead_q;
121 
122 static void thread_kthread_entry(void);
123 static void thread_kthread_exit(void);
124 
125 
126 /*!
127 	Inserts a thread into a team.
128 	You must hold the team lock when you call this function.
129 */
130 static void
131 insert_thread_into_team(struct team *team, struct thread *thread)
132 {
133 	thread->team_next = team->thread_list;
134 	team->thread_list = thread;
135 	team->num_threads++;
136 
137 	if (team->num_threads == 1) {
138 		// this was the first thread
139 		team->main_thread = thread;
140 	}
141 	thread->team = team;
142 }
143 
144 
145 /*!
146 	Removes a thread from a team.
147 	You must hold the team lock when you call this function.
148 */
149 static void
150 remove_thread_from_team(struct team *team, struct thread *thread)
151 {
152 	struct thread *temp, *last = NULL;
153 
154 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
155 		if (temp == thread) {
156 			if (last == NULL)
157 				team->thread_list = temp->team_next;
158 			else
159 				last->team_next = temp->team_next;
160 
161 			team->num_threads--;
162 			break;
163 		}
164 		last = temp;
165 	}
166 }
167 
168 
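// Hash table callbacks for sThreadHash: threads are keyed by their thread ID.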
169 static int
170 thread_struct_compare(void *_t, const void *_key)
171 {
172 	struct thread *thread = (struct thread*)_t;
173 	const struct thread_key *key = (const struct thread_key*)_key;
174 
175 	if (thread->id == key->id)
176 		return 0;
177 
178 	return 1;
179 }
180 
181 
182 static uint32
183 thread_struct_hash(void *_t, const void *_key, uint32 range)
184 {
185 	struct thread *thread = (struct thread*)_t;
186 	const struct thread_key *key = (const struct thread_key*)_key;
187 
188 	if (thread != NULL)
189 		return thread->id % range;
190 
191 	return (uint32)key->id % range;
192 }
193 
194 
195 static void
196 reset_signals(struct thread *thread)
197 {
198 	thread->sig_pending = 0;
199 	thread->sig_block_mask = 0;
200 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
201 	thread->signal_stack_base = 0;
202 	thread->signal_stack_size = 0;
203 	thread->signal_stack_enabled = false;
204 }
205 
206 
207 /*!
208 	Allocates and fills in thread structure (or reuses one from the
209 	Allocates and fills in a thread structure (or reuses one from the
210 
211 	\param threadID The ID to be assigned to the new thread. If
212 		  \code < 0 \endcode a fresh one is allocated.
213 	\param inthread If non-NULL, this thread structure is initialized in place.
214 */
215 
216 static struct thread *
217 create_thread_struct(struct thread *inthread, const char *name,
218 	thread_id threadID, struct cpu_ent *cpu)
219 {
220 	struct thread *thread;
221 	cpu_status state;
222 	char temp[64];
223 	bool recycled = false;
224 
225 	if (inthread == NULL) {
226 		// try to recycle one from the dead queue first
227 		state = disable_interrupts();
228 		GRAB_THREAD_LOCK();
229 		thread = thread_dequeue(&dead_q);
230 		RELEASE_THREAD_LOCK();
231 		restore_interrupts(state);
232 
233 		// if not, create a new one
234 		if (thread == NULL) {
235 			thread = (struct thread *)malloc(sizeof(struct thread));
236 			if (thread == NULL)
237 				return NULL;
238 		} else {
239 			recycled = true;
240 		}
241 	} else {
242 		thread = inthread;
243 	}
244 
245 	if (!recycled)
246 		scheduler_on_thread_create(thread);
247 
248 	if (name != NULL)
249 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
250 	else
251 		strcpy(thread->name, "unnamed thread");
252 
253 	thread->flags = 0;
254 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
255 	thread->team = NULL;
256 	thread->cpu = cpu;
257 	thread->previous_cpu = NULL;
258 	thread->pinned_to_cpu = 0;
259 	thread->keep_scheduled = 0;
260 	thread->fault_handler = 0;
261 	thread->page_faults_allowed = 1;
262 	thread->kernel_stack_area = -1;
263 	thread->kernel_stack_base = 0;
264 	thread->user_stack_area = -1;
265 	thread->user_stack_base = 0;
266 	thread->user_local_storage = 0;
267 	thread->kernel_errno = 0;
268 	thread->team_next = NULL;
269 	thread->queue_next = NULL;
270 	thread->priority = thread->next_priority = -1;
271 	thread->io_priority = -1;
272 	thread->args1 = NULL;  thread->args2 = NULL;
273 	thread->alarm.period = 0;
274 	reset_signals(thread);
275 	thread->in_kernel = true;
276 	thread->was_yielded = false;
277 	thread->user_time = 0;
278 	thread->kernel_time = 0;
279 	thread->last_time = 0;
280 	thread->exit.status = 0;
281 	thread->exit.reason = 0;
282 	thread->exit.signal = 0;
283 	list_init(&thread->exit.waiters);
284 	thread->select_infos = NULL;
285 	thread->post_interrupt_callback = NULL;
286 	thread->post_interrupt_data = NULL;
287 
288 	sprintf(temp, "thread_%ld_retcode_sem", thread->id);
289 	thread->exit.sem = create_sem(0, temp);
290 	if (thread->exit.sem < B_OK)
291 		goto err1;
292 
293 	sprintf(temp, "%s send", thread->name);
294 	thread->msg.write_sem = create_sem(1, temp);
295 	if (thread->msg.write_sem < B_OK)
296 		goto err2;
297 
298 	sprintf(temp, "%s receive", thread->name);
299 	thread->msg.read_sem = create_sem(0, temp);
300 	if (thread->msg.read_sem < B_OK)
301 		goto err3;
302 
303 	if (arch_thread_init_thread_struct(thread) < B_OK)
304 		goto err4;
305 
306 	return thread;
307 
308 err4:
309 	delete_sem(thread->msg.read_sem);
310 err3:
311 	delete_sem(thread->msg.write_sem);
312 err2:
313 	delete_sem(thread->exit.sem);
314 err1:
315 	// ToDo: put them in the dead queue instead?
316 	if (inthread == NULL) {
317 		scheduler_on_thread_destroy(thread);
318 		free(thread);
319 	}
320 	return NULL;
321 }
322 
323 
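/*!
	Frees a thread structure created by create_thread_struct(): deletes its
	semaphores, notifies the scheduler, and releases the memory.
*/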
324 static void
325 delete_thread_struct(struct thread *thread)
326 {
327 	delete_sem(thread->exit.sem);
328 	delete_sem(thread->msg.write_sem);
329 	delete_sem(thread->msg.read_sem);
330 
331 	scheduler_on_thread_destroy(thread);
332 
333 	// ToDo: put them in the dead queue instead?
334 	free(thread);
335 }
336 
337 
338 /*! This function gets run by a new thread before anything else */
339 static void
340 thread_kthread_entry(void)
341 {
342 	struct thread *thread = thread_get_current_thread();
343 
344 	// The thread is new and has been scheduled the first time. Notify the user
345 	// debugger code.
346 	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
347 		user_debug_thread_scheduled(thread);
348 
349 	// simulates the thread spinlock release that would occur if the thread had
350 	// been rescheduled from. The reschedule didn't happen, as the thread is new.
351 	RELEASE_THREAD_LOCK();
352 
353 	// start tracking time
354 	thread->last_time = system_time();
355 
356 	enable_interrupts(); // this essentially simulates a return-from-interrupt
357 }
358 
359 
360 static void
361 thread_kthread_exit(void)
362 {
363 	struct thread *thread = thread_get_current_thread();
364 
365 	thread->exit.reason = THREAD_RETURN_EXIT;
366 	thread_exit();
367 }
368 
369 
370 /*!
371 	Initializes the thread and jumps to its userspace entry point.
372 	This function is called at creation time of every user thread,
373 	but not for a team's main thread.
374 */
375 static int
376 _create_user_thread_kentry(void)
377 {
378 	struct thread *thread = thread_get_current_thread();
379 
380 	// jump to the entry point in user space
381 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
382 		thread->args1, thread->args2);
383 
384 	// only get here if the above call fails
385 	return 0;
386 }
387 
388 
389 /*! Initializes the thread and calls its kernel space entry point. */
390 static int
391 _create_kernel_thread_kentry(void)
392 {
393 	struct thread *thread = thread_get_current_thread();
394 	int (*func)(void *args) = (int (*)(void *))thread->entry;
395 
396 	// call the entry function with the appropriate args
397 	return func(thread->args1);
398 }
399 
400 
401 /*!
402 	Creates a new thread in the team with the specified team ID.
403 
404 	\param attributes The creation attributes; the new thread's ID is taken
405 		  from \c attributes.thread. If \code < 0 \endcode a fresh one is allocated.
406 */
407 static thread_id
408 create_thread(thread_creation_attributes& attributes, bool kernel)
409 {
410 	struct thread *thread, *currentThread;
411 	struct team *team;
412 	cpu_status state;
413 	char stack_name[B_OS_NAME_LENGTH];
414 	status_t status;
415 	bool abort = false;
416 	bool debugNewThread = false;
417 
418 	TRACE(("create_thread(%s, id = %ld, %s)\n", attributes.name,
419 		attributes.thread, kernel ? "kernel" : "user"));
420 
421 	thread = create_thread_struct(NULL, attributes.name, attributes.thread,
422 		NULL);
423 	if (thread == NULL)
424 		return B_NO_MEMORY;
425 
426 	thread->priority = attributes.priority == -1
427 		? B_NORMAL_PRIORITY : attributes.priority;
428 	thread->next_priority = thread->priority;
429 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
430 	thread->state = B_THREAD_SUSPENDED;
431 	thread->next_state = B_THREAD_SUSPENDED;
432 
433 	// init debug structure
434 	init_thread_debug_info(&thread->debug_info);
435 
436 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", attributes.name,
437 		thread->id);
438 	thread->kernel_stack_area = create_area(stack_name,
439 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
440 		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
441 		B_FULL_LOCK,
442 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
443 
444 	if (thread->kernel_stack_area < 0) {
445 		// we're not yet part of a team, so we can just bail out
446 		status = thread->kernel_stack_area;
447 
448 		dprintf("create_thread: error creating kernel stack: %s!\n",
449 			strerror(status));
450 
451 		delete_thread_struct(thread);
452 		return status;
453 	}
454 
455 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
456 		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
457 
458 	state = disable_interrupts();
459 	GRAB_THREAD_LOCK();
460 
461 	// If the new thread belongs to the same team as the current thread,
462 	// it may inherit some of the thread debug flags.
463 	currentThread = thread_get_current_thread();
464 	if (currentThread && currentThread->team->id == attributes.team) {
465 		// inherit all user flags...
466 		int32 debugFlags = currentThread->debug_info.flags
467 			& B_THREAD_DEBUG_USER_FLAG_MASK;
468 
469 		// ... except the syscall tracing flags, unless explicitly specified
470 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
471 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
472 				| B_THREAD_DEBUG_POST_SYSCALL);
473 		}
474 
475 		thread->debug_info.flags = debugFlags;
476 
477 		// stop the new thread, if desired
478 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
479 	}
480 
481 	// insert into global list
482 	hash_insert(sThreadHash, thread);
483 	sUsedThreads++;
484 	scheduler_on_thread_init(thread);
485 	RELEASE_THREAD_LOCK();
486 
487 	GRAB_TEAM_LOCK();
488 	// look at the team, make sure it's not being deleted
489 	team = team_get_team_struct_locked(attributes.team);
490 
491 	if (team == NULL || team->state == TEAM_STATE_DEATH)
492 		abort = true;
493 
494 	if (!abort && !kernel) {
495 		thread->user_thread = team_allocate_user_thread(team);
496 		abort = thread->user_thread == NULL;
497 	}
498 
499 	if (!abort) {
500 		// Debug the new thread, if the parent thread required that (see above),
501 		// or the respective global team debug flag is set. But only, if a
502 		// debugger is installed for the team.
503 		debugNewThread |= (atomic_get(&team->debug_info.flags)
504 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
505 		if (debugNewThread
506 			&& (atomic_get(&team->debug_info.flags)
507 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
508 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
509 		}
510 
511 		insert_thread_into_team(team, thread);
512 	}
513 
514 	RELEASE_TEAM_LOCK();
515 	if (abort) {
516 		GRAB_THREAD_LOCK();
517 		hash_remove(sThreadHash, thread);
518 		RELEASE_THREAD_LOCK();
519 	}
520 	restore_interrupts(state);
521 	if (abort) {
522 		delete_area(thread->kernel_stack_area);
523 		delete_thread_struct(thread);
524 		return B_BAD_TEAM_ID;
525 	}
526 
527 	thread->args1 = attributes.args1;
528 	thread->args2 = attributes.args2;
529 	thread->entry = attributes.entry;
530 	status = thread->id;
531 
532 	// notify listeners
533 	sNotificationService.Notify(THREAD_ADDED, thread);
534 
535 	if (kernel) {
536 		// this sets up an initial kthread stack that runs the entry
537 
538 		// Note: whatever function wants to set up a user stack later for this
539 		// thread must initialize the TLS for it
540 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
541 			&thread_kthread_entry, &thread_kthread_exit);
542 	} else {
543 		// create user stack
544 
545 		// the stack will be between USER_STACK_REGION and the main thread stack
546 		// area (the user stack of the main thread is created in
547 		// team_create_team())
548 		if (attributes.stack_address == NULL) {
549 			thread->user_stack_base = USER_STACK_REGION;
550 			if (attributes.stack_size <= 0)
551 				thread->user_stack_size = USER_STACK_SIZE;
552 			else
553 				thread->user_stack_size = PAGE_ALIGN(attributes.stack_size);
554 			thread->user_stack_size += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
555 
556 			snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack",
557 				attributes.name, thread->id);
558 			thread->user_stack_area = create_area_etc(team->id, stack_name,
559 					(void **)&thread->user_stack_base, B_BASE_ADDRESS,
560 					thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
561 					B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0);
562 			if (thread->user_stack_area < B_OK
563 				|| arch_thread_init_tls(thread) < B_OK) {
564 				// great, we have a fully running thread without a (usable)
565 				// stack
566 				dprintf("create_thread: unable to create proper user stack!\n");
567 				status = thread->user_stack_area;
568 				kill_thread(thread->id);
569 			}
570 		} else {
571 			thread->user_stack_base = (addr_t)attributes.stack_address;
572 			thread->user_stack_size = attributes.stack_size;
573 		}
574 
575 		user_debug_update_new_thread_flags(thread->id);
576 
577 		// copy the user entry over to the args field in the thread struct
578 		// the function this will call will immediately switch the thread into
579 		// user space.
580 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
581 			&thread_kthread_entry, &thread_kthread_exit);
582 	}
583 
584 	return status;
585 }
586 
587 
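/*!
	Entry point of the undertaker kernel thread. It waits for dying threads to
	be queued on sUndertakerEntries, then deletes their kernel stack areas,
	removes them from the kernel team, releases the stored death semaphore (if
	any), and recycles the thread structures via the dead queue.
*/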
588 static status_t
589 undertaker(void* /*args*/)
590 {
591 	while (true) {
592 		// wait for a thread to bury
593 		InterruptsSpinLocker locker(gThreadSpinlock);
594 
595 		while (sUndertakerEntries.IsEmpty()) {
596 			ConditionVariableEntry conditionEntry;
597 			sUndertakerCondition.Add(&conditionEntry);
598 			locker.Unlock();
599 
600 			conditionEntry.Wait();
601 
602 			locker.Lock();
603 		}
604 
605 		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
606 		locker.Unlock();
607 
608 		UndertakerEntry entry = *_entry;
609 			// we need a copy, since the original entry is on the thread's stack
610 
611 		// we've got an entry
612 		struct thread* thread = entry.thread;
613 
614 		// delete the old kernel stack area
615 		delete_area(thread->kernel_stack_area);
616 
617 		// remove this thread from all of the global lists
618 		disable_interrupts();
619 		GRAB_TEAM_LOCK();
620 
621 		remove_thread_from_team(team_get_kernel_team(), thread);
622 
623 		RELEASE_TEAM_LOCK();
624 		enable_interrupts();
625 			// needed for the debugger notification below
626 
627 		if (entry.deathSem >= 0)
628 			release_sem_etc(entry.deathSem, 1, B_DO_NOT_RESCHEDULE);
629 
630 		// free the thread structure
631 		locker.Lock();
632 		thread_enqueue(thread, &dead_q);
633 			// TODO: Use the slab allocator!
634 	}
635 
636 	// never can get here
637 	return B_OK;
638 }
639 
640 
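/*!
	Returns the semaphore the given thread is currently blocked on, or -1 if
	it is not waiting on a semaphore.
*/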
641 static sem_id
642 get_thread_wait_sem(struct thread* thread)
643 {
644 	if (thread->state == B_THREAD_WAITING
645 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
646 		return (sem_id)(addr_t)thread->wait.object;
647 	}
648 	return -1;
649 }
650 
651 
652 /*!
653 	Fills the thread_info structure with information from the specified
654 	thread.
655 	The thread lock must be held when called.
656 */
657 static void
658 fill_thread_info(struct thread *thread, thread_info *info, size_t size)
659 {
660 	info->thread = thread->id;
661 	info->team = thread->team->id;
662 
663 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
664 
665 	if (thread->state == B_THREAD_WAITING) {
666 		info->state = B_THREAD_WAITING;
667 
668 		switch (thread->wait.type) {
669 			case THREAD_BLOCK_TYPE_SNOOZE:
670 				info->state = B_THREAD_ASLEEP;
671 				break;
672 
673 			case THREAD_BLOCK_TYPE_SEMAPHORE:
674 			{
675 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
676 				if (sem == thread->msg.read_sem)
677 					info->state = B_THREAD_RECEIVING;
678 				break;
679 			}
680 
681 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
682 			default:
683 				break;
684 		}
685 	} else
686 		info->state = (thread_state)thread->state;
687 
688 	info->priority = thread->priority;
689 	info->user_time = thread->user_time;
690 	info->kernel_time = thread->kernel_time;
691 	info->stack_base = (void *)thread->user_stack_base;
692 	info->stack_end = (void *)(thread->user_stack_base
693 		+ thread->user_stack_size);
694 	info->sem = get_thread_wait_sem(thread);
695 }
696 
697 static status_t
698 send_data_etc(thread_id id, int32 code, const void *buffer,
699 	size_t bufferSize, int32 flags)
700 {
701 	struct thread *target;
702 	sem_id cachedSem;
703 	cpu_status state;
704 	status_t status;
705 	cbuf *data;
706 
707 	state = disable_interrupts();
708 	GRAB_THREAD_LOCK();
709 	target = thread_get_thread_struct_locked(id);
710 	if (!target) {
711 		RELEASE_THREAD_LOCK();
712 		restore_interrupts(state);
713 		return B_BAD_THREAD_ID;
714 	}
715 	cachedSem = target->msg.write_sem;
716 	RELEASE_THREAD_LOCK();
717 	restore_interrupts(state);
718 
719 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
720 		return B_NO_MEMORY;
721 
722 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
723 	if (status == B_INTERRUPTED) {
724 		// We got interrupted by a signal
725 		return status;
726 	}
727 	if (status != B_OK) {
728 		// Any other acquisition problems may be due to thread deletion
729 		return B_BAD_THREAD_ID;
730 	}
731 
732 	if (bufferSize > 0) {
733 		data = cbuf_get_chain(bufferSize);
734 		if (data == NULL)
735 			return B_NO_MEMORY;
736 		status = cbuf_user_memcpy_to_chain(data, 0, buffer, bufferSize);
737 		if (status < B_OK) {
738 			cbuf_free_chain(data);
739 			return B_NO_MEMORY;
740 		}
741 	} else
742 		data = NULL;
743 
744 	state = disable_interrupts();
745 	GRAB_THREAD_LOCK();
746 
747 	// The target thread could have been deleted at this point
748 	target = thread_get_thread_struct_locked(id);
749 	if (target == NULL) {
750 		RELEASE_THREAD_LOCK();
751 		restore_interrupts(state);
752 		cbuf_free_chain(data);
753 		return B_BAD_THREAD_ID;
754 	}
755 
756 	// Save the message information
757 	target->msg.sender = thread_get_current_thread()->id;
758 	target->msg.code = code;
759 	target->msg.size = bufferSize;
760 	target->msg.buffer = data;
761 	cachedSem = target->msg.read_sem;
762 
763 	RELEASE_THREAD_LOCK();
764 	restore_interrupts(state);
765 
766 	release_sem(cachedSem);
767 	return B_OK;
768 }
769 
770 
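/*!
	Receives a message sent to the current thread: waits on the thread's read
	semaphore (subject to \a flags), copies at most \a bufferSize bytes of the
	message into \a buffer, stores the sender in \a _sender, and returns the
	message code.
*/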
771 static int32
772 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
773 	int32 flags)
774 {
775 	struct thread *thread = thread_get_current_thread();
776 	status_t status;
777 	size_t size;
778 	int32 code;
779 
780 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
781 	if (status < B_OK) {
782 		// Actually, we're not supposed to return error codes
783 		// but since the only reason this can fail is that we
784 		// were killed, it's probably okay to do so (but also
785 		// meaningless).
786 		return status;
787 	}
788 
789 	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
790 		size = min_c(bufferSize, thread->msg.size);
791 		status = cbuf_user_memcpy_from_chain(buffer, thread->msg.buffer,
792 			0, size);
793 		if (status < B_OK) {
794 			cbuf_free_chain(thread->msg.buffer);
795 			release_sem(thread->msg.write_sem);
796 			return status;
797 		}
798 	}
799 
800 	*_sender = thread->msg.sender;
801 	code = thread->msg.code;
802 
803 	cbuf_free_chain(thread->msg.buffer);
804 	release_sem(thread->msg.write_sem);
805 
806 	return code;
807 }
808 
809 
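/*!
	Common backend for getrlimit(): file descriptor and node monitor limits
	are delegated to the VFS, core files are not supported (always 0/0), and
	RLIMIT_STACK reports the current thread's user stack size.
*/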
810 static status_t
811 common_getrlimit(int resource, struct rlimit * rlp)
812 {
813 	if (!rlp)
814 		return B_BAD_ADDRESS;
815 
816 	switch (resource) {
817 		case RLIMIT_NOFILE:
818 		case RLIMIT_NOVMON:
819 			return vfs_getrlimit(resource, rlp);
820 
821 		case RLIMIT_CORE:
822 			rlp->rlim_cur = 0;
823 			rlp->rlim_max = 0;
824 			return B_OK;
825 
826 		case RLIMIT_STACK:
827 		{
828 			struct thread *thread = thread_get_current_thread();
829 			if (!thread)
830 				return B_ERROR;
831 			rlp->rlim_cur = thread->user_stack_size;
832 			rlp->rlim_max = thread->user_stack_size;
833 			return B_OK;
834 		}
835 
836 		default:
837 			return EINVAL;
838 	}
839 
840 	return B_OK;
841 }
842 
843 
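/*!
	Common backend for setrlimit(): only the VFS limits can actually be
	changed; RLIMIT_CORE merely accepts the fixed 0/0 setting.
*/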
844 static status_t
845 common_setrlimit(int resource, const struct rlimit * rlp)
846 {
847 	if (!rlp)
848 		return B_BAD_ADDRESS;
849 
850 	switch (resource) {
851 		case RLIMIT_NOFILE:
852 		case RLIMIT_NOVMON:
853 			return vfs_setrlimit(resource, rlp);
854 
855 		case RLIMIT_CORE:
856 			// We don't support core files, so only allow setting this to 0/0.
857 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
858 				return EINVAL;
859 			return B_OK;
860 
861 		default:
862 			return EINVAL;
863 	}
864 
865 	return B_OK;
866 }
867 
868 
869 //	#pragma mark - debugger calls
870 
871 
872 static int
873 make_thread_unreal(int argc, char **argv)
874 {
875 	struct thread *thread;
876 	struct hash_iterator i;
877 	int32 id = -1;
878 
879 	if (argc > 2) {
880 		print_debugger_command_usage(argv[0]);
881 		return 0;
882 	}
883 
884 	if (argc > 1)
885 		id = strtoul(argv[1], NULL, 0);
886 
887 	hash_open(sThreadHash, &i);
888 
889 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
890 		if (id != -1 && thread->id != id)
891 			continue;
892 
893 		if (thread->priority > B_DISPLAY_PRIORITY) {
894 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
895 			kprintf("thread %ld made unreal\n", thread->id);
896 		}
897 	}
898 
899 	hash_close(sThreadHash, &i, false);
900 	return 0;
901 }
902 
903 
904 static int
905 set_thread_prio(int argc, char **argv)
906 {
907 	struct thread *thread;
908 	struct hash_iterator i;
909 	int32 id;
910 	int32 prio;
911 
912 	if (argc > 3 || argc < 2) {
913 		print_debugger_command_usage(argv[0]);
914 		return 0;
915 	}
916 
917 	prio = strtoul(argv[1], NULL, 0);
918 	if (prio > THREAD_MAX_SET_PRIORITY)
919 		prio = THREAD_MAX_SET_PRIORITY;
920 	if (prio < THREAD_MIN_SET_PRIORITY)
921 		prio = THREAD_MIN_SET_PRIORITY;
922 
923 	if (argc > 2)
924 		id = strtoul(argv[2], NULL, 0);
925 	else
926 		id = thread_get_current_thread()->id;
927 
928 	hash_open(sThreadHash, &i);
929 
930 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
931 		if (thread->id != id)
932 			continue;
933 		thread->priority = thread->next_priority = prio;
934 		kprintf("thread %ld set to priority %ld\n", id, prio);
935 		break;
936 	}
937 	if (!thread)
938 		kprintf("thread %ld (%#lx) not found\n", id, id);
939 
940 	hash_close(sThreadHash, &i, false);
941 	return 0;
942 }
943 
944 
945 static int
946 make_thread_suspended(int argc, char **argv)
947 {
948 	struct thread *thread;
949 	struct hash_iterator i;
950 	int32 id;
951 
952 	if (argc > 2) {
953 		print_debugger_command_usage(argv[0]);
954 		return 0;
955 	}
956 
957 	if (argc == 1)
958 		id = thread_get_current_thread()->id;
959 	else
960 		id = strtoul(argv[1], NULL, 0);
961 
962 	hash_open(sThreadHash, &i);
963 
964 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
965 		if (thread->id != id)
966 			continue;
967 
968 		thread->next_state = B_THREAD_SUSPENDED;
969 		kprintf("thread %ld suspended\n", id);
970 		break;
971 	}
972 	if (!thread)
973 		kprintf("thread %ld (%#lx) not found\n", id, id);
974 
975 	hash_close(sThreadHash, &i, false);
976 	return 0;
977 }
978 
979 
980 static int
981 make_thread_resumed(int argc, char **argv)
982 {
983 	struct thread *thread;
984 	struct hash_iterator i;
985 	int32 id;
986 
987 	if (argc != 2) {
988 		print_debugger_command_usage(argv[0]);
989 		return 0;
990 	}
991 
992 	// force user to enter a thread id, as using
993 	// the current thread is usually not intended
994 	id = strtoul(argv[1], NULL, 0);
995 
996 	hash_open(sThreadHash, &i);
997 
998 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
999 		if (thread->id != id)
1000 			continue;
1001 
1002 		if (thread->state == B_THREAD_SUSPENDED) {
1003 			scheduler_enqueue_in_run_queue(thread);
1004 			kprintf("thread %ld resumed\n", thread->id);
1005 		}
1006 		break;
1007 	}
1008 	if (!thread)
1009 		kprintf("thread %ld (%#lx) not found\n", id, id);
1010 
1011 	hash_close(sThreadHash, &i, false);
1012 	return 0;
1013 }
1014 
1015 
1016 static int
1017 drop_into_debugger(int argc, char **argv)
1018 {
1019 	status_t err;
1020 	int32 id;
1021 
1022 	if (argc > 2) {
1023 		print_debugger_command_usage(argv[0]);
1024 		return 0;
1025 	}
1026 
1027 	if (argc == 1)
1028 		id = thread_get_current_thread()->id;
1029 	else
1030 		id = strtoul(argv[1], NULL, 0);
1031 
1032 	err = _user_debug_thread(id);
1033 	if (err)
1034 		kprintf("drop failed\n");
1035 	else
1036 		kprintf("thread %ld dropped into user debugger\n", id);
1037 
1038 	return 0;
1039 }
1040 
1041 
1042 static const char *
1043 state_to_text(struct thread *thread, int32 state)
1044 {
1045 	switch (state) {
1046 		case B_THREAD_READY:
1047 			return "ready";
1048 
1049 		case B_THREAD_RUNNING:
1050 			return "running";
1051 
1052 		case B_THREAD_WAITING:
1053 		{
1054 			if (thread != NULL) {
1055 				switch (thread->wait.type) {
1056 					case THREAD_BLOCK_TYPE_SNOOZE:
1057 						return "zzz";
1058 
1059 					case THREAD_BLOCK_TYPE_SEMAPHORE:
1060 					{
1061 						sem_id sem = (sem_id)(addr_t)thread->wait.object;
1062 						if (sem == thread->msg.read_sem)
1063 							return "receive";
1064 						break;
1065 					}
1066 				}
1067 			}
1068 
1069 			return "waiting";
1070 		}
1071 
1072 		case B_THREAD_SUSPENDED:
1073 			return "suspended";
1074 
1075 		case THREAD_STATE_FREE_ON_RESCHED:
1076 			return "death";
1077 
1078 		default:
1079 			return "UNKNOWN";
1080 	}
1081 }
1082 
1083 
1084 static void
1085 print_thread_list_table_head()
1086 {
1087 	kprintf("thread         id  state     wait for   object  cpu pri  stack    "
1088 		"  team  name\n");
1089 }
1090 
1091 
1092 static void
1093 _dump_thread_info(struct thread *thread, bool shortInfo)
1094 {
1095 	if (shortInfo) {
1096 		kprintf("%p %6ld  %-10s", thread, thread->id, state_to_text(thread,
1097 			thread->state));
1098 
1099 		// does it block on a semaphore or a condition variable?
1100 		if (thread->state == B_THREAD_WAITING) {
1101 			switch (thread->wait.type) {
1102 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1103 				{
1104 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1105 					if (sem == thread->msg.read_sem)
1106 						kprintf("                    ");
1107 					else
1108 						kprintf("sem  %12ld   ", sem);
1109 					break;
1110 				}
1111 
1112 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1113 					kprintf("cvar   %p   ", thread->wait.object);
1114 					break;
1115 
1116 				case THREAD_BLOCK_TYPE_SNOOZE:
1117 					kprintf("                    ");
1118 					break;
1119 
1120 				case THREAD_BLOCK_TYPE_SIGNAL:
1121 					kprintf("signal              ");
1122 					break;
1123 
1124 				case THREAD_BLOCK_TYPE_MUTEX:
1125 					kprintf("mutex  %p   ", thread->wait.object);
1126 					break;
1127 
1128 				case THREAD_BLOCK_TYPE_RW_LOCK:
1129 					kprintf("rwlock %p   ", thread->wait.object);
1130 					break;
1131 
1132 				case THREAD_BLOCK_TYPE_OTHER:
1133 					kprintf("other               ");
1134 					break;
1135 
1136 				default:
1137 					kprintf("???    %p   ", thread->wait.object);
1138 					break;
1139 			}
1140 		} else
1141 			kprintf("        -           ");
1142 
1143 		// on which CPU does it run?
1144 		if (thread->cpu)
1145 			kprintf("%2d", thread->cpu->cpu_num);
1146 		else
1147 			kprintf(" -");
1148 
1149 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1150 			(void *)thread->kernel_stack_base, thread->team->id,
1151 			thread->name != NULL ? thread->name : "<NULL>");
1152 
1153 		return;
1154 	}
1155 
1156 	// print the long info
1157 
1158 	struct death_entry *death = NULL;
1159 
1160 	kprintf("THREAD: %p\n", thread);
1161 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1162 	kprintf("name:               \"%s\"\n", thread->name);
1163 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1164 		thread->all_next, thread->team_next, thread->queue_next);
1165 	kprintf("priority:           %ld (next %ld, I/O: %ld)\n", thread->priority,
1166 		thread->next_priority, thread->io_priority);
1167 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1168 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1169 	kprintf("cpu:                %p ", thread->cpu);
1170 	if (thread->cpu)
1171 		kprintf("(%d)\n", thread->cpu->cpu_num);
1172 	else
1173 		kprintf("\n");
1174 	kprintf("sig_pending:        %#lx (blocked: %#lx)\n", thread->sig_pending,
1175 		thread->sig_block_mask);
1176 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1177 
1178 	if (thread->state == B_THREAD_WAITING) {
1179 		kprintf("waiting for:        ");
1180 
1181 		switch (thread->wait.type) {
1182 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1183 			{
1184 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1185 				if (sem == thread->msg.read_sem)
1186 					kprintf("data\n");
1187 				else
1188 					kprintf("semaphore %ld\n", sem);
1189 				break;
1190 			}
1191 
1192 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1193 				kprintf("condition variable %p\n", thread->wait.object);
1194 				break;
1195 
1196 			case THREAD_BLOCK_TYPE_SNOOZE:
1197 				kprintf("snooze()\n");
1198 				break;
1199 
1200 			case THREAD_BLOCK_TYPE_SIGNAL:
1201 				kprintf("signal\n");
1202 				break;
1203 
1204 			case THREAD_BLOCK_TYPE_MUTEX:
1205 				kprintf("mutex %p\n", thread->wait.object);
1206 				break;
1207 
1208 			case THREAD_BLOCK_TYPE_RW_LOCK:
1209 				kprintf("rwlock %p\n", thread->wait.object);
1210 				break;
1211 
1212 			case THREAD_BLOCK_TYPE_OTHER:
1213 				kprintf("other (%s)\n", (char*)thread->wait.object);
1214 				break;
1215 
1216 			default:
1217 				kprintf("unknown (%p)\n", thread->wait.object);
1218 				break;
1219 		}
1220 	}
1221 
1222 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1223 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1224 	kprintf("entry:              %p\n", (void *)thread->entry);
1225 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1226 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1227 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1228 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1229 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1230 	kprintf("  exit.waiters:\n");
1231 	while ((death = (struct death_entry*)list_get_next_item(
1232 			&thread->exit.waiters, death)) != NULL) {
1233 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1234 	}
1235 
1236 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1237 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1238 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1239 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1240 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1241 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1242 		strerror(thread->kernel_errno));
1243 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1244 	kprintf("user_time:          %Ld\n", thread->user_time);
1245 	kprintf("flags:              0x%lx\n", thread->flags);
1246 	kprintf("architecture dependent section:\n");
1247 	arch_thread_dump_info(&thread->arch_info);
1248 }
1249 
1250 
1251 static int
1252 dump_thread_info(int argc, char **argv)
1253 {
1254 	bool shortInfo = false;
1255 	int argi = 1;
1256 	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
1257 		shortInfo = true;
1258 		print_thread_list_table_head();
1259 		argi++;
1260 	}
1261 
1262 	if (argi == argc) {
1263 		_dump_thread_info(thread_get_current_thread(), shortInfo);
1264 		return 0;
1265 	}
1266 
1267 	for (; argi < argc; argi++) {
1268 		const char *name = argv[argi];
1269 		int32 id = strtoul(name, NULL, 0);
1270 
1271 		if (IS_KERNEL_ADDRESS(id)) {
1272 			// semi-hack
1273 			_dump_thread_info((struct thread *)id, shortInfo);
1274 			continue;
1275 		}
1276 
1277 		// walk through the thread list, trying to match name or id
1278 		bool found = false;
1279 		struct hash_iterator i;
1280 		hash_open(sThreadHash, &i);
1281 		struct thread *thread;
1282 		while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1283 			if (!strcmp(name, thread->name) || thread->id == id) {
1284 				_dump_thread_info(thread, shortInfo);
1285 				found = true;
1286 				break;
1287 			}
1288 		}
1289 		hash_close(sThreadHash, &i, false);
1290 
1291 		if (!found)
1292 			kprintf("thread \"%s\" (%ld) doesn't exist!\n", name, id);
1293 	}
1294 
1295 	return 0;
1296 }
1297 
1298 
1299 static int
1300 dump_thread_list(int argc, char **argv)
1301 {
1302 	struct thread *thread;
1303 	struct hash_iterator i;
1304 	bool realTimeOnly = false;
1305 	bool calling = false;
1306 	const char *callSymbol = NULL;
1307 	addr_t callStart = 0;
1308 	addr_t callEnd = 0;
1309 	int32 requiredState = 0;
1310 	team_id team = -1;
1311 	sem_id sem = -1;
1312 
1313 	if (!strcmp(argv[0], "realtime"))
1314 		realTimeOnly = true;
1315 	else if (!strcmp(argv[0], "ready"))
1316 		requiredState = B_THREAD_READY;
1317 	else if (!strcmp(argv[0], "running"))
1318 		requiredState = B_THREAD_RUNNING;
1319 	else if (!strcmp(argv[0], "waiting")) {
1320 		requiredState = B_THREAD_WAITING;
1321 
1322 		if (argc > 1) {
1323 			sem = strtoul(argv[1], NULL, 0);
1324 			if (sem == 0)
1325 				kprintf("ignoring invalid semaphore argument.\n");
1326 		}
1327 	} else if (!strcmp(argv[0], "calling")) {
1328 		if (argc < 2) {
1329 			kprintf("Need to give a symbol name or start and end arguments.\n");
1330 			return 0;
1331 		} else if (argc == 3) {
1332 			callStart = parse_expression(argv[1]);
1333 			callEnd = parse_expression(argv[2]);
1334 		} else
1335 			callSymbol = argv[1];
1336 
1337 		calling = true;
1338 	} else if (argc > 1) {
1339 		team = strtoul(argv[1], NULL, 0);
1340 		if (team == 0)
1341 			kprintf("ignoring invalid team argument.\n");
1342 	}
1343 
1344 	print_thread_list_table_head();
1345 
1346 	hash_open(sThreadHash, &i);
1347 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1348 		// filter out threads not matching the search criteria
1349 		if ((requiredState && thread->state != requiredState)
1350 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1351 					callStart, callEnd))
1352 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1353 			|| (team > 0 && thread->team->id != team)
1354 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1355 			continue;
1356 
1357 		_dump_thread_info(thread, true);
1358 	}
1359 	hash_close(sThreadHash, &i, false);
1360 	return 0;
1361 }
1362 
1363 
1364 //	#pragma mark - private kernel API
1365 
1366 
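/*!
	Exits the current thread. The thread is moved into the kernel team, its
	resources are torn down, waiters and listeners are notified, and the
	remains are handed to the undertaker. If this is a team's main thread, the
	whole team is deleted as well. This function does not return.
*/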
1367 void
1368 thread_exit(void)
1369 {
1370 	cpu_status state;
1371 	struct thread *thread = thread_get_current_thread();
1372 	struct team *team = thread->team;
1373 	thread_id parentID = -1;
1374 	bool deleteTeam = false;
1375 	sem_id cachedDeathSem = -1;
1376 	status_t status;
1377 	struct thread_debug_info debugInfo;
1378 	team_id teamID = team->id;
1379 
1380 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1381 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1382 			? "due to signal" : "normally", thread->exit.status));
1383 
1384 	if (!are_interrupts_enabled())
1385 		panic("thread_exit() called with interrupts disabled!\n");
1386 
1387 	// boost our priority to get this over with
1388 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1389 
1390 	// Cancel previously installed alarm timer, if any
1391 	cancel_timer(&thread->alarm);
1392 
1393 	// delete the user stack area first, we won't need it anymore
1394 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1395 		area_id area = thread->user_stack_area;
1396 		thread->user_stack_area = -1;
1397 		vm_delete_area(team->id, area, true);
1398 	}
1399 
1400 	struct job_control_entry *death = NULL;
1401 	struct death_entry* threadDeathEntry = NULL;
1402 
1403 	if (team != team_get_kernel_team()) {
1404 		user_debug_thread_exiting(thread);
1405 
1406 		if (team->main_thread == thread) {
1407 			// this was the main thread in this team, so we will delete that as well
1408 			deleteTeam = true;
1409 		} else {
1410 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1411 			team_free_user_thread(thread);
1412 		}
1413 
1414 		// remove this thread from the current team and add it to the kernel
1415 		// put the thread into the kernel team until it dies
1416 		state = disable_interrupts();
1417 		GRAB_TEAM_LOCK();
1418 		GRAB_THREAD_LOCK();
1419 			// removing the thread and putting its death entry to the parent
1420 			// team needs to be an atomic operation
1421 
1422 		// remember how long this thread lasted
1423 		team->dead_threads_kernel_time += thread->kernel_time;
1424 		team->dead_threads_user_time += thread->user_time;
1425 
1426 		remove_thread_from_team(team, thread);
1427 		insert_thread_into_team(team_get_kernel_team(), thread);
1428 
1429 		cachedDeathSem = team->death_sem;
1430 
1431 		if (deleteTeam) {
1432 			struct team *parent = team->parent;
1433 
1434 			// remember who our parent was so we can send a signal
1435 			parentID = parent->id;
1436 
1437 			// Set the team job control state to "dead" and detach the job
1438 			// control entry from our team struct.
1439 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1440 			death = team->job_control_entry;
1441 			team->job_control_entry = NULL;
1442 
1443 			if (death != NULL) {
1444 				death->InitDeadState();
1445 
1446 				// team_set_job_control_state() already moved our entry
1447 				// into the parent's list. We just check the soft limit of
1448 				// death entries.
1449 				if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
1450 					death = parent->dead_children->entries.RemoveHead();
1451 					parent->dead_children->count--;
1452 				} else
1453 					death = NULL;
1454 
1455 				RELEASE_THREAD_LOCK();
1456 			} else
1457 				RELEASE_THREAD_LOCK();
1458 
1459 			team_remove_team(team);
1460 
1461 			send_signal_etc(parentID, SIGCHLD,
1462 				SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
1463 		} else {
1464 			// The thread is not the main thread. We store a thread death
1465 			// entry for it, unless someone is already waiting for it.
1466 			if (threadDeathEntry != NULL
1467 				&& list_is_empty(&thread->exit.waiters)) {
1468 				threadDeathEntry->thread = thread->id;
1469 				threadDeathEntry->status = thread->exit.status;
1470 				threadDeathEntry->reason = thread->exit.reason;
1471 				threadDeathEntry->signal = thread->exit.signal;
1472 
1473 				// add entry -- remove an old one, if we hit the limit
1474 				list_add_item(&team->dead_threads, threadDeathEntry);
1475 				team->dead_threads_count++;
1476 				threadDeathEntry = NULL;
1477 
1478 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1479 					threadDeathEntry = (death_entry*)list_remove_head_item(
1480 						&team->dead_threads);
1481 					team->dead_threads_count--;
1482 				}
1483 			}
1484 
1485 			RELEASE_THREAD_LOCK();
1486 		}
1487 
1488 		RELEASE_TEAM_LOCK();
1489 
1490 		// swap address spaces, to make sure we're running on the kernel's pgdir
1491 		vm_swap_address_space(team->address_space, vm_kernel_address_space());
1492 		restore_interrupts(state);
1493 
1494 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1495 	}
1496 
1497 	if (threadDeathEntry != NULL)
1498 		free(threadDeathEntry);
1499 
1500 	// delete the team if we're its main thread
1501 	if (deleteTeam) {
1502 		team_delete_team(team);
1503 
1504 		// we need to delete any death entry that made it to here
1505 		if (death != NULL)
1506 			delete death;
1507 
1508 		cachedDeathSem = -1;
1509 	}
1510 
1511 	state = disable_interrupts();
1512 	GRAB_THREAD_LOCK();
1513 
1514 	// remove thread from hash, so it's no longer accessible
1515 	hash_remove(sThreadHash, thread);
1516 	sUsedThreads--;
1517 
1518 	// Stop debugging for this thread
1519 	debugInfo = thread->debug_info;
1520 	clear_thread_debug_info(&thread->debug_info, true);
1521 
1522 	// Remove the select infos. We notify them a little later.
1523 	select_info* selectInfos = thread->select_infos;
1524 	thread->select_infos = NULL;
1525 
1526 	RELEASE_THREAD_LOCK();
1527 	restore_interrupts(state);
1528 
1529 	destroy_thread_debug_info(&debugInfo);
1530 
1531 	// notify select infos
1532 	select_info* info = selectInfos;
1533 	while (info != NULL) {
1534 		select_sync* sync = info->sync;
1535 
1536 		notify_select_events(info, B_EVENT_INVALID);
1537 		info = info->next;
1538 		put_select_sync(sync);
1539 	}
1540 
1541 	// notify listeners
1542 	sNotificationService.Notify(THREAD_REMOVED, thread);
1543 
1544 	// shutdown the thread messaging
1545 
1546 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1547 	if (status == B_WOULD_BLOCK) {
1548 		// there is data waiting for us, so let us eat it
1549 		thread_id sender;
1550 
1551 		delete_sem(thread->msg.write_sem);
1552 			// first, let's remove all possibly waiting writers
1553 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1554 	} else {
1555 		// we probably own the semaphore here, and we're the last to do so
1556 		delete_sem(thread->msg.write_sem);
1557 	}
1558 	// now we can safely remove the msg.read_sem
1559 	delete_sem(thread->msg.read_sem);
1560 
1561 	// fill all death entries and delete the sem that others will use to wait on us
1562 	{
1563 		sem_id cachedExitSem = thread->exit.sem;
1564 		cpu_status state;
1565 
1566 		state = disable_interrupts();
1567 		GRAB_THREAD_LOCK();
1568 
1569 		// make sure no one will grab this semaphore again
1570 		thread->exit.sem = -1;
1571 
1572 		// fill all death entries
1573 		death_entry* entry = NULL;
1574 		while ((entry = (struct death_entry*)list_get_next_item(
1575 				&thread->exit.waiters, entry)) != NULL) {
1576 			entry->status = thread->exit.status;
1577 			entry->reason = thread->exit.reason;
1578 			entry->signal = thread->exit.signal;
1579 		}
1580 
1581 		RELEASE_THREAD_LOCK();
1582 		restore_interrupts(state);
1583 
1584 		delete_sem(cachedExitSem);
1585 	}
1586 
1587 	// notify the debugger
1588 	if (teamID != team_get_kernel_team_id())
1589 		user_debug_thread_deleted(teamID, thread->id);
1590 
1591 	// enqueue in the undertaker list and reschedule for the last time
1592 	UndertakerEntry undertakerEntry(thread, teamID, cachedDeathSem);
1593 
1594 	disable_interrupts();
1595 	GRAB_THREAD_LOCK();
1596 
1597 	sUndertakerEntries.Add(&undertakerEntry);
1598 	sUndertakerCondition.NotifyOne(true);
1599 
1600 	thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
1601 	scheduler_reschedule();
1602 
1603 	panic("never can get here\n");
1604 }
1605 
1606 
1607 struct thread *
1608 thread_get_thread_struct(thread_id id)
1609 {
1610 	struct thread *thread;
1611 	cpu_status state;
1612 
1613 	state = disable_interrupts();
1614 	GRAB_THREAD_LOCK();
1615 
1616 	thread = thread_get_thread_struct_locked(id);
1617 
1618 	RELEASE_THREAD_LOCK();
1619 	restore_interrupts(state);
1620 
1621 	return thread;
1622 }
1623 
1624 
1625 struct thread *
1626 thread_get_thread_struct_locked(thread_id id)
1627 {
1628 	struct thread_key key;
1629 
1630 	key.id = id;
1631 
1632 	return (struct thread*)hash_lookup(sThreadHash, &key);
1633 }
1634 
1635 
1636 /*!
1637 	Called in the interrupt handler code when a thread enters
1638 	the kernel for any reason.
1639 	Only tracks time for now.
1640 	Interrupts are disabled.
1641 */
1642 void
1643 thread_at_kernel_entry(bigtime_t now)
1644 {
1645 	struct thread *thread = thread_get_current_thread();
1646 
1647 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1648 
1649 	// track user time
1650 	thread->user_time += now - thread->last_time;
1651 	thread->last_time = now;
1652 
1653 	thread->in_kernel = true;
1654 }
1655 
1656 
1657 /*!
1658 	Called whenever a thread exits kernel space to user space.
1659 	Tracks time, handles signals, ...
1660 	Interrupts must be enabled. When the function returns, interrupts will be
1661 	disabled.
1662 */
1663 void
1664 thread_at_kernel_exit(void)
1665 {
1666 	struct thread *thread = thread_get_current_thread();
1667 
1668 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1669 
1670 	while (handle_signals(thread)) {
1671 		InterruptsSpinLocker _(gThreadSpinlock);
1672 		scheduler_reschedule();
1673 	}
1674 
1675 	disable_interrupts();
1676 
1677 	thread->in_kernel = false;
1678 
1679 	// track kernel time
1680 	bigtime_t now = system_time();
1681 	thread->kernel_time += now - thread->last_time;
1682 	thread->last_time = now;
1683 }
1684 
1685 
1686 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1687 	and no debugging shall be done.
1688 	Interrupts must be disabled.
1689 */
1690 void
1691 thread_at_kernel_exit_no_signals(void)
1692 {
1693 	struct thread *thread = thread_get_current_thread();
1694 
1695 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1696 
1697 	thread->in_kernel = false;
1698 
1699 	// track kernel time
1700 	bigtime_t now = system_time();
1701 	thread->kernel_time += now - thread->last_time;
1702 	thread->last_time = now;
1703 }
1704 
1705 
1706 void
1707 thread_reset_for_exec(void)
1708 {
1709 	struct thread *thread = thread_get_current_thread();
1710 
1711 	cancel_timer(&thread->alarm);
1712 	reset_signals(thread);
1713 }
1714 
1715 
1716 /*! Insert a thread to the tail of a queue */
1717 void
1718 thread_enqueue(struct thread *thread, struct thread_queue *queue)
1719 {
1720 	thread->queue_next = NULL;
1721 	if (queue->head == NULL) {
1722 		queue->head = thread;
1723 		queue->tail = thread;
1724 	} else {
1725 		queue->tail->queue_next = thread;
1726 		queue->tail = thread;
1727 	}
1728 }
1729 
1730 
1731 struct thread *
1732 thread_lookat_queue(struct thread_queue *queue)
1733 {
1734 	return queue->head;
1735 }
1736 
1737 
1738 struct thread *
1739 thread_dequeue(struct thread_queue *queue)
1740 {
1741 	struct thread *thread = queue->head;
1742 
1743 	if (thread != NULL) {
1744 		queue->head = thread->queue_next;
1745 		if (queue->tail == thread)
1746 			queue->tail = NULL;
1747 	}
1748 	return thread;
1749 }
1750 
1751 
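/*!	Removes and returns the thread with the given ID from the queue, or NULL
	if no such thread is queued.
*/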
1752 struct thread *
1753 thread_dequeue_id(struct thread_queue *q, thread_id id)
1754 {
1755 	struct thread *thread;
1756 	struct thread *last = NULL;
1757 
1758 	thread = q->head;
1759 	while (thread != NULL) {
1760 		if (thread->id == id) {
1761 			if (last == NULL)
1762 				q->head = thread->queue_next;
1763 			else
1764 				last->queue_next = thread->queue_next;
1765 
1766 			if (q->tail == thread)
1767 				q->tail = last;
1768 			break;
1769 		}
1770 		last = thread;
1771 		thread = thread->queue_next;
1772 	}
1773 	return thread;
1774 }
1775 
1776 
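/*!	Calls the given callback for each thread in the global hash table until
	the callback returns \c true; returns the thread the iteration stopped at,
	or NULL if it ran through all threads.
*/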
1777 struct thread*
1778 thread_iterate_through_threads(thread_iterator_callback callback, void* cookie)
1779 {
1780 	struct hash_iterator iterator;
1781 	hash_open(sThreadHash, &iterator);
1782 
1783 	struct thread* thread;
1784 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
1785 			!= NULL) {
1786 		if (callback(thread, cookie))
1787 			break;
1788 	}
1789 
1790 	hash_close(sThreadHash, &iterator, false);
1791 
1792 	return thread;
1793 }
1794 
1795 
1796 thread_id
1797 allocate_thread_id(void)
1798 {
1799 	return atomic_add(&sNextThreadID, 1);
1800 }
1801 
1802 
1803 thread_id
1804 peek_next_thread_id(void)
1805 {
1806 	return atomic_get(&sNextThreadID);
1807 }
1808 
1809 
1810 /*!	Yield the CPU to other threads.
1811 	If \a force is \c true, the thread will almost guaranteedly be unscheduled.
1812 	If \a force is \c true, the thread is almost guaranteed to be unscheduled.
1813 	If \c false, it may well keep running: if no other thread is ready, or if
1814 	this thread has a higher priority than the other ready threads, it still
1815 	has a good chance to continue.
1816 void
1817 thread_yield(bool force)
1818 {
1819 	if (force) {
1820 		// snooze for roughly 3 thread quantums
1821 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1822 #if 0
1823 		cpu_status state;
1824 
1825 		struct thread *thread = thread_get_current_thread();
1826 		if (thread == NULL)
1827 			return;
1828 
1829 		state = disable_interrupts();
1830 		GRAB_THREAD_LOCK();
1831 
1832 		// mark the thread as yielded, so it will not be scheduled next
1833 		//thread->was_yielded = true;
1834 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1835 		scheduler_reschedule();
1836 
1837 		RELEASE_THREAD_LOCK();
1838 		restore_interrupts(state);
1839 #endif
1840 	} else {
1841 		struct thread *thread = thread_get_current_thread();
1842 		if (thread == NULL)
1843 			return;
1844 
1845 		// Don't force the thread off the CPU, just reschedule.
1846 		InterruptsSpinLocker _(gThreadSpinlock);
1847 		scheduler_reschedule();
1848 	}
1849 }
1850 
1851 
1852 /*!
1853 	Kernel private thread creation function.
1854 
1855 	\param threadID The ID to be assigned to the new thread. If
1856 		  \code < 0 \endcode a fresh one is allocated.
1857 */
1858 thread_id
1859 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1860 	void *arg, team_id team, thread_id threadID)
1861 {
1862 	thread_creation_attributes attributes;
1863 	attributes.entry = (thread_entry_func)function;
1864 	attributes.name = name;
1865 	attributes.priority = priority;
1866 	attributes.args1 = arg;
1867 	attributes.args2 = NULL;
1868 	attributes.stack_address = NULL;
1869 	attributes.stack_size = 0;
1870 	attributes.team = team;
1871 	attributes.thread = threadID;
1872 
1873 	return create_thread(attributes, true);
1874 }
1875 
1876 
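/*!	Waits for the given thread to terminate, subject to \a flags and
	\a timeout. If the thread is already gone, the death entries of the
	current team are consulted instead. On success, the thread's exit status
	is stored in \a _returnCode, if given.
*/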
1877 status_t
1878 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1879 	status_t *_returnCode)
1880 {
1881 	sem_id exitSem = B_BAD_THREAD_ID;
1882 	struct death_entry death;
1883 	job_control_entry* freeDeath = NULL;
1884 	struct thread *thread;
1885 	cpu_status state;
1886 	status_t status = B_OK;
1887 
1888 	if (id < B_OK)
1889 		return B_BAD_THREAD_ID;
1890 
1891 	// we need to resume the thread we're waiting for first
1892 
1893 	state = disable_interrupts();
1894 	GRAB_THREAD_LOCK();
1895 
1896 	thread = thread_get_thread_struct_locked(id);
1897 	if (thread != NULL) {
1898 		// remember the semaphore we have to wait on and place our death entry
1899 		exitSem = thread->exit.sem;
1900 		list_add_link_to_head(&thread->exit.waiters, &death);
1901 	}
1902 
1903 	death_entry* threadDeathEntry = NULL;
1904 
1905 	RELEASE_THREAD_LOCK();
1906 
1907 	if (thread == NULL) {
1908 		// we couldn't find this thread - maybe it's already gone, and we'll
1909 		// find its death entry in our team
1910 		GRAB_TEAM_LOCK();
1911 
1912 		struct team* team = thread_get_current_thread()->team;
1913 
1914 		// check the child death entries first (i.e. main threads of child
1915 		// teams)
1916 		bool deleteEntry;
1917 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1918 		if (freeDeath != NULL) {
1919 			death.status = freeDeath->status;
1920 			if (!deleteEntry)
1921 				freeDeath = NULL;
1922 		} else {
1923 			// check the thread death entries of the team (non-main threads)
1924 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1925 					&team->dead_threads, threadDeathEntry)) != NULL) {
1926 				if (threadDeathEntry->thread == id) {
1927 					list_remove_item(&team->dead_threads, threadDeathEntry);
1928 					team->dead_threads_count--;
1929 					death.status = threadDeathEntry->status;
1930 					break;
1931 				}
1932 			}
1933 
1934 			if (threadDeathEntry == NULL)
1935 				status = B_BAD_THREAD_ID;
1936 		}
1937 
1938 		RELEASE_TEAM_LOCK();
1939 	}
1940 
1941 	restore_interrupts(state);
1942 
1943 	if (thread == NULL && status == B_OK) {
1944 		// we found the thread's death entry in our team
1945 		if (_returnCode)
1946 			*_returnCode = death.status;
1947 
1948 		delete freeDeath;
1949 		free(threadDeathEntry);
1950 		return B_OK;
1951 	}
1952 
1953 	// we need to wait for the death of the thread
1954 
1955 	if (exitSem < B_OK)
1956 		return B_BAD_THREAD_ID;
1957 
1958 	resume_thread(id);
1959 		// make sure we don't wait forever on a suspended thread
1960 
1961 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1962 
1963 	if (status == B_OK) {
1964 		// this should never happen as the thread deletes the semaphore on exit
1965 		panic("could acquire exit_sem for thread %ld\n", id);
1966 	} else if (status == B_BAD_SEM_ID) {
1967 		// this is the way the thread normally exits
1968 		status = B_OK;
1969 
1970 		if (_returnCode)
1971 			*_returnCode = death.status;
1972 	} else {
1973 		// We were probably interrupted; we need to remove our death entry now.
1974 		state = disable_interrupts();
1975 		GRAB_THREAD_LOCK();
1976 
1977 		thread = thread_get_thread_struct_locked(id);
1978 		if (thread != NULL)
1979 			list_remove_link(&death);
1980 
1981 		RELEASE_THREAD_LOCK();
1982 		restore_interrupts(state);
1983 
1984 		// If the thread is already gone, we need to wait for its exit semaphore
1985 		// to make sure our death entry stays valid - it won't take long
1986 		if (thread == NULL)
1987 			acquire_sem(exitSem);
1988 	}
1989 
1990 	return status;
1991 }
1992 
1993 
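/*!	Adds the given select info to the thread's list of select infos and
	acquires a reference to its sync object. Only B_EVENT_INVALID is
	supported at the moment. The \a kernel parameter is currently unused.
*/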
1994 status_t
1995 select_thread(int32 id, struct select_info* info, bool kernel)
1996 {
1997 	InterruptsSpinLocker locker(gThreadSpinlock);
1998 
1999 	// get thread
2000 	struct thread* thread = thread_get_thread_struct_locked(id);
2001 	if (thread == NULL)
2002 		return B_BAD_THREAD_ID;
2003 
2004 	// We support only B_EVENT_INVALID at the moment.
2005 	info->selected_events &= B_EVENT_INVALID;
2006 
2007 	// add info to list
2008 	if (info->selected_events != 0) {
2009 		info->next = thread->select_infos;
2010 		thread->select_infos = info;
2011 
2012 		// we need a sync reference
2013 		atomic_add(&info->sync->ref_count, 1);
2014 	}
2015 
2016 	return B_OK;
2017 }
2018 
2019 
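/*!	Removes the given select info from the thread's list of select infos
	again and surrenders the sync reference acquired by select_thread().
	Returns B_OK even if the info was not found in the list.
*/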
2020 status_t
2021 deselect_thread(int32 id, struct select_info* info, bool kernel)
2022 {
2023 	InterruptsSpinLocker locker(gThreadSpinlock);
2024 
2025 	// get thread
2026 	struct thread* thread = thread_get_thread_struct_locked(id);
2027 	if (thread == NULL)
2028 		return B_BAD_THREAD_ID;
2029 
2030 	// remove info from list
2031 	select_info** infoLocation = &thread->select_infos;
2032 	while (*infoLocation != NULL && *infoLocation != info)
2033 		infoLocation = &(*infoLocation)->next;
2034 
2035 	if (*infoLocation != info)
2036 		return B_OK;
2037 
2038 	*infoLocation = info->next;
2039 
2040 	locker.Unlock();
2041 
2042 	// surrender sync reference
2043 	put_select_sync(info->sync);
2044 
2045 	return B_OK;
2046 }
2047 
2048 
2049 int32
2050 thread_max_threads(void)
2051 {
2052 	return sMaxThreads;
2053 }
2054 
2055 
2056 int32
2057 thread_used_threads(void)
2058 {
2059 	return sUsedThreads;
2060 }
2061 
2062 
2063 const char*
2064 thread_state_to_text(struct thread* thread, int32 state)
2065 {
2066 	return state_to_text(thread, state);
2067 }
2068 
2069 
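/*!	Returns the I/O priority of the thread with the given ID. If the thread
	has no I/O priority set (i.e. it is negative), the thread's scheduling
	priority is returned instead. Returns B_BAD_THREAD_ID if no thread with
	that ID exists.
*/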
2070 int32
2071 thread_get_io_priority(thread_id id)
2072 {
2073 	// take a shortcut if it is the current thread
2074 	struct thread* thread = thread_get_current_thread();
2075 	int32 priority;
2076 	if (id == thread->id) {
2077 		priority = thread->io_priority;
2078 		return priority < 0 ? thread->priority : priority;
2079 	}
2080 
2081 	// not the current thread -- get it
2082 	InterruptsSpinLocker locker(gThreadSpinlock);
2083 
2084 	thread = thread_get_thread_struct_locked(id);
2085 	if (thread == NULL)
2086 		return B_BAD_THREAD_ID;
2087 
2088 	priority = thread->io_priority;
2089 	return priority < 0 ? thread->priority : priority;
2090 }
2091 
2092 
2093 void
2094 thread_set_io_priority(int32 priority)
2095 {
2096 	struct thread* thread = thread_get_current_thread();
2097 	thread->io_priority = priority;
2098 }
2099 
2100 
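/*!	Initializes the threading system: creates the thread hash table, sets up
	an idle thread structure for each CPU, spawns the undertaker thread, and
	registers the thread related debugger commands. Called once during
	kernel initialization.
*/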
2101 status_t
2102 thread_init(kernel_args *args)
2103 {
2104 	uint32 i;
2105 
2106 	TRACE(("thread_init: entry\n"));
2107 
2108 	// create the thread hash table
2109 	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
2110 		&thread_struct_compare, &thread_struct_hash);
2111 
2112 	// zero out the dead thread structure q
2113 	// zero out the dead thread structure queue
2114 
2115 	if (arch_thread_init(args) < B_OK)
2116 		panic("arch_thread_init() failed!\n");
2117 
2118 	// skip all thread IDs including B_SYSTEM_TEAM, which is reserved
2119 	sNextThreadID = B_SYSTEM_TEAM + 1;
2120 
2121 	// create an idle thread for each cpu
2122 
2123 	for (i = 0; i < args->num_cpus; i++) {
2124 		struct thread *thread;
2125 		area_info info;
2126 		char name[64];
2127 
2128 		sprintf(name, "idle thread %lu", i + 1);
2129 		thread = create_thread_struct(&sIdleThreads[i], name,
2130 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
2131 		if (thread == NULL) {
2132 			panic("error creating idle thread struct\n");
2133 			return B_NO_MEMORY;
2134 		}
2135 
2136 		thread->team = team_get_kernel_team();
2137 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
2138 		thread->state = B_THREAD_RUNNING;
2139 		thread->next_state = B_THREAD_READY;
2140 		sprintf(name, "idle thread %lu kstack", i + 1);
2141 		thread->kernel_stack_area = find_area(name);
2142 		thread->entry = NULL;
2143 
2144 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2145 			panic("error finding idle kstack area\n");
2146 
2147 		thread->kernel_stack_base = (addr_t)info.address;
2148 		thread->kernel_stack_top = thread->kernel_stack_base + info.size;
2149 
2150 		hash_insert(sThreadHash, thread);
2151 		insert_thread_into_team(thread->team, thread);
2152 	}
2153 	sUsedThreads = args->num_cpus;
2154 
2155 	// init the notification service
2156 	new(&sNotificationService) ThreadNotificationService();
2157 
2158 	// start the undertaker thread
2159 	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2160 	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2161 
2162 	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2163 		B_DISPLAY_PRIORITY, NULL);
2164 	if (undertakerThread < 0)
2165 		panic("Failed to create undertaker thread!");
2166 	send_signal_etc(undertakerThread, SIGCONT, B_DO_NOT_RESCHEDULE);
2167 
2168 	// set up some debugger commands
2169 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2170 		"[ <team> ]\n"
2171 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2172 		"all threads of the specified team.\n"
2173 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2174 	add_debugger_command_etc("ready", &dump_thread_list,
2175 		"List all ready threads",
2176 		"\n"
2177 		"Prints a list of all threads in ready state.\n", 0);
2178 	add_debugger_command_etc("running", &dump_thread_list,
2179 		"List all running threads",
2180 		"\n"
2181 		"Prints a list of all threads in running state.\n", 0);
2182 	add_debugger_command_etc("waiting", &dump_thread_list,
2183 		"List all waiting threads (optionally for a specific semaphore)",
2184 		"[ <sem> ]\n"
2185 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2186 		"specified, only the threads waiting on that semaphore are listed.\n"
2187 		"  <sem>  - ID of the semaphore.\n", 0);
2188 	add_debugger_command_etc("realtime", &dump_thread_list,
2189 		"List all realtime threads",
2190 		"\n"
2191 		"Prints a list of all threads with realtime priority.\n", 0);
2192 	add_debugger_command_etc("thread", &dump_thread_info,
2193 		"Dump info about a particular thread",
2194 		"[ -s ] ( <id> | <address> | <name> )*\n"
2195 		"Prints information about the specified thread. If no argument is\n"
2196 		"given the current thread is selected.\n"
2197 		"  -s         - Print info in compact table form (like \"threads\").\n"
2198 		"  <id>       - The ID of the thread.\n"
2199 		"  <address>  - The address of the thread structure.\n"
2200 		"  <name>     - The thread's name.\n", 0);
2201 	add_debugger_command_etc("calling", &dump_thread_list,
2202 		"Show all threads that have a specific address in their call chain",
2203 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2204 	add_debugger_command_etc("unreal", &make_thread_unreal,
2205 		"Set realtime priority threads to normal priority",
2206 		"[ <id> ]\n"
2207 		"Sets the priority of all realtime threads or, if given, the one\n"
2208 		"with the specified ID to \"normal\" priority.\n"
2209 		"  <id>  - The ID of the thread.\n", 0);
2210 	add_debugger_command_etc("suspend", &make_thread_suspended,
2211 		"Suspend a thread",
2212 		"[ <id> ]\n"
2213 		"Suspends the thread with the given ID. If no ID argument is given\n"
2214 		"the current thread is selected.\n"
2215 		"  <id>  - The ID of the thread.\n", 0);
2216 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2217 		"<id>\n"
2218 		"Resumes the specified thread, if it is currently suspended.\n"
2219 		"  <id>  - The ID of the thread.\n", 0);
2220 	add_debugger_command_etc("drop", &drop_into_debugger,
2221 		"Drop a thread into the userland debugger",
2222 		"<id>\n"
2223 		"Drops the specified (userland) thread into the userland debugger\n"
2224 		"after leaving the kernel debugger.\n"
2225 		"  <id>  - The ID of the thread.\n", 0);
2226 	add_debugger_command_etc("priority", &set_thread_prio,
2227 		"Set a thread's priority",
2228 		"<priority> [ <id> ]\n"
2229 		"Sets the priority of the thread with the specified ID to the given\n"
2230 		"priority. If no thread ID is given, the current thread is selected.\n"
2231 		"  <priority>  - The thread's new priority (0 - 120)\n"
2232 		"  <id>        - The ID of the thread.\n", 0);
2233 
2234 	return B_OK;
2235 }
2236 
2237 
2238 status_t
2239 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2240 {
2241 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2242 	// so that get_current_cpu and friends will work, which is crucial for
2243 	// a lot of low level routines
2244 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2245 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2246 	return B_OK;
2247 }
2248 
2249 
2250 //	#pragma mark - thread blocking API
2251 
2252 
2253 static status_t
2254 thread_block_timeout(timer* timer)
2255 {
2256 	// The timer has been installed with B_TIMER_ACQUIRE_THREAD_LOCK, so
2257 	// we're holding the thread lock already. This makes things comfortably
2258 	// easy.
2259 
2260 	struct thread* thread = (struct thread*)timer->user_data;
2261 	if (thread_unblock_locked(thread, B_TIMED_OUT)) {
2262 		// We actually woke up the thread. If it has a higher priority than the
2263 		// currently running thread, we invoke the scheduler.
2264 		// TODO: Is this really such a good idea or should we do that only when
2265 		// the woken up thread has realtime priority?
2266 		if (thread->priority > thread_get_current_thread()->priority)
2267 			return B_INVOKE_SCHEDULER;
2268 	}
2269 
2270 	return B_HANDLED_INTERRUPT;
2271 }
2272 
2273 
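/*!	Blocks the current thread, as prepared by a preceding
	thread_prepare_to_block() call (see e.g. snooze_etc()). Acquires the
	thread spinlock itself.
*/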
2274 status_t
2275 thread_block()
2276 {
2277 	InterruptsSpinLocker _(gThreadSpinlock);
2278 	return thread_block_locked(thread_get_current_thread());
2279 }
2280 
2281 
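/*!	Unblocks the thread with the given ID with the given wait status.
	Acquires the thread spinlock itself. Returns whether the thread was
	actually woken up, or \c false if no thread with that ID exists.
*/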
2282 bool
2283 thread_unblock(thread_id threadID, status_t status)
2284 {
2285 	InterruptsSpinLocker _(gThreadSpinlock);
2286 
2287 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2288 	if (thread == NULL)
2289 		return false;
2290 	return thread_unblock_locked(thread, status);
2291 }
2292 
2293 
2294 status_t
2295 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2296 {
2297 	InterruptsSpinLocker _(gThreadSpinlock);
2298 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
2299 }
2300 
2301 
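/*!	Blocks the current thread with a timeout; the thread spinlock must be
	held. If a finite relative or absolute timeout was specified, a one-shot
	kernel timer is installed that unblocks the thread with B_TIMED_OUT; the
	timer is cancelled again if the thread is unblocked for another reason.
*/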
2302 status_t
2303 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
2304 {
2305 	struct thread* thread = thread_get_current_thread();
2306 
2307 	if (thread->wait.status != 1)
2308 		return thread->wait.status;
2309 
2310 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2311 		&& timeout != B_INFINITE_TIMEOUT;
2312 
2313 	if (useTimer) {
2314 		// Timer flags: absolute/relative + "acquire thread lock". The latter
2315 		// avoids nasty race conditions and deadlock problems that could
2316 		// otherwise occur between our cancel_timer() and a concurrently
2317 		// executing thread_block_timeout().
2318 		uint32 timerFlags;
2319 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2320 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2321 		} else {
2322 			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2323 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2324 				timeout -= rtc_boot_time();
2325 		}
2326 		timerFlags |= B_TIMER_ACQUIRE_THREAD_LOCK;
2327 
2328 		// install the timer
2329 		thread->wait.unblock_timer.user_data = thread;
2330 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2331 			timerFlags);
2332 	}
2333 
2334 	// block
2335 	status_t error = thread_block_locked(thread);
2336 
2337 	// cancel timer, if it didn't fire
2338 	if (error != B_TIMED_OUT && useTimer)
2339 		cancel_timer(&thread->wait.unblock_timer);
2340 
2341 	return error;
2342 }
2343 
2344 
2345 /*!	Thread spinlock must be held.
2346 */
2347 static status_t
2348 user_unblock_thread(thread_id threadID, status_t status)
2349 {
2350 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2351 	if (thread == NULL)
2352 		return B_BAD_THREAD_ID;
2353 	if (thread->user_thread == NULL)
2354 		return B_NOT_ALLOWED;
2355 
2356 	if (thread->user_thread->wait_status > 0) {
2357 		thread->user_thread->wait_status = status;
2358 		thread_unblock_locked(thread, status);
2359 	}
2360 
2361 	return B_OK;
2362 }
2363 
2364 
2365 //	#pragma mark - public kernel API
2366 
2367 
2368 void
2369 exit_thread(status_t returnValue)
2370 {
2371 	struct thread *thread = thread_get_current_thread();
2372 
2373 	thread->exit.status = returnValue;
2374 	thread->exit.reason = THREAD_RETURN_EXIT;
2375 
2376 	// If called from a kernel thread, we don't deliver the signal;
2377 	// we just exit directly, to keep the userland behaviour of this
2378 	// function.
2379 	if (thread->team != team_get_kernel_team())
2380 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2381 	else
2382 		thread_exit();
2383 }
2384 
2385 
2386 status_t
2387 kill_thread(thread_id id)
2388 {
2389 	if (id <= 0)
2390 		return B_BAD_VALUE;
2391 
2392 	return send_signal(id, SIGKILLTHR);
2393 }
2394 
2395 
2396 status_t
2397 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2398 {
2399 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2400 }
2401 
2402 
2403 int32
2404 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2405 {
2406 	return receive_data_etc(sender, buffer, bufferSize, 0);
2407 }
2408 
2409 
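/*!	Returns whether there are any messages pending for the calling thread.
	Note: despite its \a thread parameter, this implementation checks the
	message semaphore of the current thread.
*/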
2410 bool
2411 has_data(thread_id thread)
2412 {
2413 	int32 count;
2414 
2415 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
2416 			&count) != B_OK)
2417 		return false;
2418 
2419 	return count == 0 ? false : true;
2420 	return count != 0;
2421 
2422 
2423 status_t
2424 _get_thread_info(thread_id id, thread_info *info, size_t size)
2425 {
2426 	status_t status = B_OK;
2427 	struct thread *thread;
2428 	cpu_status state;
2429 
2430 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2431 		return B_BAD_VALUE;
2432 
2433 	state = disable_interrupts();
2434 	GRAB_THREAD_LOCK();
2435 
2436 	thread = thread_get_thread_struct_locked(id);
2437 	if (thread == NULL) {
2438 		status = B_BAD_VALUE;
2439 		goto err;
2440 	}
2441 
2442 	fill_thread_info(thread, info, size);
2443 
2444 err:
2445 	RELEASE_THREAD_LOCK();
2446 	restore_interrupts(state);
2447 
2448 	return status;
2449 }
2450 
2451 
2452 status_t
2453 _get_next_thread_info(team_id team, int32 *_cookie, thread_info *info,
2454 	size_t size)
2455 {
2456 	status_t status = B_BAD_VALUE;
2457 	struct thread *thread = NULL;
2458 	cpu_status state;
2459 	int slot;
2460 	thread_id lastThreadID;
2461 
2462 	if (info == NULL || size != sizeof(thread_info) || team < B_OK)
2463 		return B_BAD_VALUE;
2464 
2465 	if (team == B_CURRENT_TEAM)
2466 		team = team_get_current_team_id();
2467 	else if (!team_is_valid(team))
2468 		return B_BAD_VALUE;
2469 
2470 	slot = *_cookie;
2471 
2472 	state = disable_interrupts();
2473 	GRAB_THREAD_LOCK();
2474 
2475 	lastThreadID = peek_next_thread_id();
2476 	if (slot >= lastThreadID)
2477 		goto err;
2478 
2479 	while (slot < lastThreadID
2480 		&& (!(thread = thread_get_thread_struct_locked(slot))
2481 			|| thread->team->id != team))
2482 		slot++;
2483 
2484 	if (thread != NULL && thread->team->id == team) {
2485 		fill_thread_info(thread, info, size);
2486 
2487 		*_cookie = slot + 1;
2488 		status = B_OK;
2489 	}
2490 
2491 err:
2492 	RELEASE_THREAD_LOCK();
2493 	restore_interrupts(state);
2494 
2495 	return status;
2496 }
2497 
2498 
2499 thread_id
2500 find_thread(const char *name)
2501 {
2502 	struct hash_iterator iterator;
2503 	struct thread *thread;
2504 	cpu_status state;
2505 
2506 	if (name == NULL)
2507 		return thread_get_current_thread_id();
2508 
2509 	state = disable_interrupts();
2510 	GRAB_THREAD_LOCK();
2511 
2512 	// ToDo: this might not be in the same order as find_thread() in BeOS
2513 	//		which could be theoretically problematic.
2514 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2515 	//		cheap either - although this function is probably used very rarely.
2516 
2517 	hash_open(sThreadHash, &iterator);
2518 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
2519 			!= NULL) {
2520 		// Search through hash
2521 		if (thread->name != NULL && !strcmp(thread->name, name)) {
2522 			thread_id id = thread->id;
2523 
2524 			RELEASE_THREAD_LOCK();
2525 			restore_interrupts(state);
2526 			return id;
2527 		}
2528 	}
2529 
2530 	RELEASE_THREAD_LOCK();
2531 	restore_interrupts(state);
2532 
2533 	return B_NAME_NOT_FOUND;
2534 }
2535 
2536 
2537 status_t
2538 rename_thread(thread_id id, const char *name)
2539 {
2540 	struct thread *thread = thread_get_current_thread();
2541 	status_t status = B_BAD_THREAD_ID;
2542 	cpu_status state;
2543 
2544 	if (name == NULL)
2545 		return B_BAD_VALUE;
2546 
2547 	state = disable_interrupts();
2548 	GRAB_THREAD_LOCK();
2549 
2550 	if (thread->id != id)
2551 		thread = thread_get_thread_struct_locked(id);
2552 
2553 	if (thread != NULL) {
2554 		if (thread->team == thread_get_current_thread()->team) {
2555 			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2556 			status = B_OK;
2557 		} else
2558 			status = B_NOT_ALLOWED;
2559 	}
2560 
2561 	RELEASE_THREAD_LOCK();
2562 	restore_interrupts(state);
2563 
2564 	return status;
2565 }
2566 
2567 
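/*!	Sets the priority of the thread with the given ID, clamping the value to
	the valid range, and returns the thread's previous priority. Idle
	threads cannot be changed (B_NOT_ALLOWED); an unknown ID yields
	B_BAD_THREAD_ID.
*/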
2568 status_t
2569 set_thread_priority(thread_id id, int32 priority)
2570 {
2571 	struct thread *thread;
2572 	int32 oldPriority;
2573 
2574 	// make sure the passed in priority is within bounds
2575 	if (priority > THREAD_MAX_SET_PRIORITY)
2576 		priority = THREAD_MAX_SET_PRIORITY;
2577 	if (priority < THREAD_MIN_SET_PRIORITY)
2578 		priority = THREAD_MIN_SET_PRIORITY;
2579 
2580 	thread = thread_get_current_thread();
2581 	if (thread->id == id) {
2582 		if (thread_is_idle_thread(thread))
2583 			return B_NOT_ALLOWED;
2584 
2585 		// It's our own thread, so we know it isn't in the run queue, and we
2586 		// can manipulate its structure directly.
2587 		oldPriority = thread->priority;
2588 			// Note that this might not return the correct value if we are
2589 			// preempted here, and another thread changes our priority before
2590 			// the next line is executed.
2591 		thread->priority = thread->next_priority = priority;
2592 	} else {
2593 		InterruptsSpinLocker _(gThreadSpinlock);
2594 
2595 		thread = thread_get_thread_struct_locked(id);
2596 		if (thread == NULL)
2597 			return B_BAD_THREAD_ID;
2598 
2599 		if (thread_is_idle_thread(thread))
2600 			return B_NOT_ALLOWED;
2601 
2602 		oldPriority = thread->priority;
2603 		scheduler_set_thread_priority(thread, priority);
2604 	}
2605 
2606 	return oldPriority;
2607 }
2608 
2609 
2610 status_t
2611 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2612 {
2613 	status_t status;
2614 
2615 	if (timebase != B_SYSTEM_TIMEBASE)
2616 		return B_BAD_VALUE;
2617 
2618 	InterruptsSpinLocker _(gThreadSpinlock);
2619 	struct thread* thread = thread_get_current_thread();
2620 
2621 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, NULL);
2622 	status = thread_block_with_timeout_locked(flags, timeout);
2623 
2624 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2625 		return B_OK;
2626 
2627 	return status;
2628 }
2629 
2630 
2631 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2632 status_t
2633 snooze(bigtime_t timeout)
2634 {
2635 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2636 }
2637 
2638 
2639 /*!
2640 	snooze_until() for internal kernel use only; doesn't interrupt on
2641 	signals.
2642 */
2643 status_t
2644 snooze_until(bigtime_t timeout, int timebase)
2645 {
2646 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2647 }
2648 
2649 
2650 status_t
2651 wait_for_thread(thread_id thread, status_t *_returnCode)
2652 {
2653 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2654 }
2655 
2656 
2657 status_t
2658 suspend_thread(thread_id id)
2659 {
2660 	if (id <= 0)
2661 		return B_BAD_VALUE;
2662 
2663 	return send_signal(id, SIGSTOP);
2664 }
2665 
2666 
2667 status_t
2668 resume_thread(thread_id id)
2669 {
2670 	if (id <= 0)
2671 		return B_BAD_VALUE;
2672 
2673 	return send_signal_etc(id, SIGCONT, SIGNAL_FLAG_DONT_RESTART_SYSCALL);
2674 		// This retains compatibility with BeOS, which documents that the
2675 		// combination of suspend_thread() and resume_thread() interrupts
2676 		// threads waiting on semaphores.
2677 }
2678 
2679 
2680 thread_id
2681 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2682 	void *arg)
2683 {
2684 	thread_creation_attributes attributes;
2685 	attributes.entry = (thread_entry_func)function;
2686 	attributes.name = name;
2687 	attributes.priority = priority;
2688 	attributes.args1 = arg;
2689 	attributes.args2 = NULL;
2690 	attributes.stack_address = NULL;
2691 	attributes.stack_size = 0;
2692 	attributes.team = team_get_kernel_team()->id;
2693 	attributes.thread = -1;
2694 
2695 	return create_thread(attributes, true);
2696 }
2697 
2698 
2699 int
2700 getrlimit(int resource, struct rlimit * rlp)
2701 {
2702 	status_t error = common_getrlimit(resource, rlp);
2703 	if (error != B_OK) {
2704 		errno = error;
2705 		return -1;
2706 	}
2707 
2708 	return 0;
2709 }
2710 
2711 
2712 int
2713 setrlimit(int resource, const struct rlimit * rlp)
2714 {
2715 	status_t error = common_setrlimit(resource, rlp);
2716 	if (error != B_OK) {
2717 		errno = error;
2718 		return -1;
2719 	}
2720 
2721 	return 0;
2722 }
2723 
2724 
2725 //	#pragma mark - syscalls
2726 
2727 
2728 void
2729 _user_exit_thread(status_t returnValue)
2730 {
2731 	exit_thread(returnValue);
2732 }
2733 
2734 
2735 status_t
2736 _user_kill_thread(thread_id thread)
2737 {
2738 	return kill_thread(thread);
2739 }
2740 
2741 
2742 status_t
2743 _user_resume_thread(thread_id thread)
2744 {
2745 	return resume_thread(thread);
2746 }
2747 
2748 
2749 status_t
2750 _user_suspend_thread(thread_id thread)
2751 {
2752 	return suspend_thread(thread);
2753 }
2754 
2755 
2756 status_t
2757 _user_rename_thread(thread_id thread, const char *userName)
2758 {
2759 	char name[B_OS_NAME_LENGTH];
2760 
2761 	if (!IS_USER_ADDRESS(userName)
2762 		|| userName == NULL
2763 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2764 		return B_BAD_ADDRESS;
2765 
2766 	return rename_thread(thread, name);
2767 }
2768 
2769 
2770 int32
2771 _user_set_thread_priority(thread_id thread, int32 newPriority)
2772 {
2773 	return set_thread_priority(thread, newPriority);
2774 }
2775 
2776 
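/*!	Creates a new userland thread from attributes copied in from userland.
	Validates all userland addresses and the requested stack size, copies
	the thread name to a kernel buffer, and notifies the user debugger about
	the newly created thread.
*/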
2777 thread_id
2778 _user_spawn_thread(thread_creation_attributes* userAttributes)
2779 {
2780 	thread_creation_attributes attributes;
2781 	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
2782 		|| user_memcpy(&attributes, userAttributes,
2783 				sizeof(attributes)) != B_OK) {
2784 		return B_BAD_ADDRESS;
2785 	}
2786 
2787 	if (attributes.stack_size != 0
2788 		&& (attributes.stack_size < MIN_USER_STACK_SIZE
2789 			|| attributes.stack_size > MAX_USER_STACK_SIZE)) {
2790 		return B_BAD_VALUE;
2791 	}
2792 
2793 	char name[B_OS_NAME_LENGTH];
2794 	thread_id threadID;
2795 
2796 	if (!IS_USER_ADDRESS(attributes.entry) || attributes.entry == NULL
2797 		|| (attributes.stack_address != NULL
2798 			&& !IS_USER_ADDRESS(attributes.stack_address))
2799 		|| (attributes.name != NULL && (!IS_USER_ADDRESS(attributes.name)
2800 			|| user_strlcpy(name, attributes.name, B_OS_NAME_LENGTH) < 0)))
2801 		return B_BAD_ADDRESS;
2802 
2803 	attributes.name = attributes.name != NULL ? name : "user thread";
2804 	attributes.team = thread_get_current_thread()->team->id;
2805 	attributes.thread = -1;
2806 
2807 	threadID = create_thread(attributes, false);
2808 
2809 	if (threadID >= 0)
2810 		user_debug_thread_created(threadID);
2811 
2812 	return threadID;
2813 }
2814 
2815 
2816 status_t
2817 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2818 {
2819 	// NOTE: We only know the system timebase at the moment.
2820 	syscall_restart_handle_timeout_pre(flags, timeout);
2821 
2822 	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2823 
2824 	return syscall_restart_handle_timeout_post(error, timeout);
2825 }
2826 
2827 
2828 void
2829 _user_thread_yield(void)
2830 {
2831 	thread_yield(true);
2832 }
2833 
2834 
2835 status_t
2836 _user_get_thread_info(thread_id id, thread_info *userInfo)
2837 {
2838 	thread_info info;
2839 	status_t status;
2840 
2841 	if (!IS_USER_ADDRESS(userInfo))
2842 		return B_BAD_ADDRESS;
2843 
2844 	status = _get_thread_info(id, &info, sizeof(thread_info));
2845 
2846 	if (status >= B_OK
2847 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2848 		return B_BAD_ADDRESS;
2849 
2850 	return status;
2851 }
2852 
2853 
2854 status_t
2855 _user_get_next_thread_info(team_id team, int32 *userCookie,
2856 	thread_info *userInfo)
2857 {
2858 	status_t status;
2859 	thread_info info;
2860 	int32 cookie;
2861 
2862 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2863 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2864 		return B_BAD_ADDRESS;
2865 
2866 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2867 	if (status < B_OK)
2868 		return status;
2869 
2870 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2871 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2872 		return B_BAD_ADDRESS;
2873 
2874 	return status;
2875 }
2876 
2877 
2878 thread_id
2879 _user_find_thread(const char *userName)
2880 {
2881 	char name[B_OS_NAME_LENGTH];
2882 
2883 	if (userName == NULL)
2884 		return find_thread(NULL);
2885 
2886 	if (!IS_USER_ADDRESS(userName)
2887 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2888 		return B_BAD_ADDRESS;
2889 
2890 	return find_thread(name);
2891 }
2892 
2893 
2894 status_t
2895 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2896 {
2897 	status_t returnCode;
2898 	status_t status;
2899 
2900 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2901 		return B_BAD_ADDRESS;
2902 
2903 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2904 
2905 	if (status == B_OK && userReturnCode != NULL
2906 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
2907 		return B_BAD_ADDRESS;
2908 	}
2909 
2910 	return syscall_restart_handle_post(status);
2911 }
2912 
2913 
2914 bool
2915 _user_has_data(thread_id thread)
2916 {
2917 	return has_data(thread);
2918 }
2919 
2920 
2921 status_t
2922 _user_send_data(thread_id thread, int32 code, const void *buffer,
2923 	size_t bufferSize)
2924 {
2925 	if (!IS_USER_ADDRESS(buffer))
2926 		return B_BAD_ADDRESS;
2927 
2928 	return send_data_etc(thread, code, buffer, bufferSize,
2929 		B_KILL_CAN_INTERRUPT);
2930 		// supports userland buffers
2931 }
2932 
2933 
2934 status_t
2935 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2936 {
2937 	thread_id sender;
2938 	status_t code;
2939 
2940 	if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
2941 		|| !IS_USER_ADDRESS(buffer))
2942 		return B_BAD_ADDRESS;
2943 
2944 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2945 		// supports userland buffers
2946 
2947 	if (_userSender != NULL)
2948 		if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
2949 			return B_BAD_ADDRESS;
2950 
2951 	return code;
2952 }
2953 
2954 
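/*!	Blocks the calling thread until it is unblocked (e.g. via
	_user_unblock_thread()), the timeout expires, or the wait is
	interrupted. If the thread's user_thread::wait_status is already
	\code <= 0 \endcode, that value is returned right away; otherwise the
	resulting status is stored there after waking up.
*/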
2955 status_t
2956 _user_block_thread(uint32 flags, bigtime_t timeout)
2957 {
2958 	syscall_restart_handle_timeout_pre(flags, timeout);
2959 	flags |= B_CAN_INTERRUPT;
2960 
2961 	struct thread* thread = thread_get_current_thread();
2962 
2963 	InterruptsSpinLocker locker(gThreadSpinlock);
2964 
2965 	// check if we're already done
2966 	if (thread->user_thread->wait_status <= 0)
2967 		return thread->user_thread->wait_status;
2968 
2969 	// nope, so wait
2970 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");
2971 	status_t status = thread_block_with_timeout_locked(flags, timeout);
2972 	thread->user_thread->wait_status = status;
2973 
2974 	return syscall_restart_handle_timeout_post(status, timeout);
2975 }
2976 
2977 
2978 status_t
2979 _user_unblock_thread(thread_id threadID, status_t status)
2980 {
2981 	InterruptsSpinLocker locker(gThreadSpinlock);
2982 	return user_unblock_thread(threadID, status);
2983 }
2984 
2985 
2986 status_t
2987 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
2988 {
2989 	enum {
2990 		MAX_USER_THREADS_TO_UNBLOCK	= 128
2991 	};
2992 
2993 	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
2994 		return B_BAD_ADDRESS;
2995 	if (count > MAX_USER_THREADS_TO_UNBLOCK)
2996 		return B_BAD_VALUE;
2997 
2998 	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
2999 	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
3000 		return B_BAD_ADDRESS;
3001 
3002 	for (uint32 i = 0; i < count; i++)
3003 		user_unblock_thread(threads[i], status);
3004 
3005 	return B_OK;
3006 }
3007 
3008 
3009 // TODO: the following two functions don't belong here
3010 
3011 
3012 int
3013 _user_getrlimit(int resource, struct rlimit *urlp)
3014 {
3015 	struct rlimit rl;
3016 	int ret;
3017 
3018 	if (urlp == NULL)
3019 		return EINVAL;
3020 
3021 	if (!IS_USER_ADDRESS(urlp))
3022 		return B_BAD_ADDRESS;
3023 
3024 	ret = common_getrlimit(resource, &rl);
3025 
3026 	if (ret == 0) {
3027 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
3028 		if (ret < 0)
3029 			return ret;
3030 
3031 		return 0;
3032 	}
3033 
3034 	return ret;
3035 }
3036 
3037 
3038 int
3039 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
3040 {
3041 	struct rlimit resourceLimit;
3042 
3043 	if (userResourceLimit == NULL)
3044 		return EINVAL;
3045 
3046 	if (!IS_USER_ADDRESS(userResourceLimit)
3047 		|| user_memcpy(&resourceLimit, userResourceLimit,
3048 			sizeof(struct rlimit)) < B_OK)
3049 		return B_BAD_ADDRESS;
3050 
3051 	return common_setrlimit(resource, &resourceLimit);
3052 }
3053