xref: /haiku/src/system/kernel/thread.cpp (revision d2e1e872611179c9cfaa43ce11bd58b1e3554e4b)
1 /*
2  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*! Threading routines */
10 
11 
12 #include <thread.h>
13 
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <sys/resource.h>
18 
19 #include <OS.h>
20 
21 #include <util/AutoLock.h>
22 #include <util/khash.h>
23 
24 #include <arch/debug.h>
25 #include <boot/kernel_args.h>
26 #include <condition_variable.h>
27 #include <cpu.h>
28 #include <int.h>
29 #include <kimage.h>
30 #include <kscheduler.h>
31 #include <ksignal.h>
32 #include <smp.h>
33 #include <syscalls.h>
34 #include <syscall_restart.h>
35 #include <team.h>
36 #include <tls.h>
37 #include <user_runtime.h>
38 #include <vfs.h>
39 #include <vm.h>
40 #include <vm_address_space.h>
41 #include <wait_for_objects.h>
42 
43 
44 //#define TRACE_THREAD
45 #ifdef TRACE_THREAD
46 #	define TRACE(x) dprintf x
47 #else
48 #	define TRACE(x) ;
49 #endif
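
// TRACE() swallows a complete dprintf() argument list in a single macro
// parameter, which is why every call site wraps its arguments in double
// parentheses, e.g. (taken from create_thread() below):
//
//	TRACE(("create_thread(%s, id = %ld, %s)\n", name, threadID,
//		kernel ? "kernel" : "user"));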
50 
51 
52 #define THREAD_MAX_MESSAGE_SIZE		65536
53 
54 // used to pass arguments from thread_exit() to thread_exit2()
55 
56 struct thread_exit_args {
57 	struct thread	*thread;
58 	area_id			old_kernel_stack;
59 	uint32			death_stack;
60 	sem_id			death_sem;
61 	team_id			original_team_id;
62 };
63 
64 struct thread_key {
65 	thread_id id;
66 };
67 
68 // global
69 spinlock thread_spinlock = 0;
70 
71 // thread list
72 static struct thread sIdleThreads[B_MAX_CPU_COUNT];
73 static hash_table *sThreadHash = NULL;
74 static thread_id sNextThreadID = 1;
75 
76 // some arbitrarily chosen limits - should probably depend on the available
77 // memory (the limit is not yet enforced)
78 static int32 sMaxThreads = 4096;
79 static int32 sUsedThreads = 0;
80 
81 static sem_id sSnoozeSem = -1;
82 
83 // death stacks - used temporarily as a thread cleans itself up
84 struct death_stack {
85 	area_id	area;
86 	addr_t	address;
87 	bool	in_use;
88 };
89 static struct death_stack *sDeathStacks;
90 static unsigned int sNumDeathStacks;
91 static unsigned int volatile sDeathStackBitmap;
92 static sem_id sDeathStackSem;
93 static spinlock sDeathStackLock = 0;
94 
95 // The dead queue is used as a pool from which to retrieve and reuse previously
96 // allocated thread structs when creating a new thread. It should be gone once
97 // the slab allocator is in.
98 struct thread_queue dead_q;
99 
100 static void thread_kthread_entry(void);
101 static void thread_kthread_exit(void);
102 
103 
104 /*!
105 	Inserts a thread into a team.
106 	You must hold the team lock when you call this function.
107 */
108 static void
109 insert_thread_into_team(struct team *team, struct thread *thread)
110 {
111 	thread->team_next = team->thread_list;
112 	team->thread_list = thread;
113 	team->num_threads++;
114 
115 	if (team->num_threads == 1) {
116 		// this was the first thread
117 		team->main_thread = thread;
118 	}
119 	thread->team = team;
120 }
121 
122 
123 /*!
124 	Removes a thread from a team.
125 	You must hold the team lock when you call this function.
126 */
127 static void
128 remove_thread_from_team(struct team *team, struct thread *thread)
129 {
130 	struct thread *temp, *last = NULL;
131 
132 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
133 		if (temp == thread) {
134 			if (last == NULL)
135 				team->thread_list = temp->team_next;
136 			else
137 				last->team_next = temp->team_next;
138 
139 			team->num_threads--;
140 			break;
141 		}
142 		last = temp;
143 	}
144 }
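
/*
	A typical caller wraps these two helpers in the interrupt/team-lock
	pattern used throughout this file (see create_thread() and thread_exit()
	below); a minimal sketch, with oldTeam/newTeam standing in for whatever
	team structs the caller already holds:

		cpu_status state = disable_interrupts();
		GRAB_TEAM_LOCK();

		remove_thread_from_team(oldTeam, thread);
		insert_thread_into_team(newTeam, thread);

		RELEASE_TEAM_LOCK();
		restore_interrupts(state);
*/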
145 
146 
147 static int
148 thread_struct_compare(void *_t, const void *_key)
149 {
150 	struct thread *thread = (struct thread*)_t;
151 	const struct thread_key *key = (const struct thread_key*)_key;
152 
153 	if (thread->id == key->id)
154 		return 0;
155 
156 	return 1;
157 }
158 
159 
160 static uint32
161 thread_struct_hash(void *_t, const void *_key, uint32 range)
162 {
163 	struct thread *thread = (struct thread*)_t;
164 	const struct thread_key *key = (const struct thread_key*)_key;
165 
166 	if (thread != NULL)
167 		return thread->id % range;
168 
169 	return (uint32)key->id % range;
170 }
171 
172 
173 static void
174 reset_signals(struct thread *thread)
175 {
176 	thread->sig_pending = 0;
177 	thread->sig_block_mask = 0;
178 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
179 	thread->signal_stack_base = 0;
180 	thread->signal_stack_size = 0;
181 	thread->signal_stack_enabled = false;
182 }
183 
184 
185 /*!
186 	Allocates and fills in a thread structure (or reuses one from the
187 	dead queue).
188 
189 	\param threadID The ID to be assigned to the new thread. If
190 		  \code < 0 \endcode a fresh one is allocated.
191 	\param inthread If not \c NULL, this thread struct is used instead of allocating a new one
192 */
193 
194 static struct thread *
195 create_thread_struct(struct thread *inthread, const char *name,
196 	thread_id threadID, struct cpu_ent *cpu)
197 {
198 	struct thread *thread;
199 	cpu_status state;
200 	char temp[64];
201 
202 	if (inthread == NULL) {
203 		// try to recycle one from the dead queue first
204 		state = disable_interrupts();
205 		GRAB_THREAD_LOCK();
206 		thread = thread_dequeue(&dead_q);
207 		RELEASE_THREAD_LOCK();
208 		restore_interrupts(state);
209 
210 		// if not, create a new one
211 		if (thread == NULL) {
212 			thread = (struct thread *)malloc(sizeof(struct thread));
213 			if (thread == NULL)
214 				return NULL;
215 		}
216 	} else {
217 		thread = inthread;
218 	}
219 
220 	if (name != NULL)
221 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
222 	else
223 		strcpy(thread->name, "unnamed thread");
224 
225 	thread->flags = 0;
226 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
227 	thread->team = NULL;
228 	thread->cpu = cpu;
229 	thread->sem.blocking = -1;
230 	thread->condition_variable_entry = NULL;
231 	thread->fault_handler = 0;
232 	thread->page_faults_allowed = 1;
233 	thread->kernel_stack_area = -1;
234 	thread->kernel_stack_base = 0;
235 	thread->user_stack_area = -1;
236 	thread->user_stack_base = 0;
237 	thread->user_local_storage = 0;
238 	thread->kernel_errno = 0;
239 	thread->team_next = NULL;
240 	thread->queue_next = NULL;
241 	thread->priority = thread->next_priority = -1;
242 	thread->args1 = NULL;  thread->args2 = NULL;
243 	thread->alarm.period = 0;
244 	reset_signals(thread);
245 	thread->in_kernel = true;
246 	thread->was_yielded = false;
247 	thread->user_time = 0;
248 	thread->kernel_time = 0;
249 	thread->last_time = 0;
250 	thread->exit.status = 0;
251 	thread->exit.reason = 0;
252 	thread->exit.signal = 0;
253 	list_init(&thread->exit.waiters);
254 	thread->select_infos = NULL;
255 
256 	sprintf(temp, "thread_%ld_retcode_sem", thread->id);
257 	thread->exit.sem = create_sem(0, temp);
258 	if (thread->exit.sem < B_OK)
259 		goto err1;
260 
261 	sprintf(temp, "%s send", thread->name);
262 	thread->msg.write_sem = create_sem(1, temp);
263 	if (thread->msg.write_sem < B_OK)
264 		goto err2;
265 
266 	sprintf(temp, "%s receive", thread->name);
267 	thread->msg.read_sem = create_sem(0, temp);
268 	if (thread->msg.read_sem < B_OK)
269 		goto err3;
270 
271 	if (arch_thread_init_thread_struct(thread) < B_OK)
272 		goto err4;
273 
274 	return thread;
275 
276 err4:
277 	delete_sem(thread->msg.read_sem);
278 err3:
279 	delete_sem(thread->msg.write_sem);
280 err2:
281 	delete_sem(thread->exit.sem);
282 err1:
283 	// ToDo: put them in the dead queue instead?
284 	if (inthread == NULL)
285 		free(thread);
286 	return NULL;
287 }
288 
289 
290 static void
291 delete_thread_struct(struct thread *thread)
292 {
293 	delete_sem(thread->exit.sem);
294 	delete_sem(thread->msg.write_sem);
295 	delete_sem(thread->msg.read_sem);
296 
297 	// ToDo: put them in the dead queue instead?
298 	free(thread);
299 }
300 
301 
302 /*! This function gets run by a new thread before anything else */
303 static void
304 thread_kthread_entry(void)
305 {
306 	struct thread *thread = thread_get_current_thread();
307 
308 	// simulates the thread spinlock release that would occur if the thread had been
309 	// rescheduled from. The reschedule didn't happen because the thread is new.
310 	RELEASE_THREAD_LOCK();
311 
312 	// start tracking time
313 	thread->last_time = system_time();
314 
315 	enable_interrupts(); // this essentially simulates a return-from-interrupt
316 }
317 
318 
319 static void
320 thread_kthread_exit(void)
321 {
322 	struct thread *thread = thread_get_current_thread();
323 
324 	thread->exit.reason = THREAD_RETURN_EXIT;
325 	thread_exit();
326 }
327 
328 
329 /*!
330 	Initializes the thread and jumps to its userspace entry point.
331 	This function is called at creation time of every user thread,
332 	but not for a team's main thread.
333 */
334 static int
335 _create_user_thread_kentry(void)
336 {
337 	struct thread *thread = thread_get_current_thread();
338 
339 	// a signal may have been delivered here
340 	thread_at_kernel_exit();
341 
342 	// jump to the entry point in user space
343 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
344 		thread->args1, thread->args2);
345 
346 	// only get here if the above call fails
347 	return 0;
348 }
349 
350 
351 /*! Initializes the thread and calls its kernel space entry point. */
352 static int
353 _create_kernel_thread_kentry(void)
354 {
355 	struct thread *thread = thread_get_current_thread();
356 	int (*func)(void *args) = (int (*)(void *))thread->entry;
357 
358 	// call the entry function with the appropriate args
359 	return func(thread->args1);
360 }
361 
362 
363 /*!
364 	Creates a new thread in the team with the specified team ID.
365 
366 	\param threadID The ID to be assigned to the new thread. If
367 		  \code < 0 \endcode a fresh one is allocated.
368 */
369 static thread_id
370 create_thread(const char *name, team_id teamID, thread_entry_func entry,
371 	void *args1, void *args2, int32 priority, bool kernel, thread_id threadID)
372 {
373 	struct thread *thread, *currentThread;
374 	struct team *team;
375 	cpu_status state;
376 	char stack_name[B_OS_NAME_LENGTH];
377 	status_t status;
378 	bool abort = false;
379 	bool debugNewThread = false;
380 
381 	TRACE(("create_thread(%s, id = %ld, %s)\n", name, threadID,
382 		kernel ? "kernel" : "user"));
383 
384 	thread = create_thread_struct(NULL, name, threadID, NULL);
385 	if (thread == NULL)
386 		return B_NO_MEMORY;
387 
388 	thread->priority = priority == -1 ? B_NORMAL_PRIORITY : priority;
389 	thread->next_priority = thread->priority;
390 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
391 	thread->state = B_THREAD_SUSPENDED;
392 	thread->next_state = B_THREAD_SUSPENDED;
393 
394 	// init debug structure
395 	clear_thread_debug_info(&thread->debug_info, false);
396 
397 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", name, thread->id);
398 	thread->kernel_stack_area = create_area(stack_name,
399 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
400 		KERNEL_STACK_SIZE, B_FULL_LOCK,
401 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
402 
403 	if (thread->kernel_stack_area < 0) {
404 		// we're not yet part of a team, so we can just bail out
405 		status = thread->kernel_stack_area;
406 
407 		dprintf("create_thread: error creating kernel stack: %s!\n",
408 			strerror(status));
409 
410 		delete_thread_struct(thread);
411 		return status;
412 	}
413 
414 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE;
415 
416 	state = disable_interrupts();
417 	GRAB_THREAD_LOCK();
418 
419 	// If the new thread belongs to the same team as the current thread,
420 	// it may inherit some of the thread debug flags.
421 	currentThread = thread_get_current_thread();
422 	if (currentThread && currentThread->team->id == teamID) {
423 		// inherit all user flags...
424 		int32 debugFlags = currentThread->debug_info.flags
425 			& B_THREAD_DEBUG_USER_FLAG_MASK;
426 
427 		// ... save the syscall tracing flags, unless explicitly specified
428 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
429 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
430 				| B_THREAD_DEBUG_POST_SYSCALL);
431 		}
432 
433 		thread->debug_info.flags = debugFlags;
434 
435 		// stop the new thread, if desired
436 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
437 	}
438 
439 	// insert into global list
440 	hash_insert(sThreadHash, thread);
441 	sUsedThreads++;
442 	RELEASE_THREAD_LOCK();
443 
444 	GRAB_TEAM_LOCK();
445 	// look at the team, make sure it's not being deleted
446 	team = team_get_team_struct_locked(teamID);
447 	if (team != NULL && team->state != TEAM_STATE_DEATH) {
448 		// Debug the new thread, if the parent thread required that (see above),
449 		// or the respective global team debug flag is set. But only if a
450 		// debugger is installed for the team.
451 		debugNewThread |= (atomic_get(&team->debug_info.flags)
452 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
453 		if (debugNewThread
454 			&& (atomic_get(&team->debug_info.flags)
455 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
456 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
457 		}
458 
459 		insert_thread_into_team(team, thread);
460 	} else
461 		abort = true;
462 
463 	RELEASE_TEAM_LOCK();
464 	if (abort) {
465 		GRAB_THREAD_LOCK();
466 		hash_remove(sThreadHash, thread);
467 		RELEASE_THREAD_LOCK();
468 	}
469 	restore_interrupts(state);
470 	if (abort) {
471 		delete_area(thread->kernel_stack_area);
472 		delete_thread_struct(thread);
473 		return B_BAD_TEAM_ID;
474 	}
475 
476 	thread->args1 = args1;
477 	thread->args2 = args2;
478 	thread->entry = entry;
479 	status = thread->id;
480 
481 	if (kernel) {
482 		// this sets up an initial kthread stack that runs the entry
483 
484 		// Note: whatever function wants to set up a user stack later for this
485 		// thread must initialize the TLS for it
486 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
487 			&thread_kthread_entry, &thread_kthread_exit);
488 	} else {
489 		// create user stack
490 
491 		// the stack will be between USER_STACK_REGION and the main thread stack area
492 		// (the user stack of the main thread is created in team_create_team())
493 		thread->user_stack_base = USER_STACK_REGION;
494 		thread->user_stack_size = USER_STACK_SIZE;
495 
496 		snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack", name, thread->id);
497 		thread->user_stack_area = create_area_etc(team, stack_name,
498 				(void **)&thread->user_stack_base, B_BASE_ADDRESS,
499 				thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
500 				B_READ_AREA | B_WRITE_AREA | B_STACK_AREA);
501 		if (thread->user_stack_area < B_OK
502 			|| arch_thread_init_tls(thread) < B_OK) {
503 			// great, we have a fully running thread without a (usable) stack
504 			dprintf("create_thread: unable to create proper user stack!\n");
505 			status = thread->user_stack_area;
506 			kill_thread(thread->id);
507 		}
508 
509 		user_debug_update_new_thread_flags(thread->id);
510 
511 		// Set up the kthread stack to run _create_user_thread_kentry(),
512 		// which will handle pending signals and then immediately switch the
513 		// thread into user space.
514 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
515 			&thread_kthread_entry, &thread_kthread_exit);
516 	}
517 
518 	return status;
519 }
520 
521 
522 /*!
523 	Finds a free death stack for us and allocates it.
524 	Must be called with interrupts enabled.
525 */
526 static uint32
527 get_death_stack(void)
528 {
529 	cpu_status state;
530 	uint32 bit;
531 	int32 i;
532 
533 	acquire_sem(sDeathStackSem);
534 
535 	// grab the death stack and thread locks, find a free spot and release
536 
537 	state = disable_interrupts();
538 
539 	acquire_spinlock(&sDeathStackLock);
540 	GRAB_THREAD_LOCK();
541 
542 	bit = sDeathStackBitmap;
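	// The next line isolates the lowest clear bit of the bitmap: in the
	// complement ~bit that is the lowest set bit, and x & ~(x - 1) extracts
	// it. For example, a bitmap of 0x7 yields 0x8, i.e. death stack index 3.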
543 	bit = (~bit) & ~((~bit) - 1);
544 	sDeathStackBitmap |= bit;
545 
546 	RELEASE_THREAD_LOCK();
547 	release_spinlock(&sDeathStackLock);
548 
549 	restore_interrupts(state);
550 
551 	// sanity checks
552 	if (!bit)
553 		panic("get_death_stack: couldn't find free stack!\n");
554 
555 	if (bit & (bit - 1))
556 		panic("get_death_stack: impossible bitmap result!\n");
557 
558 	// bit to number
559 	for (i = -1; bit; i++) {
560 		bit >>= 1;
561 	}
562 
563 	TRACE(("get_death_stack: returning %#lx\n", sDeathStacks[i].address));
564 
565 	return (uint32)i;
566 }
567 
568 
569 /*!	Returns the thread's death stack to the pool.
570 	Interrupts must be disabled and sDeathStackLock must be held.
571 */
572 static void
573 put_death_stack(uint32 index)
574 {
575 	TRACE(("put_death_stack...: passed %lu\n", index));
576 
577 	if (index >= sNumDeathStacks)
578 		panic("put_death_stack: passed invalid stack index %ld\n", index);
579 
580 	if (!(sDeathStackBitmap & (1 << index)))
581 		panic("put_death_stack: passed invalid stack index %ld\n", index);
582 
583 	GRAB_THREAD_LOCK();
584 	sDeathStackBitmap &= ~(1 << index);
585 	RELEASE_THREAD_LOCK();
586 
587 	release_sem_etc(sDeathStackSem, 1, B_DO_NOT_RESCHEDULE);
588 		// we must not hold the thread lock when releasing a semaphore
589 }
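
/*
	Death stack hand-off, as performed by thread_exit()/thread_exit2() below
	(sketch only):

		uint32 index = get_death_stack();
			// may block on sDeathStackSem, then claims a slot in the bitmap

		... switch the dying thread onto sDeathStacks[index] ...

		acquire_spinlock(&sDeathStackLock);
		put_death_stack(index);
		GRAB_THREAD_LOCK();
		release_spinlock(&sDeathStackLock);
		scheduler_reschedule();

	sDeathStackLock is only dropped after the thread lock has been grabbed,
	so no other exiting thread can claim this stack before its current owner
	has been scheduled away from it for good.
*/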
590 
591 
592 static void
593 thread_exit2(void *_args)
594 {
595 	struct thread_exit_args args;
596 
597 	// copy the arguments over, since the source is probably on the kernel
598 	// stack we're about to delete
599 	memcpy(&args, _args, sizeof(struct thread_exit_args));
600 
601 	// we must not leave interrupts disabled at this point
602 	enable_interrupts();
603 
604 	TRACE(("thread_exit2, running on death stack %#lx\n", args.death_stack));
605 
606 	// delete the old kernel stack area
607 	TRACE(("thread_exit2: deleting old kernel stack id %ld for thread %ld\n",
608 		args.old_kernel_stack, args.thread->id));
609 
610 	delete_area(args.old_kernel_stack);
611 
612 	// remove this thread from all of the global lists
613 	TRACE(("thread_exit2: removing thread %ld from global lists\n",
614 		args.thread->id));
615 
616 	disable_interrupts();
617 	GRAB_TEAM_LOCK();
618 
619 	remove_thread_from_team(team_get_kernel_team(), args.thread);
620 
621 	RELEASE_TEAM_LOCK();
622 	enable_interrupts();
623 		// needed for the debugger notification below
624 
625 	TRACE(("thread_exit2: done removing thread from lists\n"));
626 
627 	if (args.death_sem >= 0)
628 		release_sem_etc(args.death_sem, 1, B_DO_NOT_RESCHEDULE);
629 
630 	// notify the debugger
631 	if (args.original_team_id >= 0
632 		&& args.original_team_id != team_get_kernel_team_id()) {
633 		user_debug_thread_deleted(args.original_team_id, args.thread->id);
634 	}
635 
636 	disable_interrupts();
637 
638 	// Set the next state to be gone: this will cause the thread structure
639 	// to be returned to a free pool upon reschedule.
640 	// Note, we need to have disabled interrupts at this point, or else
641 	// we could get rescheduled too early.
642 	args.thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
643 
644 	// return the death stack and reschedule one last time
645 
646 	// Note that we need to hold sDeathStackLock until we've got the thread
647 	// lock. Otherwise someone else might grab our stack in the meantime.
648 	acquire_spinlock(&sDeathStackLock);
649 	put_death_stack(args.death_stack);
650 
651 	GRAB_THREAD_LOCK();
652 	release_spinlock(&sDeathStackLock);
653 
654 	scheduler_reschedule();
655 		// requires thread lock to be held
656 
657 	// never get to here
658 	panic("thread_exit2: made it where it shouldn't have!\n");
659 }
660 
661 
662 /*!
663 	Fills the thread_info structure with information from the specified
664 	thread.
665 	The thread lock must be held when called.
666 */
667 static void
668 fill_thread_info(struct thread *thread, thread_info *info, size_t size)
669 {
670 	info->thread = thread->id;
671 	info->team = thread->team->id;
672 
673 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
674 
675 	if (thread->state == B_THREAD_WAITING) {
676 		if (thread->sem.blocking == sSnoozeSem)
677 			info->state = B_THREAD_ASLEEP;
678 		else if (thread->sem.blocking == thread->msg.read_sem)
679 			info->state = B_THREAD_RECEIVING;
680 		else
681 			info->state = B_THREAD_WAITING;
682 	} else
683 		info->state = (thread_state)thread->state;
684 
685 	info->priority = thread->priority;
686 	info->sem = thread->sem.blocking;
687 	info->user_time = thread->user_time;
688 	info->kernel_time = thread->kernel_time;
689 	info->stack_base = (void *)thread->user_stack_base;
690 	info->stack_end = (void *)(thread->user_stack_base
691 		+ thread->user_stack_size);
692 }
693 
694 
695 static status_t
696 send_data_etc(thread_id id, int32 code, const void *buffer,
697 	size_t bufferSize, int32 flags)
698 {
699 	struct thread *target;
700 	sem_id cachedSem;
701 	cpu_status state;
702 	status_t status;
703 	cbuf *data;
704 
705 	state = disable_interrupts();
706 	GRAB_THREAD_LOCK();
707 	target = thread_get_thread_struct_locked(id);
708 	if (!target) {
709 		RELEASE_THREAD_LOCK();
710 		restore_interrupts(state);
711 		return B_BAD_THREAD_ID;
712 	}
713 	cachedSem = target->msg.write_sem;
714 	RELEASE_THREAD_LOCK();
715 	restore_interrupts(state);
716 
717 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
718 		return B_NO_MEMORY;
719 
720 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
721 	if (status == B_INTERRUPTED) {
722 		// We got interrupted by a signal
723 		return status;
724 	}
725 	if (status != B_OK) {
726 		// Any other acquisition problems may be due to thread deletion
727 		return B_BAD_THREAD_ID;
728 	}
729 
730 	if (bufferSize > 0) {
731 		data = cbuf_get_chain(bufferSize);
732 		if (data == NULL)
733 			return B_NO_MEMORY;
734 		status = cbuf_user_memcpy_to_chain(data, 0, buffer, bufferSize);
735 		if (status < B_OK) {
736 			cbuf_free_chain(data);
737 			return B_NO_MEMORY;
738 		}
739 	} else
740 		data = NULL;
741 
742 	state = disable_interrupts();
743 	GRAB_THREAD_LOCK();
744 
745 	// The target thread could have been deleted at this point
746 	target = thread_get_thread_struct_locked(id);
747 	if (target == NULL) {
748 		RELEASE_THREAD_LOCK();
749 		restore_interrupts(state);
750 		cbuf_free_chain(data);
751 		return B_BAD_THREAD_ID;
752 	}
753 
754 	// Save message information
755 	target->msg.sender = thread_get_current_thread()->id;
756 	target->msg.code = code;
757 	target->msg.size = bufferSize;
758 	target->msg.buffer = data;
759 	cachedSem = target->msg.read_sem;
760 
761 	RELEASE_THREAD_LOCK();
762 	restore_interrupts(state);
763 
764 	release_sem(cachedSem);
765 	return B_OK;
766 }
767 
768 
769 static int32
770 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
771 	int32 flags)
772 {
773 	struct thread *thread = thread_get_current_thread();
774 	status_t status;
775 	size_t size;
776 	int32 code;
777 
778 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
779 	if (status < B_OK) {
780 		// Actually, we're not supposed to return error codes
781 		// but since the only reason this can fail is that we
782 		// were killed, it's probably okay to do so (but also
783 		// meaningless).
784 		return status;
785 	}
786 
787 	if (buffer != NULL && bufferSize != 0) {
788 		size = min_c(bufferSize, thread->msg.size);
789 		status = cbuf_user_memcpy_from_chain(buffer, thread->msg.buffer,
790 			0, size);
791 		if (status < B_OK) {
792 			cbuf_free_chain(thread->msg.buffer);
793 			release_sem(thread->msg.write_sem);
794 			return status;
795 		}
796 	}
797 
798 	*_sender = thread->msg.sender;
799 	code = thread->msg.code;
800 
801 	cbuf_free_chain(thread->msg.buffer);
802 	release_sem(thread->msg.write_sem);
803 
804 	return code;
805 }
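
/*
	These two routines implement the machinery behind the send_data()/
	receive_data() API declared in OS.h. A rough sketch of the handshake
	(illustrative only; error handling omitted, "worker" stands for the
	target thread_id):

		// sender side
		int32 value = 42;
		send_data(worker, 0x1234, &value, sizeof(value));
			// blocks on the target's msg.write_sem until its single message
			// slot is free, then releases msg.read_sem

		// receiver side, running in thread "worker"
		thread_id sender;
		int32 value;
		int32 code = receive_data(&sender, &value, sizeof(value));
			// blocks on msg.read_sem, copies the data out, then releases
			// msg.write_sem again

	Buffers larger than THREAD_MAX_MESSAGE_SIZE are rejected with
	B_NO_MEMORY.
*/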
806 
807 
808 //	#pragma mark - debugger calls
809 
810 
811 static int
812 make_thread_unreal(int argc, char **argv)
813 {
814 	struct thread *thread;
815 	struct hash_iterator i;
816 	int32 id = -1;
817 
818 	if (argc > 2) {
819 		print_debugger_command_usage(argv[0]);
820 		return 0;
821 	}
822 
823 	if (argc > 1)
824 		id = strtoul(argv[1], NULL, 0);
825 
826 	hash_open(sThreadHash, &i);
827 
828 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
829 		if (id != -1 && thread->id != id)
830 			continue;
831 
832 		if (thread->priority > B_DISPLAY_PRIORITY) {
833 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
834 			kprintf("thread %ld made unreal\n", thread->id);
835 		}
836 	}
837 
838 	hash_close(sThreadHash, &i, false);
839 	return 0;
840 }
841 
842 
843 static int
844 set_thread_prio(int argc, char **argv)
845 {
846 	struct thread *thread;
847 	struct hash_iterator i;
848 	int32 id;
849 	int32 prio;
850 
851 	if (argc > 3 || argc < 2) {
852 		print_debugger_command_usage(argv[0]);
853 		return 0;
854 	}
855 
856 	prio = strtoul(argv[1], NULL, 0);
857 	if (prio > B_MAX_PRIORITY)
858 		prio = B_MAX_PRIORITY;
859 	if (prio < B_MIN_PRIORITY)
860 		prio = B_MIN_PRIORITY;
861 
862 	if (argc > 2)
863 		id = strtoul(argv[2], NULL, 0);
864 	else
865 		id = thread_get_current_thread()->id;
866 
867 	hash_open(sThreadHash, &i);
868 
869 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
870 		if (thread->id != id)
871 			continue;
872 		thread->priority = thread->next_priority = prio;
873 		kprintf("thread %ld set to priority %ld\n", id, prio);
874 		break;
875 	}
876 	if (!thread)
877 		kprintf("thread %ld (%#lx) not found\n", id, id);
878 
879 	hash_close(sThreadHash, &i, false);
880 	return 0;
881 }
882 
883 
884 static int
885 make_thread_suspended(int argc, char **argv)
886 {
887 	struct thread *thread;
888 	struct hash_iterator i;
889 	int32 id;
890 
891 	if (argc > 2) {
892 		print_debugger_command_usage(argv[0]);
893 		return 0;
894 	}
895 
896 	if (argc == 1)
897 		id = thread_get_current_thread()->id;
898 	else
899 		id = strtoul(argv[1], NULL, 0);
900 
901 	hash_open(sThreadHash, &i);
902 
903 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
904 		if (thread->id != id)
905 			continue;
906 
907 		thread->next_state = B_THREAD_SUSPENDED;
908 		kprintf("thread %ld suspended\n", id);
909 		break;
910 	}
911 	if (!thread)
912 		kprintf("thread %ld (%#lx) not found\n", id, id);
913 
914 	hash_close(sThreadHash, &i, false);
915 	return 0;
916 }
917 
918 
919 static int
920 make_thread_resumed(int argc, char **argv)
921 {
922 	struct thread *thread;
923 	struct hash_iterator i;
924 	int32 id;
925 
926 	if (argc != 2) {
927 		print_debugger_command_usage(argv[0]);
928 		return 0;
929 	}
930 
931 	// force user to enter a thread id, as using
932 	// the current thread is usually not intended
933 	id = strtoul(argv[1], NULL, 0);
934 
935 	hash_open(sThreadHash, &i);
936 
937 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
938 		if (thread->id != id)
939 			continue;
940 
941 		if (thread->state == B_THREAD_SUSPENDED) {
942 			scheduler_enqueue_in_run_queue(thread);
943 			kprintf("thread %ld resumed\n", thread->id);
944 		}
945 		break;
946 	}
947 	if (!thread)
948 		kprintf("thread %ld (%#lx) not found\n", id, id);
949 
950 	hash_close(sThreadHash, &i, false);
951 	return 0;
952 }
953 
954 
955 static int
956 drop_into_debugger(int argc, char **argv)
957 {
958 	status_t err;
959 	int32 id;
960 
961 	if (argc > 2) {
962 		print_debugger_command_usage(argv[0]);
963 		return 0;
964 	}
965 
966 	if (argc == 1)
967 		id = thread_get_current_thread()->id;
968 	else
969 		id = strtoul(argv[1], NULL, 0);
970 
971 	err = _user_debug_thread(id);
972 	if (err)
973 		kprintf("drop failed\n");
974 	else
975 		kprintf("thread %ld dropped into user debugger\n", id);
976 
977 	return 0;
978 }
979 
980 
981 static const char *
982 state_to_text(struct thread *thread, int32 state)
983 {
984 	switch (state) {
985 		case B_THREAD_READY:
986 			return "ready";
987 
988 		case B_THREAD_RUNNING:
989 			return "running";
990 
991 		case B_THREAD_WAITING:
992 			if (thread->sem.blocking == sSnoozeSem)
993 				return "zzz";
994 			if (thread->sem.blocking == thread->msg.read_sem)
995 				return "receive";
996 
997 			return "waiting";
998 
999 		case B_THREAD_SUSPENDED:
1000 			return "suspended";
1001 
1002 		case THREAD_STATE_FREE_ON_RESCHED:
1003 			return "death";
1004 
1005 		default:
1006 			return "UNKNOWN";
1007 	}
1008 }
1009 
1010 
1011 static void
1012 _dump_thread_info(struct thread *thread)
1013 {
1014 	struct death_entry *death = NULL;
1015 
1016 	kprintf("THREAD: %p\n", thread);
1017 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1018 	kprintf("name:               \"%s\"\n", thread->name);
1019 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1020 		thread->all_next, thread->team_next, thread->queue_next);
1021 	kprintf("priority:           %ld (next %ld)\n", thread->priority, thread->next_priority);
1022 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1023 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1024 	kprintf("cpu:                %p ", thread->cpu);
1025 	if (thread->cpu)
1026 		kprintf("(%d)\n", thread->cpu->cpu_num);
1027 	else
1028 		kprintf("\n");
1029 	kprintf("sig_pending:        %#lx (blocked: %#lx)\n", thread->sig_pending,
1030 		thread->sig_block_mask);
1031 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1032 	kprintf("  sem.blocking:     %ld\n", thread->sem.blocking);
1033 	kprintf("  sem.count:        %ld\n", thread->sem.count);
1034 	kprintf("  sem.acquire_status: %#lx\n", thread->sem.acquire_status);
1035 	kprintf("  sem.flags:        %#lx\n", thread->sem.flags);
1036 
1037 	kprintf("condition variables:");
1038 	PrivateConditionVariableEntry* entry = thread->condition_variable_entry;
1039 	while (entry != NULL) {
1040 		kprintf(" %p", entry->Variable());
1041 		entry = entry->ThreadNext();
1042 	}
1043 	kprintf("\n");
1044 
1045 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1046 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1047 	kprintf("entry:              %p\n", (void *)thread->entry);
1048 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1049 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1050 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1051 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1052 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1053 	kprintf("  exit.waiters:\n");
1054 	while ((death = (struct death_entry*)list_get_next_item(
1055 			&thread->exit.waiters, death)) != NULL) {
1056 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1057 	}
1058 
1059 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1060 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1061 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1062 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1063 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1064 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1065 		strerror(thread->kernel_errno));
1066 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1067 	kprintf("user_time:          %Ld\n", thread->user_time);
1068 	kprintf("flags:              0x%lx\n", thread->flags);
1069 	kprintf("architecture dependent section:\n");
1070 	arch_thread_dump_info(&thread->arch_info);
1071 }
1072 
1073 
1074 static int
1075 dump_thread_info(int argc, char **argv)
1076 {
1077 	const char *name = NULL;
1078 	struct thread *thread;
1079 	int32 id = -1;
1080 	struct hash_iterator i;
1081 	bool found = false;
1082 
1083 	if (argc > 2) {
1084 		print_debugger_command_usage(argv[0]);
1085 		return 0;
1086 	}
1087 
1088 	if (argc == 1) {
1089 		_dump_thread_info(thread_get_current_thread());
1090 		return 0;
1091 	} else {
1092 		name = argv[1];
1093 		id = strtoul(argv[1], NULL, 0);
1094 
1095 		if (IS_KERNEL_ADDRESS(id)) {
1096 			// semi-hack
1097 			_dump_thread_info((struct thread *)id);
1098 			return 0;
1099 		}
1100 	}
1101 
1102 	// walk through the thread list, trying to match name or id
1103 	hash_open(sThreadHash, &i);
1104 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1105 		if ((name != NULL && !strcmp(name, thread->name)) || thread->id == id) {
1106 			_dump_thread_info(thread);
1107 			found = true;
1108 			break;
1109 		}
1110 	}
1111 	hash_close(sThreadHash, &i, false);
1112 
1113 	if (!found)
1114 		kprintf("thread \"%s\" (%ld) doesn't exist!\n", argv[1], id);
1115 	return 0;
1116 }
1117 
1118 
1119 static int
1120 dump_thread_list(int argc, char **argv)
1121 {
1122 	struct thread *thread;
1123 	struct hash_iterator i;
1124 	bool realTimeOnly = false;
1125 	bool calling = false;
1126 	const char *callSymbol = NULL;
1127 	addr_t callStart = 0;
1128 	addr_t callEnd = 0;
1129 	int32 requiredState = 0;
1130 	team_id team = -1;
1131 	sem_id sem = -1;
1132 
1133 	if (!strcmp(argv[0], "realtime"))
1134 		realTimeOnly = true;
1135 	else if (!strcmp(argv[0], "ready"))
1136 		requiredState = B_THREAD_READY;
1137 	else if (!strcmp(argv[0], "running"))
1138 		requiredState = B_THREAD_RUNNING;
1139 	else if (!strcmp(argv[0], "waiting")) {
1140 		requiredState = B_THREAD_WAITING;
1141 
1142 		if (argc > 1) {
1143 			sem = strtoul(argv[1], NULL, 0);
1144 			if (sem == 0)
1145 				kprintf("ignoring invalid semaphore argument.\n");
1146 		}
1147 	} else if (!strcmp(argv[0], "calling")) {
1148 		if (argc < 2) {
1149 			kprintf("Need to give a symbol name or start and end arguments.\n");
1150 			return 0;
1151 		} else if (argc == 3) {
1152 			callStart = parse_expression(argv[1]);
1153 			callEnd = parse_expression(argv[2]);
1154 		} else
1155 			callSymbol = argv[1];
1156 
1157 		calling = true;
1158 	} else if (argc > 1) {
1159 		team = strtoul(argv[1], NULL, 0);
1160 		if (team == 0)
1161 			kprintf("ignoring invalid team argument.\n");
1162 	}
1163 
1164 	kprintf("thread         id  state        sem/cv cpu pri  stack      team  "
1165 		"name\n");
1166 
1167 	hash_open(sThreadHash, &i);
1168 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1169 		// filter out threads not matching the search criteria
1170 		if ((requiredState && thread->state != requiredState)
1171 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1172 					callStart, callEnd))
1173 			|| (sem > 0 && thread->sem.blocking != sem)
1174 			|| (team > 0 && thread->team->id != team)
1175 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1176 			continue;
1177 
1178 		kprintf("%p %6ld  %-9s", thread, thread->id, state_to_text(thread,
1179 			thread->state));
1180 
1181 		// does it block on a semaphore or a condition variable?
1182 		if (thread->state == B_THREAD_WAITING) {
1183 			if (thread->condition_variable_entry)
1184 				kprintf("%p  ", thread->condition_variable_entry->Variable());
1185 			else
1186 				kprintf("%10ld  ", thread->sem.blocking);
1187 		} else
1188 			kprintf("      -     ");
1189 
1190 		// on which CPU does it run?
1191 		if (thread->cpu)
1192 			kprintf("%2d", thread->cpu->cpu_num);
1193 		else
1194 			kprintf(" -");
1195 
1196 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1197 			(void *)thread->kernel_stack_base, thread->team->id,
1198 			thread->name != NULL ? thread->name : "<NULL>");
1199 	}
1200 	hash_close(sThreadHash, &i, false);
1201 	return 0;
1202 }
1203 
1204 
1205 //	#pragma mark - private kernel API
1206 
1207 
1208 void
1209 thread_exit(void)
1210 {
1211 	cpu_status state;
1212 	struct thread *thread = thread_get_current_thread();
1213 	struct team *team = thread->team;
1214 	thread_id parentID = -1;
1215 	bool deleteTeam = false;
1216 	sem_id cachedDeathSem = -1;
1217 	status_t status;
1218 	struct thread_debug_info debugInfo;
1219 	team_id teamID = team->id;
1220 
1221 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1222 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1223 			? "due to signal" : "normally", thread->exit.status));
1224 
1225 	if (!are_interrupts_enabled())
1226 		panic("thread_exit() called with interrupts disabled!\n");
1227 
1228 	// boost our priority to get this over with
1229 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1230 
1231 	// Cancel previously installed alarm timer, if any
1232 	cancel_timer(&thread->alarm);
1233 
1234 	// delete the user stack area first, we won't need it anymore
1235 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1236 		area_id area = thread->user_stack_area;
1237 		thread->user_stack_area = -1;
1238 		delete_area_etc(team, area);
1239 	}
1240 
1241 	struct job_control_entry *death = NULL;
1242 	struct death_entry* threadDeathEntry = NULL;
1243 
1244 	if (team != team_get_kernel_team()) {
1245 		if (team->main_thread == thread) {
1246 			// this was the main thread in this team, so we will delete the team as well
1247 			deleteTeam = true;
1248 		} else
1249 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1250 
1251 		// remove this thread from the current team and put it into the
1252 		// kernel team until it dies
1253 		state = disable_interrupts();
1254 		GRAB_TEAM_LOCK();
1255 		GRAB_THREAD_LOCK();
1256 			// removing the thread and putting its death entry to the parent
1257 			// team needs to be an atomic operation
1258 
1259 		// remember how long this thread lasted
1260 		team->dead_threads_kernel_time += thread->kernel_time;
1261 		team->dead_threads_user_time += thread->user_time;
1262 
1263 		remove_thread_from_team(team, thread);
1264 		insert_thread_into_team(team_get_kernel_team(), thread);
1265 
1266 		cachedDeathSem = team->death_sem;
1267 
1268 		if (deleteTeam) {
1269 			struct team *parent = team->parent;
1270 
1271 			// remember who our parent was so we can send a signal
1272 			parentID = parent->id;
1273 
1274 			// Set the team job control state to "dead" and detach the job
1275 			// control entry from our team struct.
1276 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1277 			death = team->job_control_entry;
1278 			team->job_control_entry = NULL;
1279 
1280 			if (death != NULL) {
1281 				death->InitDeadState();
1282 
1283 				// team_set_job_control_state() already moved our entry
1284 				// into the parent's list. We just check the soft limit of
1285 				// death entries.
1286 				if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
1287 					death = parent->dead_children->entries.RemoveHead();
1288 					parent->dead_children->count--;
1289 				} else
1290 					death = NULL;
1291 
1292 				RELEASE_THREAD_LOCK();
1293 			} else
1294 				RELEASE_THREAD_LOCK();
1295 
1296 			team_remove_team(team);
1297 
1298 			send_signal_etc(parentID, SIGCHLD,
1299 				SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
1300 		} else {
1301 			// The thread is not the main thread. We store a thread death
1302 			// entry for it, unless someone is already waiting for it.
1303 			if (threadDeathEntry != NULL
1304 				&& list_is_empty(&thread->exit.waiters)) {
1305 				threadDeathEntry->thread = thread->id;
1306 				threadDeathEntry->status = thread->exit.status;
1307 				threadDeathEntry->reason = thread->exit.reason;
1308 				threadDeathEntry->signal = thread->exit.signal;
1309 
1310 				// add entry -- remove an old one, if we hit the limit
1311 				list_add_item(&team->dead_threads, threadDeathEntry);
1312 				team->dead_threads_count++;
1313 				threadDeathEntry = NULL;
1314 
1315 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1316 					threadDeathEntry = (death_entry*)list_remove_head_item(
1317 						&team->dead_threads);
1318 					team->dead_threads_count--;
1319 				}
1320 			}
1321 
1322 			RELEASE_THREAD_LOCK();
1323 		}
1324 
1325 		RELEASE_TEAM_LOCK();
1326 
1327 		// swap address spaces, to make sure we're running on the kernel's pgdir
1328 		vm_swap_address_space(vm_kernel_address_space());
1329 		restore_interrupts(state);
1330 
1331 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1332 	}
1333 
1334 	if (threadDeathEntry != NULL)
1335 		free(threadDeathEntry);
1336 
1337 	// delete the team if we're its main thread
1338 	if (deleteTeam) {
1339 		team_delete_team(team);
1340 
1341 		// we need to delete any death entry that made it to here
1342 		if (death != NULL)
1343 			delete death;
1344 
1345 		cachedDeathSem = -1;
1346 	}
1347 
1348 	state = disable_interrupts();
1349 	GRAB_THREAD_LOCK();
1350 
1351 	// remove thread from hash, so it's no longer accessible
1352 	hash_remove(sThreadHash, thread);
1353 	sUsedThreads--;
1354 
1355 	// Stop debugging for this thread
1356 	debugInfo = thread->debug_info;
1357 	clear_thread_debug_info(&thread->debug_info, true);
1358 
1359 	// Remove the select infos. We notify them a little later.
1360 	select_info* selectInfos = thread->select_infos;
1361 	thread->select_infos = NULL;
1362 
1363 	RELEASE_THREAD_LOCK();
1364 	restore_interrupts(state);
1365 
1366 	destroy_thread_debug_info(&debugInfo);
1367 
1368 	// notify select infos
1369 	select_info* info = selectInfos;
1370 	while (info != NULL) {
1371 		select_sync* sync = info->sync;
1372 
1373 		notify_select_events(info, B_EVENT_INVALID);
1374 		info = info->next;
1375 		put_select_sync(sync);
1376 	}
1377 
1378 	// shutdown the thread messaging
1379 
1380 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1381 	if (status == B_WOULD_BLOCK) {
1382 		// there is data waiting for us, so let us eat it
1383 		thread_id sender;
1384 
1385 		delete_sem(thread->msg.write_sem);
1386 			// first, let's remove all possibly waiting writers
1387 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1388 	} else {
1389 		// we probably own the semaphore here, and we're the last to do so
1390 		delete_sem(thread->msg.write_sem);
1391 	}
1392 	// now we can safely remove the msg.read_sem
1393 	delete_sem(thread->msg.read_sem);
1394 
1395 	// fill all death entries and delete the sem that others will use to wait on us
1396 	{
1397 		sem_id cachedExitSem = thread->exit.sem;
1398 		cpu_status state;
1399 
1400 		state = disable_interrupts();
1401 		GRAB_THREAD_LOCK();
1402 
1403 		// make sure no one will grab this semaphore again
1404 		thread->exit.sem = -1;
1405 
1406 		// fill all death entries
1407 		death_entry* entry = NULL;
1408 		while ((entry = (struct death_entry*)list_get_next_item(
1409 				&thread->exit.waiters, entry)) != NULL) {
1410 			entry->status = thread->exit.status;
1411 			entry->reason = thread->exit.reason;
1412 			entry->signal = thread->exit.signal;
1413 		}
1414 
1415 		RELEASE_THREAD_LOCK();
1416 		restore_interrupts(state);
1417 
1418 		delete_sem(cachedExitSem);
1419 	}
1420 
1421 	{
1422 		struct thread_exit_args args;
1423 
1424 		args.thread = thread;
1425 		args.old_kernel_stack = thread->kernel_stack_area;
1426 		args.death_stack = get_death_stack();
1427 		args.death_sem = cachedDeathSem;
1428 		args.original_team_id = teamID;
1429 
1430 
1431 		disable_interrupts();
1432 
1433 		// set the new kernel stack officially to the death stack; it won't be
1434 		// switched until the next function is called. This must be done now
1435 		// before a context switch, or we'll stay on the old stack
1436 		thread->kernel_stack_area = sDeathStacks[args.death_stack].area;
1437 		thread->kernel_stack_base = sDeathStacks[args.death_stack].address;
1438 
1439 		// we will continue in thread_exit2(), on the new stack
1440 		arch_thread_switch_kstack_and_call(thread, thread->kernel_stack_base
1441 			 + KERNEL_STACK_SIZE, thread_exit2, &args);
1442 	}
1443 
1444 	panic("never can get here\n");
1445 }
1446 
1447 
1448 struct thread *
1449 thread_get_thread_struct(thread_id id)
1450 {
1451 	struct thread *thread;
1452 	cpu_status state;
1453 
1454 	state = disable_interrupts();
1455 	GRAB_THREAD_LOCK();
1456 
1457 	thread = thread_get_thread_struct_locked(id);
1458 
1459 	RELEASE_THREAD_LOCK();
1460 	restore_interrupts(state);
1461 
1462 	return thread;
1463 }
1464 
1465 
1466 struct thread *
1467 thread_get_thread_struct_locked(thread_id id)
1468 {
1469 	struct thread_key key;
1470 
1471 	key.id = id;
1472 
1473 	return (struct thread*)hash_lookup(sThreadHash, &key);
1474 }
1475 
1476 
1477 /*!
1478 	Called in the interrupt handler code when a thread enters
1479 	the kernel for any reason.
1480 	Only tracks time for now.
1481 	Interrupts are disabled.
1482 */
1483 void
1484 thread_at_kernel_entry(bigtime_t now)
1485 {
1486 	struct thread *thread = thread_get_current_thread();
1487 
1488 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1489 
1490 	// track user time
1491 	thread->user_time += now - thread->last_time;
1492 	thread->last_time = now;
1493 
1494 	thread->in_kernel = true;
1495 }
1496 
1497 
1498 /*!
1499 	Called whenever a thread exits kernel space to user space.
1500 	Tracks time, handles signals, ...
1501 */
1502 void
1503 thread_at_kernel_exit(void)
1504 {
1505 	struct thread *thread = thread_get_current_thread();
1506 
1507 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1508 
1509 	while (handle_signals(thread)) {
1510 		InterruptsSpinLocker _(thread_spinlock);
1511 		scheduler_reschedule();
1512 	}
1513 
1514 	cpu_status state = disable_interrupts();
1515 
1516 	thread->in_kernel = false;
1517 
1518 	// track kernel time
1519 	bigtime_t now = system_time();
1520 	thread->kernel_time += now - thread->last_time;
1521 	thread->last_time = now;
1522 
1523 	restore_interrupts(state);
1524 }
1525 
1526 
1527 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1528 	and no debugging shall be done.
1529 	Interrupts are disabled in this case.
1530 */
1531 void
1532 thread_at_kernel_exit_no_signals(void)
1533 {
1534 	struct thread *thread = thread_get_current_thread();
1535 
1536 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1537 
1538 	thread->in_kernel = false;
1539 
1540 	// track kernel time
1541 	bigtime_t now = system_time();
1542 	thread->kernel_time += now - thread->last_time;
1543 	thread->last_time = now;
1544 }
1545 
1546 
1547 void
1548 thread_reset_for_exec(void)
1549 {
1550 	struct thread *thread = thread_get_current_thread();
1551 
1552 	cancel_timer(&thread->alarm);
1553 	reset_signals(thread);
1554 }
1555 
1556 
1557 /*! Inserts a thread at the tail of a queue. */
1558 void
1559 thread_enqueue(struct thread *thread, struct thread_queue *queue)
1560 {
1561 	thread->queue_next = NULL;
1562 	if (queue->head == NULL) {
1563 		queue->head = thread;
1564 		queue->tail = thread;
1565 	} else {
1566 		queue->tail->queue_next = thread;
1567 		queue->tail = thread;
1568 	}
1569 }
1570 
1571 
1572 struct thread *
1573 thread_lookat_queue(struct thread_queue *queue)
1574 {
1575 	return queue->head;
1576 }
1577 
1578 
1579 struct thread *
1580 thread_dequeue(struct thread_queue *queue)
1581 {
1582 	struct thread *thread = queue->head;
1583 
1584 	if (thread != NULL) {
1585 		queue->head = thread->queue_next;
1586 		if (queue->tail == thread)
1587 			queue->tail = NULL;
1588 	}
1589 	return thread;
1590 }
1591 
1592 
1593 struct thread *
1594 thread_dequeue_id(struct thread_queue *q, thread_id id)
1595 {
1596 	struct thread *thread;
1597 	struct thread *last = NULL;
1598 
1599 	thread = q->head;
1600 	while (thread != NULL) {
1601 		if (thread->id == id) {
1602 			if (last == NULL)
1603 				q->head = thread->queue_next;
1604 			else
1605 				last->queue_next = thread->queue_next;
1606 
1607 			if (q->tail == thread)
1608 				q->tail = last;
1609 			break;
1610 		}
1611 		last = thread;
1612 		thread = thread->queue_next;
1613 	}
1614 	return thread;
1615 }
1616 
1617 
1618 thread_id
1619 allocate_thread_id(void)
1620 {
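	// atomic_add() returns the previous value, so the ID handed out here is
	// the one sNextThreadID held before the increment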
1621 	return atomic_add(&sNextThreadID, 1);
1622 }
1623 
1624 
1625 thread_id
1626 peek_next_thread_id(void)
1627 {
1628 	return atomic_get(&sNextThreadID);
1629 }
1630 
1631 
1632 /*!	Yields the CPU to other threads.
1633 	If \a force is \c true, the thread is almost guaranteed to be unscheduled.
1634 	If \c false, it will continue to run if there is no other thread in ready
1635 	state; if it has a higher priority than the other ready threads, it
1636 	still has a good chance of continuing to run.
1637 */
1638 void
1639 thread_yield(bool force)
1640 {
1641 	if (force) {
1642 		// snooze for roughly 3 thread quantums
1643 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1644 #if 0
1645 		cpu_status state;
1646 
1647 		struct thread *thread = thread_get_current_thread();
1648 		if (thread == NULL)
1649 			return;
1650 
1651 		state = disable_interrupts();
1652 		GRAB_THREAD_LOCK();
1653 
1654 		// mark the thread as yielded, so it will not be scheduled next
1655 		//thread->was_yielded = true;
1656 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1657 		scheduler_reschedule();
1658 
1659 		RELEASE_THREAD_LOCK();
1660 		restore_interrupts(state);
1661 #endif
1662 	} else {
1663 		struct thread *thread = thread_get_current_thread();
1664 		if (thread == NULL)
1665 			return;
1666 
1667 		// Don't force the thread off the CPU, just reschedule.
1668 		InterruptsSpinLocker _(thread_spinlock);
1669 		scheduler_reschedule();
1670 	}
1671 }
1672 
1673 
1674 /*!
1675 	Kernel private thread creation function.
1676 
1677 	\param threadID The ID to be assigned to the new thread. If
1678 		  \code < 0 \endcode a fresh one is allocated.
1679 */
1680 thread_id
1681 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1682 	void *arg, team_id team, thread_id threadID)
1683 {
1684 	return create_thread(name, team, (thread_entry_func)function, arg, NULL,
1685 		priority, true, threadID);
1686 }
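
/*
	A minimal in-kernel usage sketch (worker_func, the name, and the priority
	are placeholders for whatever the caller needs):

		static int32 worker_func(void *data);

		thread_id worker = spawn_kernel_thread_etc(worker_func, "worker",
			B_NORMAL_PRIORITY, NULL, team_get_kernel_team_id(), -1);
		if (worker >= B_OK) {
			resume_thread(worker);
				// threads are created in B_THREAD_SUSPENDED state

			status_t returnCode;
			wait_for_thread_etc(worker, 0, 0, &returnCode);
		}

	Passing -1 as the explicit thread ID makes create_thread() allocate a
	fresh one.
*/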
1687 
1688 
1689 status_t
1690 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1691 	status_t *_returnCode)
1692 {
1693 	sem_id exitSem = B_BAD_THREAD_ID;
1694 	struct death_entry death;
1695 	job_control_entry* freeDeath = NULL;
1696 	struct thread *thread;
1697 	cpu_status state;
1698 	status_t status = B_OK;
1699 
1700 	if (id < B_OK)
1701 		return B_BAD_THREAD_ID;
1702 
1703 	// we need to resume the thread we're waiting for first
1704 
1705 	state = disable_interrupts();
1706 	GRAB_THREAD_LOCK();
1707 
1708 	thread = thread_get_thread_struct_locked(id);
1709 	if (thread != NULL) {
1710 		// remember the semaphore we have to wait on and place our death entry
1711 		exitSem = thread->exit.sem;
1712 		list_add_link_to_head(&thread->exit.waiters, &death);
1713 	}
1714 
1715 	death_entry* threadDeathEntry = NULL;
1716 
1717 	RELEASE_THREAD_LOCK();
1718 
1719 	if (thread == NULL) {
1720 		// we couldn't find this thread - maybe it's already gone, and we'll
1721 		// find its death entry in our team
1722 		GRAB_TEAM_LOCK();
1723 
1724 		struct team* team = thread_get_current_thread()->team;
1725 
1726 		// check the child death entries first (i.e. main threads of child
1727 		// teams)
1728 		bool deleteEntry;
1729 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1730 		if (freeDeath != NULL) {
1731 			death.status = freeDeath->status;
1732 			if (!deleteEntry)
1733 				freeDeath = NULL;
1734 		} else {
1735 			// check the thread death entries of the team (non-main threads)
1736 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1737 					&team->dead_threads, threadDeathEntry)) != NULL) {
1738 				if (threadDeathEntry->thread == id) {
1739 					list_remove_item(&team->dead_threads, threadDeathEntry);
1740 					team->dead_threads_count--;
1741 					death.status = threadDeathEntry->status;
1742 					break;
1743 				}
1744 			}
1745 
1746 			if (threadDeathEntry == NULL)
1747 				status = B_BAD_THREAD_ID;
1748 		}
1749 
1750 		RELEASE_TEAM_LOCK();
1751 	}
1752 
1753 	restore_interrupts(state);
1754 
1755 	if (thread == NULL && status == B_OK) {
1756 		// we found the thread's death entry in our team
1757 		if (_returnCode)
1758 			*_returnCode = death.status;
1759 
1760 		delete freeDeath;
1761 		free(threadDeathEntry);
1762 		return B_OK;
1763 	}
1764 
1765 	// we need to wait for the death of the thread
1766 
1767 	if (exitSem < B_OK)
1768 		return B_BAD_THREAD_ID;
1769 
1770 	resume_thread(id);
1771 		// make sure we don't wait forever on a suspended thread
1772 
1773 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1774 
1775 	if (status == B_OK) {
1776 		// this should never happen as the thread deletes the semaphore on exit
1777 		panic("could acquire exit_sem for thread %ld\n", id);
1778 	} else if (status == B_BAD_SEM_ID) {
1779 		// this is the way the thread normally exits
1780 		status = B_OK;
1781 
1782 		if (_returnCode)
1783 			*_returnCode = death.status;
1784 	} else {
1785 		// We were probably interrupted; we need to remove our death entry now.
1786 		state = disable_interrupts();
1787 		GRAB_THREAD_LOCK();
1788 
1789 		thread = thread_get_thread_struct_locked(id);
1790 		if (thread != NULL)
1791 			list_remove_link(&death);
1792 
1793 		RELEASE_THREAD_LOCK();
1794 		restore_interrupts(state);
1795 
1796 		// If the thread is already gone, we need to wait for its exit semaphore
1797 		// to make sure our death entry stays valid - it won't take long
1798 		if (thread == NULL)
1799 			acquire_sem(exitSem);
1800 	}
1801 
1802 	return status;
1803 }
1804 
1805 
1806 status_t
1807 select_thread(int32 id, struct select_info* info, bool kernel)
1808 {
1809 	InterruptsSpinLocker locker(thread_spinlock);
1810 
1811 	// get thread
1812 	struct thread* thread = thread_get_thread_struct_locked(id);
1813 	if (thread == NULL)
1814 		return B_BAD_THREAD_ID;
1815 
1816 	// We support only B_EVENT_INVALID at the moment.
1817 	info->selected_events &= B_EVENT_INVALID;
1818 
1819 	// add info to list
1820 	if (info->selected_events != 0) {
1821 		info->next = thread->select_infos;
1822 		thread->select_infos = info;
1823 
1824 		// we need a sync reference
1825 		atomic_add(&info->sync->ref_count, 1);
1826 	}
1827 
1828 	return B_OK;
1829 }
1830 
1831 
1832 status_t
1833 deselect_thread(int32 id, struct select_info* info, bool kernel)
1834 {
1835 	InterruptsSpinLocker locker(thread_spinlock);
1836 
1837 	// get thread
1838 	struct thread* thread = thread_get_thread_struct_locked(id);
1839 	if (thread == NULL)
1840 		return B_BAD_THREAD_ID;
1841 
1842 	// remove info from list
1843 	select_info** infoLocation = &thread->select_infos;
1844 	while (*infoLocation != NULL && *infoLocation != info)
1845 		infoLocation = &(*infoLocation)->next;
1846 
1847 	if (*infoLocation != info)
1848 		return B_OK;
1849 
1850 	*infoLocation = info->next;
1851 
1852 	locker.Unlock();
1853 
1854 	// surrender sync reference
1855 	put_select_sync(info->sync);
1856 
1857 	return B_OK;
1858 }
1859 
1860 
1861 int32
1862 thread_max_threads(void)
1863 {
1864 	return sMaxThreads;
1865 }
1866 
1867 
1868 int32
1869 thread_used_threads(void)
1870 {
1871 	return sUsedThreads;
1872 }
1873 
1874 
1875 status_t
1876 thread_init(kernel_args *args)
1877 {
1878 	uint32 i;
1879 
1880 	TRACE(("thread_init: entry\n"));
1881 
1882 	// create the thread hash table
1883 	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
1884 		&thread_struct_compare, &thread_struct_hash);
1885 
1886 	// zero out the dead thread structure queue
1887 	memset(&dead_q, 0, sizeof(dead_q));
1888 
1889 	// allocate snooze sem
1890 	sSnoozeSem = create_sem(0, "snooze sem");
1891 	if (sSnoozeSem < 0) {
1892 		panic("error creating snooze sem\n");
1893 		return sSnoozeSem;
1894 	}
1895 
1896 	if (arch_thread_init(args) < B_OK)
1897 		panic("arch_thread_init() failed!\n");
1898 
1899 	// skip all thread IDs including B_SYSTEM_TEAM, which is reserved
1900 	sNextThreadID = B_SYSTEM_TEAM + 1;
1901 
1902 	// create an idle thread for each cpu
1903 
1904 	for (i = 0; i < args->num_cpus; i++) {
1905 		struct thread *thread;
1906 		area_info info;
1907 		char name[64];
1908 
1909 		sprintf(name, "idle thread %lu", i + 1);
1910 		thread = create_thread_struct(&sIdleThreads[i], name,
1911 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
1912 		if (thread == NULL) {
1913 			panic("error creating idle thread struct\n");
1914 			return B_NO_MEMORY;
1915 		}
1916 
1917 		thread->team = team_get_kernel_team();
1918 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
1919 		thread->state = B_THREAD_RUNNING;
1920 		thread->next_state = B_THREAD_READY;
1921 		sprintf(name, "idle thread %lu kstack", i + 1);
1922 		thread->kernel_stack_area = find_area(name);
1923 		thread->entry = NULL;
1924 
1925 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
1926 			panic("error finding idle kstack area\n");
1927 
1928 		thread->kernel_stack_base = (addr_t)info.address;
1929 
1930 		hash_insert(sThreadHash, thread);
1931 		insert_thread_into_team(thread->team, thread);
1932 	}
1933 	sUsedThreads = args->num_cpus;
1934 
1935 	// create a set of death stacks
1936 
1937 	sNumDeathStacks = smp_get_num_cpus();
1938 	if (sNumDeathStacks > 8 * sizeof(sDeathStackBitmap)) {
1939 		// clamp values for really beefy machines
1940 		sNumDeathStacks = 8 * sizeof(sDeathStackBitmap);
1941 	}
1942 	sDeathStackBitmap = 0;
1943 	sDeathStacks = (struct death_stack *)malloc(sNumDeathStacks
1944 		* sizeof(struct death_stack));
1945 	if (sDeathStacks == NULL) {
1946 		panic("error creating death stacks\n");
1947 		return B_NO_MEMORY;
1948 	}
1949 	{
1950 		char temp[64];
1951 
1952 		for (i = 0; i < sNumDeathStacks; i++) {
1953 			sprintf(temp, "death stack %lu", i);
1954 			sDeathStacks[i].area = create_area(temp,
1955 				(void **)&sDeathStacks[i].address, B_ANY_KERNEL_ADDRESS,
1956 				KERNEL_STACK_SIZE, B_FULL_LOCK,
1957 				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
1958 			if (sDeathStacks[i].area < 0) {
1959 				panic("error creating death stacks\n");
1960 				return sDeathStacks[i].area;
1961 			}
1962 			sDeathStacks[i].in_use = false;
1963 		}
1964 	}
1965 	sDeathStackSem = create_sem(sNumDeathStacks, "death stack availability");
1966 
1967 	// set up some debugger commands
1968 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
1969 		"[ <team> ]\n"
1970 		"Prints a list of all existing threads, or, if a team ID is given,\n"
1971 		"all threads of the specified team.\n"
1972 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
1973 	add_debugger_command_etc("ready", &dump_thread_list,
1974 		"List all ready threads",
1975 		"\n"
1976 		"Prints a list of all threads in ready state.\n", 0);
1977 	add_debugger_command_etc("running", &dump_thread_list,
1978 		"List all running threads",
1979 		"\n"
1980 		"Prints a list of all threads in running state.\n", 0);
1981 	add_debugger_command_etc("waiting", &dump_thread_list,
1982 		"List all waiting threads (optionally for a specific semaphore)",
1983 		"[ <sem> ]\n"
1984 		"Prints a list of all threads in waiting state. If a semaphore is\n"
1985 		"specified, only the threads waiting on that semaphore are listed.\n"
1986 		"  <sem>  - ID of the semaphore.\n", 0);
1987 	add_debugger_command_etc("realtime", &dump_thread_list,
1988 		"List all realtime threads",
1989 		"\n"
1990 		"Prints a list of all threads with realtime priority.\n", 0);
1991 	add_debugger_command_etc("thread", &dump_thread_info,
1992 		"Dump info about a particular thread",
1993 		"[ <id> | <address> | <name> ]\n"
1994 		"Prints information about the specified thread. If no argument is\n"
1995 		"given the current thread is selected.\n"
1996 		"  <id>       - The ID of the thread.\n"
1997 		"  <address>  - The address of the thread structure.\n"
1998 		"  <name>     - The thread's name.\n", 0);
1999 	add_debugger_command_etc("calling", &dump_thread_list,
2000 		"Show all threads that have a specific address in their call chain",
2001 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2002 	add_debugger_command_etc("unreal", &make_thread_unreal,
2003 		"Set realtime priority threads to normal priority",
2004 		"[ <id> ]\n"
2005 		"Sets the priority of all realtime threads or, if given, the one\n"
2006 		"with the specified ID to \"normal\" priority.\n"
2007 		"  <id>  - The ID of the thread.\n", 0);
2008 	add_debugger_command_etc("suspend", &make_thread_suspended,
2009 		"Suspend a thread",
2010 		"[ <id> ]\n"
2011 		"Suspends the thread with the given ID. If no ID argument is given\n"
2012 		"the current thread is selected.\n"
2013 		"  <id>  - The ID of the thread.\n", 0);
2014 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2015 		"<id>\n"
2016 		"Resumes the specified thread, if it is currently suspended.\n"
2017 		"  <id>  - The ID of the thread.\n", 0);
2018 	add_debugger_command_etc("drop", &drop_into_debugger,
2019 		"Drop a thread into the userland debugger",
2020 		"<id>\n"
2021 		"Drops the specified (userland) thread into the userland debugger\n"
2022 		"after leaving the kernel debugger.\n"
2023 		"  <id>  - The ID of the thread.\n", 0);
2024 	add_debugger_command_etc("priority", &set_thread_prio,
2025 		"Set a thread's priority",
2026 		"<priority> [ <id> ]\n"
2027 		"Sets the priority of the thread with the specified ID to the given\n"
2028 		"priority. If no thread ID is given, the current thread is selected.\n"
2029 		"  <priority>  - The thread's new priority (0 - 120)\n"
2030 		"  <id>        - The ID of the thread.\n", 0);
2031 
2032 	return B_OK;
2033 }
2034 
2035 
2036 status_t
2037 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2038 {
2039 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2040 	// so that get_current_cpu and friends will work, which is crucial for
2041 	// a lot of low level routines
2042 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2043 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2044 	return B_OK;
2045 }
2046 
2047 //	#pragma mark - public kernel API
2048 
2049 
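/*!
	Exits the calling thread with the given return value. For threads of
	userland teams this is done by sending the thread a SIGKILLTHR signal;
	kernel threads call thread_exit() directly.
*/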
2050 void
2051 exit_thread(status_t returnValue)
2052 {
2053 	struct thread *thread = thread_get_current_thread();
2054 
2055 	thread->exit.status = returnValue;
2056 	thread->exit.reason = THREAD_RETURN_EXIT;
2057 
2058 	// If called from a kernel thread, we don't deliver the signal but
2059 	// exit directly, to keep the behaviour this function has in
2060 	// userland.
2061 	if (thread->team != team_get_kernel_team())
2062 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2063 	else
2064 		thread_exit();
2065 }
2066 
2067 
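/*!	Asks the thread with the given ID to quit by sending it a SIGKILLTHR
	signal.
*/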
2068 status_t
2069 kill_thread(thread_id id)
2070 {
2071 	if (id <= 0)
2072 		return B_BAD_VALUE;
2073 
2074 	return send_signal(id, SIGKILLTHR);
2075 }
2076 
2077 
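/*!	Sends a message with the given code and buffer to the specified thread;
	equivalent to send_data_etc() without any flags.
*/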
2078 status_t
2079 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2080 {
2081 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2082 }
2083 
2084 
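/*!	Receives the next message sent to the calling thread, storing the ID of
	the sender; equivalent to receive_data_etc() without any flags.
*/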
2085 int32
2086 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2087 {
2088 	return receive_data_etc(sender, buffer, bufferSize, 0);
2089 }
2090 
2091 
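/*!	Returns whether there is a message waiting in the message queue.
	Note that the thread argument is currently ignored; the calling
	thread's message queue is the one being checked.
*/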
2092 bool
2093 has_data(thread_id thread)
2094 {
2095 	int32 count;
2096 
2097 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
2098 			&count) != B_OK)
2099 		return false;
2100 
2101 	return count == 0 ? false : true;
2102 }
2103 
2104 
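/*!	Fills in the thread_info structure for the thread with the given ID.
	Returns B_BAD_VALUE if the arguments are invalid or no thread with that
	ID exists.
*/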
2105 status_t
2106 _get_thread_info(thread_id id, thread_info *info, size_t size)
2107 {
2108 	status_t status = B_OK;
2109 	struct thread *thread;
2110 	cpu_status state;
2111 
2112 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2113 		return B_BAD_VALUE;
2114 
2115 	state = disable_interrupts();
2116 	GRAB_THREAD_LOCK();
2117 
2118 	thread = thread_get_thread_struct_locked(id);
2119 	if (thread == NULL) {
2120 		status = B_BAD_VALUE;
2121 		goto err;
2122 	}
2123 
2124 	fill_thread_info(thread, info, size);
2125 
2126 err:
2127 	RELEASE_THREAD_LOCK();
2128 	restore_interrupts(state);
2129 
2130 	return status;
2131 }
2132 
2133 
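/*!	Returns info about the next thread of the given team. The cookie keeps
	track of the iteration state and is expected to be 0 on the first call.
*/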
2134 status_t
2135 _get_next_thread_info(team_id team, int32 *_cookie, thread_info *info,
2136 	size_t size)
2137 {
2138 	status_t status = B_BAD_VALUE;
2139 	struct thread *thread = NULL;
2140 	cpu_status state;
2141 	int slot;
2142 	thread_id lastThreadID;
2143 
2144 	if (info == NULL || size != sizeof(thread_info) || team < B_OK)
2145 		return B_BAD_VALUE;
2146 
2147 	if (team == B_CURRENT_TEAM)
2148 		team = team_get_current_team_id();
2149 	else if (!team_is_valid(team))
2150 		return B_BAD_VALUE;
2151 
2152 	slot = *_cookie;
2153 
2154 	state = disable_interrupts();
2155 	GRAB_THREAD_LOCK();
2156 
2157 	lastThreadID = peek_next_thread_id();
2158 	if (slot >= lastThreadID)
2159 		goto err;
2160 
2161 	while (slot < lastThreadID
2162 		&& (!(thread = thread_get_thread_struct_locked(slot))
2163 			|| thread->team->id != team))
2164 		slot++;
2165 
2166 	if (thread != NULL && thread->team->id == team) {
2167 		fill_thread_info(thread, info, size);
2168 
2169 		*_cookie = slot + 1;
2170 		status = B_OK;
2171 	}
2172 
2173 err:
2174 	RELEASE_THREAD_LOCK();
2175 	restore_interrupts(state);
2176 
2177 	return status;
2178 }
2179 
2180 
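/*!	Returns the ID of the first thread found with the given name, or the ID
	of the calling thread if the name is NULL. The whole thread hash table
	is searched with the thread lock held.
*/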
2181 thread_id
2182 find_thread(const char *name)
2183 {
2184 	struct hash_iterator iterator;
2185 	struct thread *thread;
2186 	cpu_status state;
2187 
2188 	if (name == NULL)
2189 		return thread_get_current_thread_id();
2190 
2191 	state = disable_interrupts();
2192 	GRAB_THREAD_LOCK();
2193 
2194 	// ToDo: this might not be in the same order as find_thread() in BeOS
2195 	//		which could be theoretically problematic.
2196 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2197 	//		cheap either - although this function is probably used very rarely.
2198 
2199 	hash_open(sThreadHash, &iterator);
2200 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
2201 			!= NULL) {
2202 		// Search through hash
2203 		// check whether this thread's name matches
2204 			thread_id id = thread->id;
2205 
2206 			RELEASE_THREAD_LOCK();
2207 			restore_interrupts(state);
2208 			return id;
2209 		}
2210 	}
2211 
2212 	RELEASE_THREAD_LOCK();
2213 	restore_interrupts(state);
2214 
2215 	return B_NAME_NOT_FOUND;
2216 }
2217 
2218 
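/*!	Renames the specified thread. Only threads belonging to the calling
	thread's team may be renamed; B_NOT_ALLOWED is returned otherwise.
*/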
2219 status_t
2220 rename_thread(thread_id id, const char *name)
2221 {
2222 	struct thread *thread = thread_get_current_thread();
2223 	status_t status = B_BAD_THREAD_ID;
2224 	cpu_status state;
2225 
2226 	if (name == NULL)
2227 		return B_BAD_VALUE;
2228 
2229 	state = disable_interrupts();
2230 	GRAB_THREAD_LOCK();
2231 
2232 	if (thread->id != id)
2233 		thread = thread_get_thread_struct_locked(id);
2234 
2235 	if (thread != NULL) {
2236 		if (thread->team == thread_get_current_thread()->team) {
2237 			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2238 			status = B_OK;
2239 		} else
2240 			status = B_NOT_ALLOWED;
2241 	}
2242 
2243 	RELEASE_THREAD_LOCK();
2244 	restore_interrupts(state);
2245 
2246 	return status;
2247 }
2248 
2249 
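/*!	Sets the priority of the thread with the given ID (clamped to the valid
	range) and returns its previous priority, or B_BAD_THREAD_ID if no such
	thread exists. A thread that is ready to run is re-enqueued in the run
	queue according to its new priority.
*/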
2250 status_t
2251 set_thread_priority(thread_id id, int32 priority)
2252 {
2253 	struct thread *thread;
2254 	int32 oldPriority;
2255 
2256 	// make sure the passed in priority is within bounds
2257 	if (priority > B_MAX_PRIORITY)
2258 		priority = B_MAX_PRIORITY;
2259 	if (priority < B_MIN_PRIORITY)
2260 		priority = B_MIN_PRIORITY;
2261 
2262 	thread = thread_get_current_thread();
2263 	if (thread->id == id) {
2264 		// It's the calling thread itself, so we know it isn't in the
2265 		// run queue and we can manipulate its structure directly.
2266 		oldPriority = thread->priority;
2267 			// note that this might not return the correct value if we are preempted
2268 			// here, and another thread changes our priority before the next line is
2269 			// executed
2270 		thread->priority = thread->next_priority = priority;
2271 	} else {
2272 		cpu_status state = disable_interrupts();
2273 		GRAB_THREAD_LOCK();
2274 
2275 		thread = thread_get_thread_struct_locked(id);
2276 		if (thread) {
2277 			oldPriority = thread->priority;
2278 			thread->next_priority = priority;
2279 			if (thread->state == B_THREAD_READY && thread->priority != priority) {
2280 				// if the thread is in the run queue, we reinsert it at a new position
2281 				scheduler_remove_from_run_queue(thread);
2282 				thread->priority = priority;
2283 				scheduler_enqueue_in_run_queue(thread);
2284 			} else
2285 				thread->priority = priority;
2286 		} else
2287 			oldPriority = B_BAD_THREAD_ID;
2288 
2289 		RELEASE_THREAD_LOCK();
2290 		restore_interrupts(state);
2291 	}
2292 
2293 	return oldPriority;
2294 }
2295 
2296 
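/*!	Puts the calling thread to sleep for the given timeout by trying to
	acquire the global snooze semaphore; the expected timeout (or
	B_WOULD_BLOCK) result is translated to B_OK. Only B_SYSTEM_TIMEBASE is
	supported.
*/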
2297 status_t
2298 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2299 {
2300 	status_t status;
2301 
2302 	if (timebase != B_SYSTEM_TIMEBASE)
2303 		return B_BAD_VALUE;
2304 
2305 	status = acquire_sem_etc(sSnoozeSem, 1, flags, timeout);
2306 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2307 		return B_OK;
2308 
2309 	return status;
2310 }
2311 
2312 
2313 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2314 status_t
2315 snooze(bigtime_t timeout)
2316 {
2317 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2318 }
2319 
2320 
2321 /*!
2322 	snooze_until() for internal kernel use only; doesn't interrupt on
2323 	signals.
2324 */
2325 status_t
2326 snooze_until(bigtime_t timeout, int timebase)
2327 {
2328 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2329 }
2330 
2331 
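/*!	Waits until the specified thread has exited; equivalent to
	wait_for_thread_etc() without flags or timeout.
*/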
2332 status_t
2333 wait_for_thread(thread_id thread, status_t *_returnCode)
2334 {
2335 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2336 }
2337 
2338 
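/*!	Suspends the thread with the given ID by sending it a SIGSTOP signal. */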
2339 status_t
2340 suspend_thread(thread_id id)
2341 {
2342 	if (id <= 0)
2343 		return B_BAD_VALUE;
2344 
2345 	return send_signal(id, SIGSTOP);
2346 }
2347 
2348 
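/*!	Resumes a previously suspended thread by sending it a SIGCONT signal. */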
2349 status_t
2350 resume_thread(thread_id id)
2351 {
2352 	if (id <= 0)
2353 		return B_BAD_VALUE;
2354 
2355 	return send_signal_etc(id, SIGCONT, SIGNAL_FLAG_DONT_RESTART_SYSCALL);
2356 		// This retains compatibility with BeOS, which documents the
2357 		// combination of suspend_thread() and resume_thread() to
2358 		// interrupt threads waiting on semaphores.
2359 }
2360 
2361 
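/*!	Creates a new kernel thread in the kernel team, running the given
	function with the given argument. Like spawn_thread() in userland, the
	new thread typically still has to be started with resume_thread().
*/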
2362 thread_id
2363 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2364 	void *arg)
2365 {
2366 	return create_thread(name, team_get_kernel_team()->id,
2367 		(thread_entry_func)function, arg, NULL, priority, true, -1);
2368 }
2369 
2370 
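/*!	Returns the given resource limit: RLIMIT_NOFILE and RLIMIT_NOVMON are
	handled by the VFS, RLIMIT_STACK reports the calling thread's user
	stack size; all other resources yield EINVAL.
*/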
2371 /* TODO: split this; have kernel version set kerrno */
2372 int
2373 getrlimit(int resource, struct rlimit * rlp)
2374 {
2375 	if (!rlp)
2376 		return B_BAD_ADDRESS;
2377 
2378 	switch (resource) {
2379 		case RLIMIT_NOFILE:
2380 		case RLIMIT_NOVMON:
2381 			return vfs_getrlimit(resource, rlp);
2382 
2383 		case RLIMIT_STACK:
2384 		{
2385 			struct thread *thread = thread_get_current_thread();
2386 			if (!thread)
2387 				return B_ERROR;
2388 			rlp->rlim_cur = thread->user_stack_size;
2389 			rlp->rlim_max = thread->user_stack_size;
2390 			return 0;
2391 		}
2392 
2393 		default:
2394 			return EINVAL;
2395 	}
2396 
2397 	return 0;
2398 }
2399 
2400 
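/*!	Sets the given resource limit. Only RLIMIT_NOFILE and RLIMIT_NOVMON
	are supported; they are forwarded to the VFS.
*/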
2401 /* TODO: split this; have kernel version set kerrno */
2402 int
2403 setrlimit(int resource, const struct rlimit * rlp)
2404 {
2405 	if (!rlp)
2406 		return B_BAD_ADDRESS;
2407 
2408 	switch (resource) {
2409 		case RLIMIT_NOFILE:
2410 		case RLIMIT_NOVMON:
2411 			return vfs_setrlimit(resource, rlp);
2412 
2413 		default:
2414 			return EINVAL;
2415 	}
2416 
2417 	return 0;
2418 }
2419 
2420 
2421 //	#pragma mark - syscalls
2422 
2423 
2424 void
2425 _user_exit_thread(status_t returnValue)
2426 {
2427 	exit_thread(returnValue);
2428 }
2429 
2430 
2431 status_t
2432 _user_kill_thread(thread_id thread)
2433 {
2434 	return kill_thread(thread);
2435 }
2436 
2437 
2438 status_t
2439 _user_resume_thread(thread_id thread)
2440 {
2441 	return resume_thread(thread);
2442 }
2443 
2444 
2445 status_t
2446 _user_suspend_thread(thread_id thread)
2447 {
2448 	return suspend_thread(thread);
2449 }
2450 
2451 
2452 status_t
2453 _user_rename_thread(thread_id thread, const char *userName)
2454 {
2455 	char name[B_OS_NAME_LENGTH];
2456 
2457 	if (!IS_USER_ADDRESS(userName)
2458 		|| userName == NULL
2459 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2460 		return B_BAD_ADDRESS;
2461 
2462 	return rename_thread(thread, name);
2463 }
2464 
2465 
2466 int32
2467 _user_set_thread_priority(thread_id thread, int32 newPriority)
2468 {
2469 	return set_thread_priority(thread, newPriority);
2470 }
2471 
2472 
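/*!	Syscall counterpart of spawn_thread(): copies the thread name from
	userland, creates the thread in the calling team, and notifies the
	user debugging facilities about the new thread.
*/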
2473 thread_id
2474 _user_spawn_thread(int32 (*entry)(thread_func, void *), const char *userName,
2475 	int32 priority, void *data1, void *data2)
2476 {
2477 	char name[B_OS_NAME_LENGTH];
2478 	thread_id threadID;
2479 
2480 	if (!IS_USER_ADDRESS(entry) || entry == NULL
2481 		|| (userName != NULL && (!IS_USER_ADDRESS(userName)
2482 			|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)))
2483 		return B_BAD_ADDRESS;
2484 
2485 	threadID = create_thread(userName != NULL ? name : "user thread",
2486 		thread_get_current_thread()->team->id, entry,
2487 		data1, data2, priority, false, -1);
2488 
2489 	user_debug_thread_created(threadID);
2490 
2491 	return threadID;
2492 }
2493 
2494 
2495 status_t
2496 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2497 {
2498 	// NOTE: We only know the system timebase at the moment.
2499 	syscall_restart_handle_timeout_pre(flags, timeout);
2500 
2501 	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2502 
2503 	return syscall_restart_handle_timeout_post(error, timeout);
2504 }
2505 
2506 
2507 void
2508 _user_thread_yield(void)
2509 {
2510 	thread_yield(true);
2511 }
2512 
2513 
2514 status_t
2515 _user_get_thread_info(thread_id id, thread_info *userInfo)
2516 {
2517 	thread_info info;
2518 	status_t status;
2519 
2520 	if (!IS_USER_ADDRESS(userInfo))
2521 		return B_BAD_ADDRESS;
2522 
2523 	status = _get_thread_info(id, &info, sizeof(thread_info));
2524 
2525 	if (status >= B_OK
2526 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2527 		return B_BAD_ADDRESS;
2528 
2529 	return status;
2530 }
2531 
2532 
2533 status_t
2534 _user_get_next_thread_info(team_id team, int32 *userCookie,
2535 	thread_info *userInfo)
2536 {
2537 	status_t status;
2538 	thread_info info;
2539 	int32 cookie;
2540 
2541 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2542 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2543 		return B_BAD_ADDRESS;
2544 
2545 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2546 	if (status < B_OK)
2547 		return status;
2548 
2549 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2550 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2551 		return B_BAD_ADDRESS;
2552 
2553 	return status;
2554 }
2555 
2556 
2557 thread_id
2558 _user_find_thread(const char *userName)
2559 {
2560 	char name[B_OS_NAME_LENGTH];
2561 
2562 	if (userName == NULL)
2563 		return find_thread(NULL);
2564 
2565 	if (!IS_USER_ADDRESS(userName)
2566 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2567 		return B_BAD_ADDRESS;
2568 
2569 	return find_thread(name);
2570 }
2571 
2572 
2573 status_t
2574 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2575 {
2576 	status_t returnCode;
2577 	status_t status;
2578 
2579 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2580 		return B_BAD_ADDRESS;
2581 
2582 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2583 
2584 	if (status == B_OK && userReturnCode != NULL
2585 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
2586 		return B_BAD_ADDRESS;
2587 	}
2588 
2589 	return syscall_restart_handle_post(status);
2590 }
2591 
2592 
2593 bool
2594 _user_has_data(thread_id thread)
2595 {
2596 	return has_data(thread);
2597 }
2598 
2599 
2600 status_t
2601 _user_send_data(thread_id thread, int32 code, const void *buffer,
2602 	size_t bufferSize)
2603 {
2604 	if (!IS_USER_ADDRESS(buffer))
2605 		return B_BAD_ADDRESS;
2606 
2607 	return send_data_etc(thread, code, buffer, bufferSize,
2608 		B_KILL_CAN_INTERRUPT);
2609 		// supports userland buffers
2610 }
2611 
2612 
2613 status_t
2614 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2615 {
2616 	thread_id sender;
2617 	status_t code;
2618 
2619 	if (!IS_USER_ADDRESS(_userSender)
2620 		|| !IS_USER_ADDRESS(buffer))
2621 		return B_BAD_ADDRESS;
2622 
2623 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2624 		// supports userland buffers
2625 
2626 	if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
2627 		return B_BAD_ADDRESS;
2628 
2629 	return code;
2630 }
2631 
2632 
2633 // ToDo: the following two functions don't belong here
2634 
2635 
2636 int
2637 _user_getrlimit(int resource, struct rlimit *urlp)
2638 {
2639 	struct rlimit rl;
2640 	int ret;
2641 
2642 	if (urlp == NULL)
2643 		return EINVAL;
2644 
2645 	if (!IS_USER_ADDRESS(urlp))
2646 		return B_BAD_ADDRESS;
2647 
2648 	ret = getrlimit(resource, &rl);
2649 
2650 	if (ret == 0) {
2651 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
2652 		if (ret < 0)
2653 			return ret;
2654 
2655 		return 0;
2656 	}
2657 
2658 	return ret;
2659 }
2660 
2661 
2662 int
2663 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
2664 {
2665 	struct rlimit resourceLimit;
2666 
2667 	if (userResourceLimit == NULL)
2668 		return EINVAL;
2669 
2670 	if (!IS_USER_ADDRESS(userResourceLimit)
2671 		|| user_memcpy(&resourceLimit, userResourceLimit,
2672 			sizeof(struct rlimit)) < B_OK)
2673 		return B_BAD_ADDRESS;
2674 
2675 	return setrlimit(resource, &resourceLimit);
2676 }
2677 
2678