xref: /haiku/src/system/kernel/thread.cpp (revision e9c4d47ad719d6fd67cd9b75b41ebbec563e7a79)
1 /*
2  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*! Threading routines */
10 
11 
12 #include <thread.h>
13 
14 #include <errno.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <sys/resource.h>
19 
20 #include <OS.h>
21 
22 #include <util/AutoLock.h>
23 #include <util/khash.h>
24 
25 #include <arch/debug.h>
26 #include <boot/kernel_args.h>
27 #include <condition_variable.h>
28 #include <cpu.h>
29 #include <int.h>
30 #include <kimage.h>
31 #include <kscheduler.h>
32 #include <ksignal.h>
33 #include <real_time_clock.h>
34 #include <smp.h>
35 #include <syscalls.h>
36 #include <syscall_restart.h>
37 #include <team.h>
38 #include <tls.h>
39 #include <user_runtime.h>
40 #include <user_thread.h>
41 #include <vfs.h>
42 #include <vm.h>
43 #include <vm_address_space.h>
44 #include <wait_for_objects.h>
45 
46 
47 //#define TRACE_THREAD
48 #ifdef TRACE_THREAD
49 #	define TRACE(x) dprintf x
50 #else
51 #	define TRACE(x) ;
52 #endif
53 
54 
55 #define THREAD_MAX_MESSAGE_SIZE		65536
56 
57 
58 struct thread_key {
59 	thread_id id;
60 };
61 
62 // global
63 spinlock gThreadSpinlock = B_SPINLOCK_INITIALIZER;
64 
65 // thread list
66 static struct thread sIdleThreads[B_MAX_CPU_COUNT];
67 static hash_table *sThreadHash = NULL;
68 static thread_id sNextThreadID = 1;
69 
70 // some arbitrarily chosen limits - should probably depend on the available
71 // memory (the limit is not yet enforced)
72 static int32 sMaxThreads = 4096;
73 static int32 sUsedThreads = 0;
74 
75 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
76 	struct thread*	thread;
77 	team_id			teamID;
78 	sem_id			deathSem;
79 
80 	UndertakerEntry(struct thread* thread, team_id teamID, sem_id deathSem)
81 		:
82 		thread(thread),
83 		teamID(teamID),
84 		deathSem(deathSem)
85 	{
86 	}
87 };
88 
89 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
90 static ConditionVariable sUndertakerCondition;
91 
92 // The dead queue is used as a pool from which to retrieve and reuse previously
93 // allocated thread structs when creating a new thread. It should be gone once
94 // the slab allocator is in.
95 static struct thread_queue dead_q;
96 
97 static void thread_kthread_entry(void);
98 static void thread_kthread_exit(void);
99 
100 
101 /*!
102 	Inserts a thread into a team.
103 	You must hold the team lock when you call this function.
104 */
105 static void
106 insert_thread_into_team(struct team *team, struct thread *thread)
107 {
108 	thread->team_next = team->thread_list;
109 	team->thread_list = thread;
110 	team->num_threads++;
111 
112 	if (team->num_threads == 1) {
113 		// this was the first thread
114 		team->main_thread = thread;
115 	}
116 	thread->team = team;
117 }
118 
119 
120 /*!
121 	Removes a thread from a team.
122 	You must hold the team lock when you call this function.
123 */
124 static void
125 remove_thread_from_team(struct team *team, struct thread *thread)
126 {
127 	struct thread *temp, *last = NULL;
128 
129 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
130 		if (temp == thread) {
131 			if (last == NULL)
132 				team->thread_list = temp->team_next;
133 			else
134 				last->team_next = temp->team_next;
135 
136 			team->num_threads--;
137 			break;
138 		}
139 		last = temp;
140 	}
141 }
142 
143 
144 static int
145 thread_struct_compare(void *_t, const void *_key)
146 {
147 	struct thread *thread = (struct thread*)_t;
148 	const struct thread_key *key = (const struct thread_key*)_key;
149 
150 	if (thread->id == key->id)
151 		return 0;
152 
153 	return 1;
154 }
155 
156 
157 static uint32
158 thread_struct_hash(void *_t, const void *_key, uint32 range)
159 {
160 	struct thread *thread = (struct thread*)_t;
161 	const struct thread_key *key = (const struct thread_key*)_key;
162 
163 	if (thread != NULL)
164 		return thread->id % range;
165 
166 	return (uint32)key->id % range;
167 }
168 
169 
170 static void
171 reset_signals(struct thread *thread)
172 {
173 	thread->sig_pending = 0;
174 	thread->sig_block_mask = 0;
175 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
176 	thread->signal_stack_base = 0;
177 	thread->signal_stack_size = 0;
178 	thread->signal_stack_enabled = false;
179 }
180 
181 
182 /*!
183 	Allocates and fills in a thread structure (or reuses one from the
184 	dead queue).
185 
186 	\param threadID The ID to be assigned to the new thread. If
187 		  \code < 0 \endcode a fresh one is allocated.
188 	\param inthread The thread structure to initialize; if \c NULL, one is allocated (or recycled from the dead queue)
189 */
190 
191 static struct thread *
192 create_thread_struct(struct thread *inthread, const char *name,
193 	thread_id threadID, struct cpu_ent *cpu)
194 {
195 	struct thread *thread;
196 	cpu_status state;
197 	char temp[64];
198 
199 	if (inthread == NULL) {
200 		// try to recycle one from the dead queue first
201 		state = disable_interrupts();
202 		GRAB_THREAD_LOCK();
203 		thread = thread_dequeue(&dead_q);
204 		RELEASE_THREAD_LOCK();
205 		restore_interrupts(state);
206 
207 		// if not, create a new one
208 		if (thread == NULL) {
209 			thread = (struct thread *)malloc(sizeof(struct thread));
210 			if (thread == NULL)
211 				return NULL;
212 		}
213 	} else {
214 		thread = inthread;
215 	}
216 
217 	if (name != NULL)
218 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
219 	else
220 		strcpy(thread->name, "unnamed thread");
221 
222 	thread->flags = 0;
223 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
224 	thread->team = NULL;
225 	thread->cpu = cpu;
226 	thread->fault_handler = 0;
227 	thread->page_faults_allowed = 1;
228 	thread->kernel_stack_area = -1;
229 	thread->kernel_stack_base = 0;
230 	thread->user_stack_area = -1;
231 	thread->user_stack_base = 0;
232 	thread->user_local_storage = 0;
233 	thread->kernel_errno = 0;
234 	thread->team_next = NULL;
235 	thread->queue_next = NULL;
236 	thread->priority = thread->next_priority = -1;
237 	thread->io_priority = -1;
238 	thread->args1 = NULL;  thread->args2 = NULL;
239 	thread->alarm.period = 0;
240 	reset_signals(thread);
241 	thread->in_kernel = true;
242 	thread->was_yielded = false;
243 	thread->user_time = 0;
244 	thread->kernel_time = 0;
245 	thread->last_time = 0;
246 	thread->exit.status = 0;
247 	thread->exit.reason = 0;
248 	thread->exit.signal = 0;
249 	list_init(&thread->exit.waiters);
250 	thread->select_infos = NULL;
251 	thread->post_interrupt_callback = NULL;
252 	thread->post_interrupt_data = NULL;
253 
254 	sprintf(temp, "thread_%ld_retcode_sem", thread->id);
255 	thread->exit.sem = create_sem(0, temp);
256 	if (thread->exit.sem < B_OK)
257 		goto err1;
258 
259 	sprintf(temp, "%s send", thread->name);
260 	thread->msg.write_sem = create_sem(1, temp);
261 	if (thread->msg.write_sem < B_OK)
262 		goto err2;
263 
264 	sprintf(temp, "%s receive", thread->name);
265 	thread->msg.read_sem = create_sem(0, temp);
266 	if (thread->msg.read_sem < B_OK)
267 		goto err3;
268 
269 	if (arch_thread_init_thread_struct(thread) < B_OK)
270 		goto err4;
271 
272 	return thread;
273 
274 err4:
275 	delete_sem(thread->msg.read_sem);
276 err3:
277 	delete_sem(thread->msg.write_sem);
278 err2:
279 	delete_sem(thread->exit.sem);
280 err1:
281 	// ToDo: put them in the dead queue instead?
282 	if (inthread == NULL)
283 		free(thread);
284 	return NULL;
285 }
286 
287 
288 static void
289 delete_thread_struct(struct thread *thread)
290 {
291 	delete_sem(thread->exit.sem);
292 	delete_sem(thread->msg.write_sem);
293 	delete_sem(thread->msg.read_sem);
294 
295 	// ToDo: put them in the dead queue instead?
296 	free(thread);
297 }
298 
299 
300 /*! This function gets run by a new thread before anything else */
301 static void
302 thread_kthread_entry(void)
303 {
304 	struct thread *thread = thread_get_current_thread();
305 
306 	// The thread is new and has been scheduled for the first time. Notify the
307 	// user debugger code.
308 	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
309 		user_debug_thread_scheduled(thread);
310 
311 	// simulates the thread spinlock release that would occur if the thread had
312 	// been rescheduled from; that reschedule didn't happen because the thread is new.
313 	RELEASE_THREAD_LOCK();
314 
315 	// start tracking time
316 	thread->last_time = system_time();
317 
318 	enable_interrupts(); // this essentially simulates a return-from-interrupt
319 }
320 
321 
322 static void
323 thread_kthread_exit(void)
324 {
325 	struct thread *thread = thread_get_current_thread();
326 
327 	thread->exit.reason = THREAD_RETURN_EXIT;
328 	thread_exit();
329 }
330 
331 
332 /*!
333 	Initializes the thread and jumps to its userspace entry point.
334 	This function is called at creation time of every user thread,
335 	but not for a team's main thread.
336 */
337 static int
338 _create_user_thread_kentry(void)
339 {
340 	struct thread *thread = thread_get_current_thread();
341 
342 	// jump to the entry point in user space
343 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
344 		thread->args1, thread->args2);
345 
346 	// only get here if the above call fails
347 	return 0;
348 }
349 
350 
351 /*! Initializes the thread and calls its kernel space entry point. */
352 static int
353 _create_kernel_thread_kentry(void)
354 {
355 	struct thread *thread = thread_get_current_thread();
356 	int (*func)(void *args) = (int (*)(void *))thread->entry;
357 
358 	// call the entry function with the appropriate args
359 	return func(thread->args1);
360 }
361 
362 
363 /*!
364 	Creates a new thread in the team with the specified team ID.
365 
366 	\param threadID The ID to be assigned to the new thread. If
367 		  \code < 0 \endcode a fresh one is allocated.
368 */
369 static thread_id
370 create_thread(thread_creation_attributes& attributes, bool kernel)
371 {
372 	struct thread *thread, *currentThread;
373 	struct team *team;
374 	cpu_status state;
375 	char stack_name[B_OS_NAME_LENGTH];
376 	status_t status;
377 	bool abort = false;
378 	bool debugNewThread = false;
379 
380 	TRACE(("create_thread(%s, id = %ld, %s)\n", attributes.name,
381 		attributes.thread, kernel ? "kernel" : "user"));
382 
383 	thread = create_thread_struct(NULL, attributes.name, attributes.thread,
384 		NULL);
385 	if (thread == NULL)
386 		return B_NO_MEMORY;
387 
388 	thread->priority = attributes.priority == -1
389 		? B_NORMAL_PRIORITY : attributes.priority;
390 	thread->next_priority = thread->priority;
391 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
392 	thread->state = B_THREAD_SUSPENDED;
393 	thread->next_state = B_THREAD_SUSPENDED;
394 
395 	// init debug structure
396 	init_thread_debug_info(&thread->debug_info);
397 
398 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", attributes.name,
399 		thread->id);
400 	thread->kernel_stack_area = create_area(stack_name,
401 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
402 		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
403 		B_FULL_LOCK,
404 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
405 
406 	if (thread->kernel_stack_area < 0) {
407 		// we're not yet part of a team, so we can just bail out
408 		status = thread->kernel_stack_area;
409 
410 		dprintf("create_thread: error creating kernel stack: %s!\n",
411 			strerror(status));
412 
413 		delete_thread_struct(thread);
414 		return status;
415 	}
416 
417 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
418 		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
419 
420 	state = disable_interrupts();
421 	GRAB_THREAD_LOCK();
422 
423 	// If the new thread belongs to the same team as the current thread,
424 	// it may inherit some of the thread debug flags.
425 	currentThread = thread_get_current_thread();
426 	if (currentThread && currentThread->team->id == attributes.team) {
427 		// inherit all user flags...
428 		int32 debugFlags = currentThread->debug_info.flags
429 			& B_THREAD_DEBUG_USER_FLAG_MASK;
430 
431 		// ... except the syscall tracing flags, unless explicitly requested
432 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
433 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
434 				| B_THREAD_DEBUG_POST_SYSCALL);
435 		}
436 
437 		thread->debug_info.flags = debugFlags;
438 
439 		// stop the new thread, if desired
440 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
441 	}
442 
443 	// insert into global list
444 	hash_insert(sThreadHash, thread);
445 	sUsedThreads++;
446 	RELEASE_THREAD_LOCK();
447 
448 	GRAB_TEAM_LOCK();
449 	// look at the team, make sure it's not being deleted
450 	team = team_get_team_struct_locked(attributes.team);
451 
452 	if (team == NULL || team->state == TEAM_STATE_DEATH)
453 		abort = true;
454 
455 	if (!abort && !kernel) {
456 		thread->user_thread = team_allocate_user_thread(team);
457 		abort = thread->user_thread == NULL;
458 	}
459 
460 	if (!abort) {
461 		// Debug the new thread, if the parent thread required that (see above),
462 		// or the respective global team debug flag is set. But only if a
463 		// debugger is installed for the team.
464 		debugNewThread |= (atomic_get(&team->debug_info.flags)
465 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
466 		if (debugNewThread
467 			&& (atomic_get(&team->debug_info.flags)
468 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
469 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
470 		}
471 
472 		insert_thread_into_team(team, thread);
473 	}
474 
475 	RELEASE_TEAM_LOCK();
476 	if (abort) {
477 		GRAB_THREAD_LOCK();
478 		hash_remove(sThreadHash, thread);
479 		RELEASE_THREAD_LOCK();
480 	}
481 	restore_interrupts(state);
482 	if (abort) {
483 		delete_area(thread->kernel_stack_area);
484 		delete_thread_struct(thread);
485 		return B_BAD_TEAM_ID;
486 	}
487 
488 	thread->args1 = attributes.args1;
489 	thread->args2 = attributes.args2;
490 	thread->entry = attributes.entry;
491 	status = thread->id;
492 
493 	if (kernel) {
494 		// this sets up an initial kthread stack that runs the entry
495 
496 		// Note: whatever function wants to set up a user stack later for this
497 		// thread must initialize the TLS for it
498 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
499 			&thread_kthread_entry, &thread_kthread_exit);
500 	} else {
501 		// create user stack
502 
503 		// the stack will be between USER_STACK_REGION and the main thread stack
504 		// area (the user stack of the main thread is created in
505 		// team_create_team())
506 		if (attributes.stack_address == NULL) {
507 			thread->user_stack_base = USER_STACK_REGION;
508 			if (attributes.stack_size <= 0)
509 				thread->user_stack_size = USER_STACK_SIZE;
510 			else
511 				thread->user_stack_size = PAGE_ALIGN(attributes.stack_size);
512 			thread->user_stack_size += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
513 
514 			snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack",
515 				attributes.name, thread->id);
516 			thread->user_stack_area = create_area_etc(team->id, stack_name,
517 					(void **)&thread->user_stack_base, B_BASE_ADDRESS,
518 					thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
519 					B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0);
520 			if (thread->user_stack_area < B_OK
521 				|| arch_thread_init_tls(thread) < B_OK) {
522 				// great, we have a fully running thread without a (usable)
523 				// stack
524 				dprintf("create_thread: unable to create proper user stack!\n");
525 				status = thread->user_stack_area;
526 				kill_thread(thread->id);
527 			}
528 		} else {
529 			thread->user_stack_base = (addr_t)attributes.stack_address;
530 			thread->user_stack_size = attributes.stack_size;
531 		}
532 
533 		user_debug_update_new_thread_flags(thread->id);
534 
535 		// The user entry was copied into the thread structure above; the
536 		// kernel entry function set up here will immediately switch the thread
537 		// into user space.
538 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
539 			&thread_kthread_entry, &thread_kthread_exit);
540 	}
541 
542 	return status;
543 }
544 
545 
546 static status_t
547 undertaker(void* /*args*/)
548 {
549 	while (true) {
550 		// wait for a thread to bury
551 		InterruptsSpinLocker locker(gThreadSpinlock);
552 
553 		while (sUndertakerEntries.IsEmpty()) {
554 			ConditionVariableEntry conditionEntry;
555 			sUndertakerCondition.Add(&conditionEntry);
556 			locker.Unlock();
557 
558 			conditionEntry.Wait();
559 
560 			locker.Lock();
561 		}
562 
563 		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
564 		locker.Unlock();
565 
566 		UndertakerEntry entry = *_entry;
567 			// we need a copy, since the original entry is on the thread's stack
568 
569 		// we've got an entry
570 		struct thread* thread = entry.thread;
571 
572 		// delete the old kernel stack area
573 		delete_area(thread->kernel_stack_area);
574 
575 		// remove this thread from all of the global lists
576 		disable_interrupts();
577 		GRAB_TEAM_LOCK();
578 
579 		remove_thread_from_team(team_get_kernel_team(), thread);
580 
581 		RELEASE_TEAM_LOCK();
582 		enable_interrupts();
583 			// needed for the debugger notification below
584 
585 		if (entry.deathSem >= 0)
586 			release_sem_etc(entry.deathSem, 1, B_DO_NOT_RESCHEDULE);
587 
588 		// free the thread structure
589 		thread_enqueue(thread, &dead_q);
590 			// TODO: Use the slab allocator!
591 	}
592 }
593 
594 
595 static sem_id
596 get_thread_wait_sem(struct thread* thread)
597 {
598 	if (thread->state == B_THREAD_WAITING
599 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
600 		return (sem_id)(addr_t)thread->wait.object;
601 	}
602 	return -1;
603 }
604 
605 
606 /*!
607 	Fills the thread_info structure with information from the specified
608 	thread.
609 	The thread lock must be held when called.
610 */
611 static void
612 fill_thread_info(struct thread *thread, thread_info *info, size_t size)
613 {
614 	info->thread = thread->id;
615 	info->team = thread->team->id;
616 
617 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
618 
619 	if (thread->state == B_THREAD_WAITING) {
620 		info->state = B_THREAD_WAITING;
621 
622 		switch (thread->wait.type) {
623 			case THREAD_BLOCK_TYPE_SNOOZE:
624 				info->state = B_THREAD_ASLEEP;
625 				break;
626 
627 			case THREAD_BLOCK_TYPE_SEMAPHORE:
628 			{
629 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
630 				if (sem == thread->msg.read_sem)
631 					info->state = B_THREAD_RECEIVING;
632 				break;
633 			}
634 
635 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
636 			default:
637 				break;
638 		}
639 	} else
640 		info->state = (thread_state)thread->state;
641 
642 	info->priority = thread->priority;
643 	info->user_time = thread->user_time;
644 	info->kernel_time = thread->kernel_time;
645 	info->stack_base = (void *)thread->user_stack_base;
646 	info->stack_end = (void *)(thread->user_stack_base
647 		+ thread->user_stack_size);
648 	info->sem = get_thread_wait_sem(thread);
649 }
650 
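// fill_thread_info() presumably feeds the public get_thread_info() call; a
// minimal userland sketch (standard BeOS/Haiku API, shown for illustration
// only):
//
// \code
//	thread_info info;
//	if (get_thread_info(find_thread(NULL), &info) == B_OK)
//		printf("%s: state %d, sem %ld\n", info.name, (int)info.state,
//			info.sem);
// \endcode
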
651 static status_t
652 send_data_etc(thread_id id, int32 code, const void *buffer,
653 	size_t bufferSize, int32 flags)
654 {
655 	struct thread *target;
656 	sem_id cachedSem;
657 	cpu_status state;
658 	status_t status;
659 	cbuf *data;
660 
661 	state = disable_interrupts();
662 	GRAB_THREAD_LOCK();
663 	target = thread_get_thread_struct_locked(id);
664 	if (!target) {
665 		RELEASE_THREAD_LOCK();
666 		restore_interrupts(state);
667 		return B_BAD_THREAD_ID;
668 	}
669 	cachedSem = target->msg.write_sem;
670 	RELEASE_THREAD_LOCK();
671 	restore_interrupts(state);
672 
673 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
674 		return B_NO_MEMORY;
675 
676 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
677 	if (status == B_INTERRUPTED) {
678 		// We got interrupted by a signal
679 		return status;
680 	}
681 	if (status != B_OK) {
682 		// Any other acquisition problems may be due to thread deletion
683 		return B_BAD_THREAD_ID;
684 	}
685 
686 	if (bufferSize > 0) {
687 		data = cbuf_get_chain(bufferSize);
688 		if (data == NULL)
689 			return B_NO_MEMORY;
690 		status = cbuf_user_memcpy_to_chain(data, 0, buffer, bufferSize);
691 		if (status < B_OK) {
692 			cbuf_free_chain(data);
693 			return B_NO_MEMORY;
694 		}
695 	} else
696 		data = NULL;
697 
698 	state = disable_interrupts();
699 	GRAB_THREAD_LOCK();
700 
701 	// The target thread could have been deleted at this point
702 	target = thread_get_thread_struct_locked(id);
703 	if (target == NULL) {
704 		RELEASE_THREAD_LOCK();
705 		restore_interrupts(state);
706 		cbuf_free_chain(data);
707 		return B_BAD_THREAD_ID;
708 	}
709 
710 	// Save the message information
711 	target->msg.sender = thread_get_current_thread()->id;
712 	target->msg.code = code;
713 	target->msg.size = bufferSize;
714 	target->msg.buffer = data;
715 	cachedSem = target->msg.read_sem;
716 
717 	RELEASE_THREAD_LOCK();
718 	restore_interrupts(state);
719 
720 	release_sem(cachedSem);
721 	return B_OK;
722 }
723 
724 
725 static int32
726 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
727 	int32 flags)
728 {
729 	struct thread *thread = thread_get_current_thread();
730 	status_t status;
731 	size_t size;
732 	int32 code;
733 
734 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
735 	if (status < B_OK) {
736 		// Actually, we're not supposed to return error codes
737 		// but since the only reason this can fail is that we
738 		// were killed, it's probably okay to do so (but also
739 		// meaningless).
740 		return status;
741 	}
742 
743 	if (buffer != NULL && bufferSize != 0) {
744 		size = min_c(bufferSize, thread->msg.size);
745 		status = cbuf_user_memcpy_from_chain(buffer, thread->msg.buffer,
746 			0, size);
747 		if (status < B_OK) {
748 			cbuf_free_chain(thread->msg.buffer);
749 			release_sem(thread->msg.write_sem);
750 			return status;
751 		}
752 	}
753 
754 	*_sender = thread->msg.sender;
755 	code = thread->msg.code;
756 
757 	cbuf_free_chain(thread->msg.buffer);
758 	release_sem(thread->msg.write_sem);
759 
760 	return code;
761 }
762 
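// send_data_etc()/receive_data_etc() back the classic send_data()/
// receive_data() thread messaging API. A hedged usage sketch (thread IDs and
// payload are made up; each thread has a single one-message slot guarded by
// msg.write_sem/msg.read_sem as implemented above):
//
// \code
//	// sender side
//	status_t error = send_data(targetThread, 'mesg', "hello", 6);
//		// blocks until the target's message slot is free
//
//	// receiver side (in the target thread)
//	char buffer[64];
//	thread_id sender;
//	int32 code = receive_data(&sender, buffer, sizeof(buffer));
//		// blocks until a message arrives; returns the sender's code
// \endcode
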
763 
764 static status_t
765 common_getrlimit(int resource, struct rlimit * rlp)
766 {
767 	if (!rlp)
768 		return B_BAD_ADDRESS;
769 
770 	switch (resource) {
771 		case RLIMIT_NOFILE:
772 		case RLIMIT_NOVMON:
773 			return vfs_getrlimit(resource, rlp);
774 
775 		case RLIMIT_CORE:
776 			rlp->rlim_cur = 0;
777 			rlp->rlim_max = 0;
778 			return B_OK;
779 
780 		case RLIMIT_STACK:
781 		{
782 			struct thread *thread = thread_get_current_thread();
783 			if (!thread)
784 				return B_ERROR;
785 			rlp->rlim_cur = thread->user_stack_size;
786 			rlp->rlim_max = thread->user_stack_size;
787 			return B_OK;
788 		}
789 
790 		default:
791 			return EINVAL;
792 	}
793 
794 	return B_OK;
795 }
796 
797 
798 static status_t
799 common_setrlimit(int resource, const struct rlimit * rlp)
800 {
801 	if (!rlp)
802 		return B_BAD_ADDRESS;
803 
804 	switch (resource) {
805 		case RLIMIT_NOFILE:
806 		case RLIMIT_NOVMON:
807 			return vfs_setrlimit(resource, rlp);
808 
809 		case RLIMIT_CORE:
810 			// We don't support core files, so only allow setting the limits to 0/0.
811 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
812 				return EINVAL;
813 			return B_OK;
814 
815 		default:
816 			return EINVAL;
817 	}
818 
819 	return B_OK;
820 }
821 
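// These two presumably back the POSIX getrlimit()/setrlimit() syscalls. A
// minimal userland sketch (standard POSIX usage; RLIMIT_STACK reflects the
// calling thread's user stack size as returned above):
//
// \code
//	struct rlimit rl;
//	if (getrlimit(RLIMIT_STACK, &rl) == 0)
//		printf("stack: %ld of %ld bytes\n", (long)rl.rlim_cur,
//			(long)rl.rlim_max);
//
//	rl.rlim_cur = rl.rlim_max = 0;
//	setrlimit(RLIMIT_CORE, &rl);
//		// 0/0 is the only RLIMIT_CORE setting accepted here
// \endcode
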
822 
823 //	#pragma mark - debugger calls
824 
825 
826 static int
827 make_thread_unreal(int argc, char **argv)
828 {
829 	struct thread *thread;
830 	struct hash_iterator i;
831 	int32 id = -1;
832 
833 	if (argc > 2) {
834 		print_debugger_command_usage(argv[0]);
835 		return 0;
836 	}
837 
838 	if (argc > 1)
839 		id = strtoul(argv[1], NULL, 0);
840 
841 	hash_open(sThreadHash, &i);
842 
843 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
844 		if (id != -1 && thread->id != id)
845 			continue;
846 
847 		if (thread->priority > B_DISPLAY_PRIORITY) {
848 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
849 			kprintf("thread %ld made unreal\n", thread->id);
850 		}
851 	}
852 
853 	hash_close(sThreadHash, &i, false);
854 	return 0;
855 }
856 
857 
858 static int
859 set_thread_prio(int argc, char **argv)
860 {
861 	struct thread *thread;
862 	struct hash_iterator i;
863 	int32 id;
864 	int32 prio;
865 
866 	if (argc > 3 || argc < 2) {
867 		print_debugger_command_usage(argv[0]);
868 		return 0;
869 	}
870 
871 	prio = strtoul(argv[1], NULL, 0);
872 	if (prio > B_MAX_PRIORITY)
873 		prio = B_MAX_PRIORITY;
874 	if (prio < B_MIN_PRIORITY)
875 		prio = B_MIN_PRIORITY;
876 
877 	if (argc > 2)
878 		id = strtoul(argv[2], NULL, 0);
879 	else
880 		id = thread_get_current_thread()->id;
881 
882 	hash_open(sThreadHash, &i);
883 
884 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
885 		if (thread->id != id)
886 			continue;
887 		thread->priority = thread->next_priority = prio;
888 		kprintf("thread %ld set to priority %ld\n", id, prio);
889 		break;
890 	}
891 	if (!thread)
892 		kprintf("thread %ld (%#lx) not found\n", id, id);
893 
894 	hash_close(sThreadHash, &i, false);
895 	return 0;
896 }
897 
898 
899 static int
900 make_thread_suspended(int argc, char **argv)
901 {
902 	struct thread *thread;
903 	struct hash_iterator i;
904 	int32 id;
905 
906 	if (argc > 2) {
907 		print_debugger_command_usage(argv[0]);
908 		return 0;
909 	}
910 
911 	if (argc == 1)
912 		id = thread_get_current_thread()->id;
913 	else
914 		id = strtoul(argv[1], NULL, 0);
915 
916 	hash_open(sThreadHash, &i);
917 
918 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
919 		if (thread->id != id)
920 			continue;
921 
922 		thread->next_state = B_THREAD_SUSPENDED;
923 		kprintf("thread %ld suspended\n", id);
924 		break;
925 	}
926 	if (!thread)
927 		kprintf("thread %ld (%#lx) not found\n", id, id);
928 
929 	hash_close(sThreadHash, &i, false);
930 	return 0;
931 }
932 
933 
934 static int
935 make_thread_resumed(int argc, char **argv)
936 {
937 	struct thread *thread;
938 	struct hash_iterator i;
939 	int32 id;
940 
941 	if (argc != 2) {
942 		print_debugger_command_usage(argv[0]);
943 		return 0;
944 	}
945 
946 	// force user to enter a thread id, as using
947 	// the current thread is usually not intended
948 	id = strtoul(argv[1], NULL, 0);
949 
950 	hash_open(sThreadHash, &i);
951 
952 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
953 		if (thread->id != id)
954 			continue;
955 
956 		if (thread->state == B_THREAD_SUSPENDED) {
957 			scheduler_enqueue_in_run_queue(thread);
958 			kprintf("thread %ld resumed\n", thread->id);
959 		}
960 		break;
961 	}
962 	if (!thread)
963 		kprintf("thread %ld (%#lx) not found\n", id, id);
964 
965 	hash_close(sThreadHash, &i, false);
966 	return 0;
967 }
968 
969 
970 static int
971 drop_into_debugger(int argc, char **argv)
972 {
973 	status_t err;
974 	int32 id;
975 
976 	if (argc > 2) {
977 		print_debugger_command_usage(argv[0]);
978 		return 0;
979 	}
980 
981 	if (argc == 1)
982 		id = thread_get_current_thread()->id;
983 	else
984 		id = strtoul(argv[1], NULL, 0);
985 
986 	err = _user_debug_thread(id);
987 	if (err)
988 		kprintf("drop failed\n");
989 	else
990 		kprintf("thread %ld dropped into user debugger\n", id);
991 
992 	return 0;
993 }
994 
995 
996 static const char *
997 state_to_text(struct thread *thread, int32 state)
998 {
999 	switch (state) {
1000 		case B_THREAD_READY:
1001 			return "ready";
1002 
1003 		case B_THREAD_RUNNING:
1004 			return "running";
1005 
1006 		case B_THREAD_WAITING:
1007 		{
1008 			if (thread != NULL) {
1009 				switch (thread->wait.type) {
1010 					case THREAD_BLOCK_TYPE_SNOOZE:
1011 						return "zzz";
1012 
1013 					case THREAD_BLOCK_TYPE_SEMAPHORE:
1014 					{
1015 						sem_id sem = (sem_id)(addr_t)thread->wait.object;
1016 						if (sem == thread->msg.read_sem)
1017 							return "receive";
1018 						break;
1019 					}
1020 				}
1021 			}
1022 
1023 			return "waiting";
1024 		}
1025 
1026 		case B_THREAD_SUSPENDED:
1027 			return "suspended";
1028 
1029 		case THREAD_STATE_FREE_ON_RESCHED:
1030 			return "death";
1031 
1032 		default:
1033 			return "UNKNOWN";
1034 	}
1035 }
1036 
1037 
1038 static void
1039 print_thread_list_table_head()
1040 {
1041 	kprintf("thread         id  state     wait for   object  cpu pri  stack    "
1042 		"  team  name\n");
1043 }
1044 
1045 
1046 static void
1047 _dump_thread_info(struct thread *thread, bool shortInfo)
1048 {
1049 	if (shortInfo) {
1050 		kprintf("%p %6ld  %-10s", thread, thread->id, state_to_text(thread,
1051 			thread->state));
1052 
1053 		// does it block on a semaphore or a condition variable?
1054 		if (thread->state == B_THREAD_WAITING) {
1055 			switch (thread->wait.type) {
1056 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1057 				{
1058 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1059 					if (sem == thread->msg.read_sem)
1060 						kprintf("                    ");
1061 					else
1062 						kprintf("sem  %12ld   ", sem);
1063 					break;
1064 				}
1065 
1066 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1067 					kprintf("cvar   %p   ", thread->wait.object);
1068 					break;
1069 
1070 				case THREAD_BLOCK_TYPE_SNOOZE:
1071 					kprintf("                    ");
1072 					break;
1073 
1074 				case THREAD_BLOCK_TYPE_SIGNAL:
1075 					kprintf("signal              ");
1076 					break;
1077 
1078 				case THREAD_BLOCK_TYPE_MUTEX:
1079 					kprintf("mutex  %p   ", thread->wait.object);
1080 					break;
1081 
1082 				case THREAD_BLOCK_TYPE_RW_LOCK:
1083 					kprintf("rwlock %p   ", thread->wait.object);
1084 					break;
1085 
1086 				case THREAD_BLOCK_TYPE_OTHER:
1087 					kprintf("other               ");
1088 					break;
1089 
1090 				default:
1091 					kprintf("???    %p   ", thread->wait.object);
1092 					break;
1093 			}
1094 		} else
1095 			kprintf("        -           ");
1096 
1097 		// on which CPU does it run?
1098 		if (thread->cpu)
1099 			kprintf("%2d", thread->cpu->cpu_num);
1100 		else
1101 			kprintf(" -");
1102 
1103 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1104 			(void *)thread->kernel_stack_base, thread->team->id,
1105 			thread->name != NULL ? thread->name : "<NULL>");
1106 
1107 		return;
1108 	}
1109 
1110 	// print the long info
1111 
1112 	struct death_entry *death = NULL;
1113 
1114 	kprintf("THREAD: %p\n", thread);
1115 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1116 	kprintf("name:               \"%s\"\n", thread->name);
1117 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1118 		thread->all_next, thread->team_next, thread->queue_next);
1119 	kprintf("priority:           %ld (next %ld)\n", thread->priority, thread->next_priority);
1120 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1121 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1122 	kprintf("cpu:                %p ", thread->cpu);
1123 	if (thread->cpu)
1124 		kprintf("(%d)\n", thread->cpu->cpu_num);
1125 	else
1126 		kprintf("\n");
1127 	kprintf("sig_pending:        %#lx (blocked: %#lx)\n", thread->sig_pending,
1128 		thread->sig_block_mask);
1129 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1130 
1131 	if (thread->state == B_THREAD_WAITING) {
1132 		kprintf("waiting for:        ");
1133 
1134 		switch (thread->wait.type) {
1135 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1136 			{
1137 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1138 				if (sem == thread->msg.read_sem)
1139 					kprintf("data\n");
1140 				else
1141 					kprintf("semaphore %ld\n", sem);
1142 				break;
1143 			}
1144 
1145 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1146 				kprintf("condition variable %p\n", thread->wait.object);
1147 				break;
1148 
1149 			case THREAD_BLOCK_TYPE_SNOOZE:
1150 				kprintf("snooze()\n");
1151 				break;
1152 
1153 			case THREAD_BLOCK_TYPE_SIGNAL:
1154 				kprintf("signal\n");
1155 				break;
1156 
1157 			case THREAD_BLOCK_TYPE_MUTEX:
1158 				kprintf("mutex %p\n", thread->wait.object);
1159 				break;
1160 
1161 			case THREAD_BLOCK_TYPE_RW_LOCK:
1162 				kprintf("rwlock %p\n", thread->wait.object);
1163 				break;
1164 
1165 			case THREAD_BLOCK_TYPE_OTHER:
1166 				kprintf("other (%s)\n", (char*)thread->wait.object);
1167 				break;
1168 
1169 			default:
1170 				kprintf("unknown (%p)\n", thread->wait.object);
1171 				break;
1172 		}
1173 	}
1174 
1175 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1176 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1177 	kprintf("entry:              %p\n", (void *)thread->entry);
1178 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1179 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1180 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1181 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1182 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1183 	kprintf("  exit.waiters:\n");
1184 	while ((death = (struct death_entry*)list_get_next_item(
1185 			&thread->exit.waiters, death)) != NULL) {
1186 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1187 	}
1188 
1189 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1190 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1191 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1192 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1193 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1194 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1195 		strerror(thread->kernel_errno));
1196 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1197 	kprintf("user_time:          %Ld\n", thread->user_time);
1198 	kprintf("flags:              0x%lx\n", thread->flags);
1199 	kprintf("architecture dependent section:\n");
1200 	arch_thread_dump_info(&thread->arch_info);
1201 }
1202 
1203 
1204 static int
1205 dump_thread_info(int argc, char **argv)
1206 {
1207 	bool shortInfo = false;
1208 	int argi = 1;
1209 	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
1210 		shortInfo = true;
1211 		print_thread_list_table_head();
1212 		argi++;
1213 	}
1214 
1215 	if (argi == argc) {
1216 		_dump_thread_info(thread_get_current_thread(), shortInfo);
1217 		return 0;
1218 	}
1219 
1220 	for (; argi < argc; argi++) {
1221 		const char *name = argv[argi];
1222 		int32 id = strtoul(name, NULL, 0);
1223 
1224 		if (IS_KERNEL_ADDRESS(id)) {
1225 			// semi-hack
1226 			_dump_thread_info((struct thread *)id, shortInfo);
1227 			continue;
1228 		}
1229 
1230 		// walk through the thread list, trying to match name or id
1231 		bool found = false;
1232 		struct hash_iterator i;
1233 		hash_open(sThreadHash, &i);
1234 		struct thread *thread;
1235 		while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1236 			if (!strcmp(name, thread->name) || thread->id == id) {
1237 				_dump_thread_info(thread, shortInfo);
1238 				found = true;
1239 				break;
1240 			}
1241 		}
1242 		hash_close(sThreadHash, &i, false);
1243 
1244 		if (!found)
1245 			kprintf("thread \"%s\" (%ld) doesn't exist!\n", name, id);
1246 	}
1247 
1248 	return 0;
1249 }
1250 
1251 
1252 static int
1253 dump_thread_list(int argc, char **argv)
1254 {
1255 	struct thread *thread;
1256 	struct hash_iterator i;
1257 	bool realTimeOnly = false;
1258 	bool calling = false;
1259 	const char *callSymbol = NULL;
1260 	addr_t callStart = 0;
1261 	addr_t callEnd = 0;
1262 	int32 requiredState = 0;
1263 	team_id team = -1;
1264 	sem_id sem = -1;
1265 
1266 	if (!strcmp(argv[0], "realtime"))
1267 		realTimeOnly = true;
1268 	else if (!strcmp(argv[0], "ready"))
1269 		requiredState = B_THREAD_READY;
1270 	else if (!strcmp(argv[0], "running"))
1271 		requiredState = B_THREAD_RUNNING;
1272 	else if (!strcmp(argv[0], "waiting")) {
1273 		requiredState = B_THREAD_WAITING;
1274 
1275 		if (argc > 1) {
1276 			sem = strtoul(argv[1], NULL, 0);
1277 			if (sem == 0)
1278 				kprintf("ignoring invalid semaphore argument.\n");
1279 		}
1280 	} else if (!strcmp(argv[0], "calling")) {
1281 		if (argc < 2) {
1282 			kprintf("Need to give a symbol name or start and end arguments.\n");
1283 			return 0;
1284 		} else if (argc == 3) {
1285 			callStart = parse_expression(argv[1]);
1286 			callEnd = parse_expression(argv[2]);
1287 		} else
1288 			callSymbol = argv[1];
1289 
1290 		calling = true;
1291 	} else if (argc > 1) {
1292 		team = strtoul(argv[1], NULL, 0);
1293 		if (team == 0)
1294 			kprintf("ignoring invalid team argument.\n");
1295 	}
1296 
1297 	print_thread_list_table_head();
1298 
1299 	hash_open(sThreadHash, &i);
1300 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1301 		// filter out threads not matching the search criteria
1302 		if ((requiredState && thread->state != requiredState)
1303 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1304 					callStart, callEnd))
1305 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1306 			|| (team > 0 && thread->team->id != team)
1307 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1308 			continue;
1309 
1310 		_dump_thread_info(thread, true);
1311 	}
1312 	hash_close(sThreadHash, &i, false);
1313 	return 0;
1314 }
1315 
1316 
1317 //	#pragma mark - private kernel API
1318 
1319 
1320 void
1321 thread_exit(void)
1322 {
1323 	cpu_status state;
1324 	struct thread *thread = thread_get_current_thread();
1325 	struct team *team = thread->team;
1326 	thread_id parentID = -1;
1327 	bool deleteTeam = false;
1328 	sem_id cachedDeathSem = -1;
1329 	status_t status;
1330 	struct thread_debug_info debugInfo;
1331 	team_id teamID = team->id;
1332 
1333 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1334 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1335 			? "due to signal" : "normally", thread->exit.status));
1336 
1337 	if (!are_interrupts_enabled())
1338 		panic("thread_exit() called with interrupts disabled!\n");
1339 
1340 	// boost our priority to get this over with
1341 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1342 
1343 	// Cancel previously installed alarm timer, if any
1344 	cancel_timer(&thread->alarm);
1345 
1346 	// delete the user stack area first, we won't need it anymore
1347 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1348 		area_id area = thread->user_stack_area;
1349 		thread->user_stack_area = -1;
1350 		vm_delete_area(team->id, area, true);
1351 	}
1352 
1353 	struct job_control_entry *death = NULL;
1354 	struct death_entry* threadDeathEntry = NULL;
1355 
1356 	if (team != team_get_kernel_team()) {
1357 		user_debug_thread_exiting(thread);
1358 
1359 		if (team->main_thread == thread) {
1360 			// this was the main thread in this team, so we will delete the team as well
1361 			deleteTeam = true;
1362 		} else {
1363 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1364 			team_free_user_thread(thread);
1365 		}
1366 
1367 		// remove this thread from the current team and add it to the kernel
1368 		// team, where it stays until it dies
1369 		state = disable_interrupts();
1370 		GRAB_TEAM_LOCK();
1371 		GRAB_THREAD_LOCK();
1372 			// removing the thread and putting its death entry into the parent
1373 			// team needs to be an atomic operation
1374 
1375 		// remember how long this thread lasted
1376 		team->dead_threads_kernel_time += thread->kernel_time;
1377 		team->dead_threads_user_time += thread->user_time;
1378 
1379 		remove_thread_from_team(team, thread);
1380 		insert_thread_into_team(team_get_kernel_team(), thread);
1381 
1382 		cachedDeathSem = team->death_sem;
1383 
1384 		if (deleteTeam) {
1385 			struct team *parent = team->parent;
1386 
1387 			// remember who our parent was so we can send a signal
1388 			parentID = parent->id;
1389 
1390 			// Set the team job control state to "dead" and detach the job
1391 			// control entry from our team struct.
1392 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1393 			death = team->job_control_entry;
1394 			team->job_control_entry = NULL;
1395 
1396 			if (death != NULL) {
1397 				death->InitDeadState();
1398 
1399 				// team_set_job_control_state() already moved our entry
1400 				// into the parent's list. We just check the soft limit of
1401 				// death entries.
1402 				if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
1403 					death = parent->dead_children->entries.RemoveHead();
1404 					parent->dead_children->count--;
1405 				} else
1406 					death = NULL;
1407 
1408 				RELEASE_THREAD_LOCK();
1409 			} else
1410 				RELEASE_THREAD_LOCK();
1411 
1412 			team_remove_team(team);
1413 
1414 			send_signal_etc(parentID, SIGCHLD,
1415 				SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
1416 		} else {
1417 			// The thread is not the main thread. We store a thread death
1418 			// entry for it, unless someone is already waiting for it.
1419 			if (threadDeathEntry != NULL
1420 				&& list_is_empty(&thread->exit.waiters)) {
1421 				threadDeathEntry->thread = thread->id;
1422 				threadDeathEntry->status = thread->exit.status;
1423 				threadDeathEntry->reason = thread->exit.reason;
1424 				threadDeathEntry->signal = thread->exit.signal;
1425 
1426 				// add the entry -- remove an old one if we hit the limit
1427 				list_add_item(&team->dead_threads, threadDeathEntry);
1428 				team->dead_threads_count++;
1429 				threadDeathEntry = NULL;
1430 
1431 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1432 					threadDeathEntry = (death_entry*)list_remove_head_item(
1433 						&team->dead_threads);
1434 					team->dead_threads_count--;
1435 				}
1436 			}
1437 
1438 			RELEASE_THREAD_LOCK();
1439 		}
1440 
1441 		RELEASE_TEAM_LOCK();
1442 
1443 		// swap address spaces, to make sure we're running on the kernel's pgdir
1444 		vm_swap_address_space(vm_kernel_address_space());
1445 		restore_interrupts(state);
1446 
1447 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1448 	}
1449 
1450 	if (threadDeathEntry != NULL)
1451 		free(threadDeathEntry);
1452 
1453 	// delete the team if we're its main thread
1454 	if (deleteTeam) {
1455 		team_delete_team(team);
1456 
1457 		// we need to delete any death entry that made it to here
1458 		if (death != NULL)
1459 			delete death;
1460 
1461 		cachedDeathSem = -1;
1462 	}
1463 
1464 	state = disable_interrupts();
1465 	GRAB_THREAD_LOCK();
1466 
1467 	// remove thread from hash, so it's no longer accessible
1468 	hash_remove(sThreadHash, thread);
1469 	sUsedThreads--;
1470 
1471 	// Stop debugging for this thread
1472 	debugInfo = thread->debug_info;
1473 	clear_thread_debug_info(&thread->debug_info, true);
1474 
1475 	// Remove the select infos. We notify them a little later.
1476 	select_info* selectInfos = thread->select_infos;
1477 	thread->select_infos = NULL;
1478 
1479 	RELEASE_THREAD_LOCK();
1480 	restore_interrupts(state);
1481 
1482 	destroy_thread_debug_info(&debugInfo);
1483 
1484 	// notify select infos
1485 	select_info* info = selectInfos;
1486 	while (info != NULL) {
1487 		select_sync* sync = info->sync;
1488 
1489 		notify_select_events(info, B_EVENT_INVALID);
1490 		info = info->next;
1491 		put_select_sync(sync);
1492 	}
1493 
1494 	// shut down the thread messaging
1495 
1496 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1497 	if (status == B_WOULD_BLOCK) {
1498 		// there is data waiting for us, so let us eat it
1499 		thread_id sender;
1500 
1501 		delete_sem(thread->msg.write_sem);
1502 			// first, let's remove all possibly waiting writers
1503 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1504 	} else {
1505 		// we probably own the semaphore here, and we're the last to do so
1506 		delete_sem(thread->msg.write_sem);
1507 	}
1508 	// now we can safely remove the msg.read_sem
1509 	delete_sem(thread->msg.read_sem);
1510 
1511 	// fill all death entries and delete the sem that others will use to wait on us
1512 	{
1513 		sem_id cachedExitSem = thread->exit.sem;
1514 		cpu_status state;
1515 
1516 		state = disable_interrupts();
1517 		GRAB_THREAD_LOCK();
1518 
1519 		// make sure no one will grab this semaphore again
1520 		thread->exit.sem = -1;
1521 
1522 		// fill all death entries
1523 		death_entry* entry = NULL;
1524 		while ((entry = (struct death_entry*)list_get_next_item(
1525 				&thread->exit.waiters, entry)) != NULL) {
1526 			entry->status = thread->exit.status;
1527 			entry->reason = thread->exit.reason;
1528 			entry->signal = thread->exit.signal;
1529 		}
1530 
1531 		RELEASE_THREAD_LOCK();
1532 		restore_interrupts(state);
1533 
1534 		delete_sem(cachedExitSem);
1535 	}
1536 
1537 	// notify the debugger
1538 	if (teamID != team_get_kernel_team_id())
1539 		user_debug_thread_deleted(teamID, thread->id);
1540 
1541 	// enqueue in the undertaker list and reschedule for the last time
1542 	UndertakerEntry undertakerEntry(thread, teamID, cachedDeathSem);
1543 
1544 	disable_interrupts();
1545 	GRAB_THREAD_LOCK();
1546 
1547 	sUndertakerEntries.Add(&undertakerEntry);
1548 	sUndertakerCondition.NotifyOne(true);
1549 
1550 	thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
1551 	scheduler_reschedule();
1552 
1553 	panic("never can get here\n");
1554 }
1555 
1556 
1557 struct thread *
1558 thread_get_thread_struct(thread_id id)
1559 {
1560 	struct thread *thread;
1561 	cpu_status state;
1562 
1563 	state = disable_interrupts();
1564 	GRAB_THREAD_LOCK();
1565 
1566 	thread = thread_get_thread_struct_locked(id);
1567 
1568 	RELEASE_THREAD_LOCK();
1569 	restore_interrupts(state);
1570 
1571 	return thread;
1572 }
1573 
1574 
1575 struct thread *
1576 thread_get_thread_struct_locked(thread_id id)
1577 {
1578 	struct thread_key key;
1579 
1580 	key.id = id;
1581 
1582 	return (struct thread*)hash_lookup(sThreadHash, &key);
1583 }
1584 
1585 
1586 /*!
1587 	Called in the interrupt handler code when a thread enters
1588 	the kernel for any reason.
1589 	Only tracks time for now.
1590 	Interrupts are disabled.
1591 */
1592 void
1593 thread_at_kernel_entry(bigtime_t now)
1594 {
1595 	struct thread *thread = thread_get_current_thread();
1596 
1597 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1598 
1599 	// track user time
1600 	thread->user_time += now - thread->last_time;
1601 	thread->last_time = now;
1602 
1603 	thread->in_kernel = true;
1604 }
1605 
1606 
1607 /*!
1608 	Called whenever a thread exits kernel space to user space.
1609 	Tracks time, handles signals, ...
1610 	Interrupts must be enabled. When the function returns, interrupts will be
1611 	disabled.
1612 */
1613 void
1614 thread_at_kernel_exit(void)
1615 {
1616 	struct thread *thread = thread_get_current_thread();
1617 
1618 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1619 
1620 	while (handle_signals(thread)) {
1621 		InterruptsSpinLocker _(gThreadSpinlock);
1622 		scheduler_reschedule();
1623 	}
1624 
1625 	disable_interrupts();
1626 
1627 	thread->in_kernel = false;
1628 
1629 	// track kernel time
1630 	bigtime_t now = system_time();
1631 	thread->kernel_time += now - thread->last_time;
1632 	thread->last_time = now;
1633 }
1634 
1635 
1636 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1637 	and no debugging shall be done.
1638 	Interrupts must be disabled.
1639 */
1640 void
1641 thread_at_kernel_exit_no_signals(void)
1642 {
1643 	struct thread *thread = thread_get_current_thread();
1644 
1645 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1646 
1647 	thread->in_kernel = false;
1648 
1649 	// track kernel time
1650 	bigtime_t now = system_time();
1651 	thread->kernel_time += now - thread->last_time;
1652 	thread->last_time = now;
1653 }
1654 
1655 
1656 void
1657 thread_reset_for_exec(void)
1658 {
1659 	struct thread *thread = thread_get_current_thread();
1660 
1661 	cancel_timer(&thread->alarm);
1662 	reset_signals(thread);
1663 }
1664 
1665 
1666 /*! Inserts a thread at the tail of a queue. */
1667 void
1668 thread_enqueue(struct thread *thread, struct thread_queue *queue)
1669 {
1670 	thread->queue_next = NULL;
1671 	if (queue->head == NULL) {
1672 		queue->head = thread;
1673 		queue->tail = thread;
1674 	} else {
1675 		queue->tail->queue_next = thread;
1676 		queue->tail = thread;
1677 	}
1678 }
1679 
1680 
1681 struct thread *
1682 thread_lookat_queue(struct thread_queue *queue)
1683 {
1684 	return queue->head;
1685 }
1686 
1687 
1688 struct thread *
1689 thread_dequeue(struct thread_queue *queue)
1690 {
1691 	struct thread *thread = queue->head;
1692 
1693 	if (thread != NULL) {
1694 		queue->head = thread->queue_next;
1695 		if (queue->tail == thread)
1696 			queue->tail = NULL;
1697 	}
1698 	return thread;
1699 }
1700 
1701 
1702 struct thread *
1703 thread_dequeue_id(struct thread_queue *q, thread_id id)
1704 {
1705 	struct thread *thread;
1706 	struct thread *last = NULL;
1707 
1708 	thread = q->head;
1709 	while (thread != NULL) {
1710 		if (thread->id == id) {
1711 			if (last == NULL)
1712 				q->head = thread->queue_next;
1713 			else
1714 				last->queue_next = thread->queue_next;
1715 
1716 			if (q->tail == thread)
1717 				q->tail = last;
1718 			break;
1719 		}
1720 		last = thread;
1721 		thread = thread->queue_next;
1722 	}
1723 	return thread;
1724 }
1725 
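// A small sketch of how these queue helpers fit together; this mirrors the
// dead_q recycling done in create_thread_struct() and the undertaker above
// (the caller's locking requirements still apply):
//
// \code
//	struct thread_queue queue;
//	memset(&queue, 0, sizeof(queue));                     // empty queue
//	thread_enqueue(thread, &queue);                       // append at the tail
//	struct thread *head = thread_lookat_queue(&queue);    // peek, don't remove
//	struct thread *taken = thread_dequeue(&queue);        // pop from the head
//	struct thread *byID = thread_dequeue_id(&queue, id);  // unlink a given ID
// \endcode
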
1726 
1727 thread_id
1728 allocate_thread_id(void)
1729 {
1730 	return atomic_add(&sNextThreadID, 1);
1731 }
1732 
1733 
1734 thread_id
1735 peek_next_thread_id(void)
1736 {
1737 	return atomic_get(&sNextThreadID);
1738 }
1739 
1740 
1741 /*!	Yields the CPU to other threads.
1742 	If \a force is \c true, the thread will almost certainly be unscheduled.
1743 	If \c false, it will keep running if no other thread is ready; if it has
1744 	a higher priority than the other ready threads, it still has a good
1745 	chance of continuing to run.
1746 */
1747 void
1748 thread_yield(bool force)
1749 {
1750 	if (force) {
1751 		// snooze for roughly 3 thread quantums
1752 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1753 #if 0
1754 		cpu_status state;
1755 
1756 		struct thread *thread = thread_get_current_thread();
1757 		if (thread == NULL)
1758 			return;
1759 
1760 		state = disable_interrupts();
1761 		GRAB_THREAD_LOCK();
1762 
1763 		// mark the thread as yielded, so it will not be scheduled next
1764 		//thread->was_yielded = true;
1765 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1766 		scheduler_reschedule();
1767 
1768 		RELEASE_THREAD_LOCK();
1769 		restore_interrupts(state);
1770 #endif
1771 	} else {
1772 		struct thread *thread = thread_get_current_thread();
1773 		if (thread == NULL)
1774 			return;
1775 
1776 		// Don't force the thread off the CPU, just reschedule.
1777 		InterruptsSpinLocker _(gThreadSpinlock);
1778 		scheduler_reschedule();
1779 	}
1780 }
1781 
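// For reference, the two modes described above in practice:
//
// \code
//	thread_yield(true);     // give up the CPU even if nothing else is ready
//	thread_yield(false);    // merely offer a reschedule to the scheduler
// \endcode
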
1782 
1783 /*!
1784 	Kernel private thread creation function.
1785 
1786 	\param threadID The ID to be assigned to the new thread. If
1787 		  \code < 0 \endcode a fresh one is allocated.
1788 */
1789 thread_id
1790 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1791 	void *arg, team_id team, thread_id threadID)
1792 {
1793 	thread_creation_attributes attributes;
1794 	attributes.entry = (thread_entry_func)function;
1795 	attributes.name = name;
1796 	attributes.priority = priority;
1797 	attributes.args1 = arg;
1798 	attributes.args2 = NULL;
1799 	attributes.stack_address = NULL;
1800 	attributes.stack_size = 0;
1801 	attributes.team = team;
1802 	attributes.thread = threadID;
1803 
1804 	return create_thread(attributes, true);
1805 }
1806 
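// A short usage sketch; thread_init() below uses the spawn_kernel_thread()
// wrapper the same way to start the undertaker. The worker function and its
// argument here are made up for illustration:
//
// \code
//	static status_t
//	my_worker(void *data)
//	{
//		dprintf("worker started with %p\n", data);
//		return B_OK;
//	}
//
//	thread_id worker = spawn_kernel_thread(&my_worker, "my worker",
//		B_NORMAL_PRIORITY, NULL);
//	if (worker >= B_OK)
//		resume_thread(worker);
//			// threads are created in B_THREAD_SUSPENDED state (see
//			// create_thread() above)
// \endcode
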
1807 
1808 status_t
1809 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1810 	status_t *_returnCode)
1811 {
1812 	sem_id exitSem = B_BAD_THREAD_ID;
1813 	struct death_entry death;
1814 	job_control_entry* freeDeath = NULL;
1815 	struct thread *thread;
1816 	cpu_status state;
1817 	status_t status = B_OK;
1818 
1819 	if (id < B_OK)
1820 		return B_BAD_THREAD_ID;
1821 
1822 	// we need to resume the thread we're waiting for first
1823 
1824 	state = disable_interrupts();
1825 	GRAB_THREAD_LOCK();
1826 
1827 	thread = thread_get_thread_struct_locked(id);
1828 	if (thread != NULL) {
1829 		// remember the semaphore we have to wait on and place our death entry
1830 		exitSem = thread->exit.sem;
1831 		list_add_link_to_head(&thread->exit.waiters, &death);
1832 	}
1833 
1834 	death_entry* threadDeathEntry = NULL;
1835 
1836 	RELEASE_THREAD_LOCK();
1837 
1838 	if (thread == NULL) {
1839 		// we couldn't find this thread - maybe it's already gone, and we'll
1840 		// find its death entry in our team
1841 		GRAB_TEAM_LOCK();
1842 
1843 		struct team* team = thread_get_current_thread()->team;
1844 
1845 		// check the child death entries first (i.e. main threads of child
1846 		// teams)
1847 		bool deleteEntry;
1848 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1849 		if (freeDeath != NULL) {
1850 			death.status = freeDeath->status;
1851 			if (!deleteEntry)
1852 				freeDeath = NULL;
1853 		} else {
1854 			// check the thread death entries of the team (non-main threads)
1855 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1856 					&team->dead_threads, threadDeathEntry)) != NULL) {
1857 				if (threadDeathEntry->thread == id) {
1858 					list_remove_item(&team->dead_threads, threadDeathEntry);
1859 					team->dead_threads_count--;
1860 					death.status = threadDeathEntry->status;
1861 					break;
1862 				}
1863 			}
1864 
1865 			if (threadDeathEntry == NULL)
1866 				status = B_BAD_THREAD_ID;
1867 		}
1868 
1869 		RELEASE_TEAM_LOCK();
1870 	}
1871 
1872 	restore_interrupts(state);
1873 
1874 	if (thread == NULL && status == B_OK) {
1875 		// we found the thread's death entry in our team
1876 		if (_returnCode)
1877 			*_returnCode = death.status;
1878 
1879 		delete freeDeath;
1880 		free(threadDeathEntry);
1881 		return B_OK;
1882 	}
1883 
1884 	// we need to wait for the death of the thread
1885 
1886 	if (exitSem < B_OK)
1887 		return B_BAD_THREAD_ID;
1888 
1889 	resume_thread(id);
1890 		// make sure we don't wait forever on a suspended thread
1891 
1892 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1893 
1894 	if (status == B_OK) {
1895 		// this should never happen as the thread deletes the semaphore on exit
1896 		panic("could acquire exit_sem for thread %ld\n", id);
1897 	} else if (status == B_BAD_SEM_ID) {
1898 		// this is the way the thread normally exits
1899 		status = B_OK;
1900 
1901 		if (_returnCode)
1902 			*_returnCode = death.status;
1903 	} else {
1904 		// We were probably interrupted; we need to remove our death entry now.
1905 		state = disable_interrupts();
1906 		GRAB_THREAD_LOCK();
1907 
1908 		thread = thread_get_thread_struct_locked(id);
1909 		if (thread != NULL)
1910 			list_remove_link(&death);
1911 
1912 		RELEASE_THREAD_LOCK();
1913 		restore_interrupts(state);
1914 
1915 		// If the thread is already gone, we need to wait for its exit semaphore
1916 		// to make sure our death entry stays valid - it won't take long
1917 		if (thread == NULL)
1918 			acquire_sem(exitSem);
1919 	}
1920 
1921 	return status;
1922 }
1923 
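// A hedged example of waiting on a thread with a timeout through this
// function (the one-second timeout is illustrative; the flags are passed
// straight through to acquire_sem_etc()):
//
// \code
//	status_t returnCode;
//	status_t error = wait_for_thread_etc(worker, B_RELATIVE_TIMEOUT, 1000000,
//		&returnCode);
//	if (error == B_OK)
//		dprintf("worker exited with %ld\n", returnCode);
//	else if (error == B_TIMED_OUT)
//		dprintf("worker is still running\n");
// \endcode
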
1924 
1925 status_t
1926 select_thread(int32 id, struct select_info* info, bool kernel)
1927 {
1928 	InterruptsSpinLocker locker(gThreadSpinlock);
1929 
1930 	// get thread
1931 	struct thread* thread = thread_get_thread_struct_locked(id);
1932 	if (thread == NULL)
1933 		return B_BAD_THREAD_ID;
1934 
1935 	// We support only B_EVENT_INVALID at the moment.
1936 	info->selected_events &= B_EVENT_INVALID;
1937 
1938 	// add info to list
1939 	if (info->selected_events != 0) {
1940 		info->next = thread->select_infos;
1941 		thread->select_infos = info;
1942 
1943 		// we need a sync reference
1944 		atomic_add(&info->sync->ref_count, 1);
1945 	}
1946 
1947 	return B_OK;
1948 }
1949 
1950 
1951 status_t
1952 deselect_thread(int32 id, struct select_info* info, bool kernel)
1953 {
1954 	InterruptsSpinLocker locker(gThreadSpinlock);
1955 
1956 	// get thread
1957 	struct thread* thread = thread_get_thread_struct_locked(id);
1958 	if (thread == NULL)
1959 		return B_BAD_THREAD_ID;
1960 
1961 	// remove info from list
1962 	select_info** infoLocation = &thread->select_infos;
1963 	while (*infoLocation != NULL && *infoLocation != info)
1964 		infoLocation = &(*infoLocation)->next;
1965 
1966 	if (*infoLocation != info)
1967 		return B_OK;
1968 
1969 	*infoLocation = info->next;
1970 
1971 	locker.Unlock();
1972 
1973 	// surrender sync reference
1974 	put_select_sync(info->sync);
1975 
1976 	return B_OK;
1977 }
1978 
1979 
1980 int32
1981 thread_max_threads(void)
1982 {
1983 	return sMaxThreads;
1984 }
1985 
1986 
1987 int32
1988 thread_used_threads(void)
1989 {
1990 	return sUsedThreads;
1991 }
1992 
1993 
1994 const char*
1995 thread_state_to_text(struct thread* thread, int32 state)
1996 {
1997 	return state_to_text(thread, state);
1998 }
1999 
2000 
2001 int32
2002 thread_get_io_priority(thread_id id)
2003 {
2004 	// take a shortcut if it is the current thread
2005 	struct thread* thread = thread_get_current_thread();
2006 	int32 priority;
2007 	if (id == thread->id) {
2008 		priority = thread->io_priority;
2009 		return priority < 0 ? thread->priority : priority;
2010 	}
2011 
2012 	// not the current thread -- get it
2013 	InterruptsSpinLocker locker(gThreadSpinlock);
2014 
2015 	thread = thread_get_thread_struct_locked(id);
2016 	if (thread == NULL)
2017 		return B_BAD_THREAD_ID;
2018 
2019 	priority = thread->io_priority;
2020 	return priority < 0 ? thread->priority : priority;
2021 }
2022 
2023 
2024 void
2025 thread_set_io_priority(int32 priority)
2026 {
2027 	struct thread* thread = thread_get_current_thread();
2028 	thread->io_priority = priority;
2029 }
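
/* Illustrative sketch, not part of the original source: a negative I/O
   priority means "no explicit I/O priority", in which case
   thread_get_io_priority() falls back to the thread's scheduling priority:

	thread_set_io_priority(-1);
	// thread_get_io_priority(find_thread(NULL)) now returns the calling
	// thread's scheduling priority
*/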
2030 
2031 
2032 status_t
2033 thread_init(kernel_args *args)
2034 {
2035 	uint32 i;
2036 
2037 	TRACE(("thread_init: entry\n"));
2038 
2039 	// create the thread hash table
2040 	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
2041 		&thread_struct_compare, &thread_struct_hash);
2042 
2043 	// zero out the dead thread structure queue
2044 	memset(&dead_q, 0, sizeof(dead_q));
2045 
2046 	if (arch_thread_init(args) < B_OK)
2047 		panic("arch_thread_init() failed!\n");
2048 
2049 	// skip all thread IDs up to and including B_SYSTEM_TEAM, which is reserved
2050 	sNextThreadID = B_SYSTEM_TEAM + 1;
2051 
2052 	// create an idle thread for each cpu
2053 
2054 	for (i = 0; i < args->num_cpus; i++) {
2055 		struct thread *thread;
2056 		area_info info;
2057 		char name[64];
2058 
2059 		sprintf(name, "idle thread %lu", i + 1);
2060 		thread = create_thread_struct(&sIdleThreads[i], name,
2061 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
2062 		if (thread == NULL) {
2063 			panic("error creating idle thread struct\n");
2064 			return B_NO_MEMORY;
2065 		}
2066 
2067 		thread->team = team_get_kernel_team();
2068 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
2069 		thread->state = B_THREAD_RUNNING;
2070 		thread->next_state = B_THREAD_READY;
2071 		sprintf(name, "idle thread %lu kstack", i + 1);
2072 		thread->kernel_stack_area = find_area(name);
2073 		thread->entry = NULL;
2074 
2075 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2076 			panic("error finding idle kstack area\n");
2077 
2078 		thread->kernel_stack_base = (addr_t)info.address;
2079 		thread->kernel_stack_top = thread->kernel_stack_base + info.size;
2080 
2081 		hash_insert(sThreadHash, thread);
2082 		insert_thread_into_team(thread->team, thread);
2083 	}
2084 	sUsedThreads = args->num_cpus;
2085 
2086 	// start the undertaker thread
2087 	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2088 	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2089 
2090 	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2091 		B_DISPLAY_PRIORITY, NULL);
2092 	if (undertakerThread < 0)
2093 		panic("Failed to create undertaker thread!");
2094 	resume_thread(undertakerThread);
2095 
2096 	// set up some debugger commands
2097 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2098 		"[ <team> ]\n"
2099 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2100 		"all threads of the specified team.\n"
2101 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2102 	add_debugger_command_etc("ready", &dump_thread_list,
2103 		"List all ready threads",
2104 		"\n"
2105 		"Prints a list of all threads in ready state.\n", 0);
2106 	add_debugger_command_etc("running", &dump_thread_list,
2107 		"List all running threads",
2108 		"\n"
2109 		"Prints a list of all threads in running state.\n", 0);
2110 	add_debugger_command_etc("waiting", &dump_thread_list,
2111 		"List all waiting threads (optionally for a specific semaphore)",
2112 		"[ <sem> ]\n"
2113 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2114 		"specified, only the threads waiting on that semaphore are listed.\n"
2115 		"  <sem>  - ID of the semaphore.\n", 0);
2116 	add_debugger_command_etc("realtime", &dump_thread_list,
2117 		"List all realtime threads",
2118 		"\n"
2119 		"Prints a list of all threads with realtime priority.\n", 0);
2120 	add_debugger_command_etc("thread", &dump_thread_info,
2121 		"Dump info about a particular thread",
2122 		"[ -s ] ( <id> | <address> | <name> )*\n"
2123 		"Prints information about the specified thread. If no argument is\n"
2124 		"given the current thread is selected.\n"
2125 		"  -s         - Print info in compact table form (like \"threads\").\n"
2126 		"  <id>       - The ID of the thread.\n"
2127 		"  <address>  - The address of the thread structure.\n"
2128 		"  <name>     - The thread's name.\n", 0);
2129 	add_debugger_command_etc("calling", &dump_thread_list,
2130 		"Show all threads that have a specific address in their call chain",
2131 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2132 	add_debugger_command_etc("unreal", &make_thread_unreal,
2133 		"Set realtime priority threads to normal priority",
2134 		"[ <id> ]\n"
2135 		"Sets the priority of all realtime threads or, if given, the one\n"
2136 		"with the specified ID to \"normal\" priority.\n"
2137 		"  <id>  - The ID of the thread.\n", 0);
2138 	add_debugger_command_etc("suspend", &make_thread_suspended,
2139 		"Suspend a thread",
2140 		"[ <id> ]\n"
2141 		"Suspends the thread with the given ID. If no ID argument is given\n"
2142 		"the current thread is selected.\n"
2143 		"  <id>  - The ID of the thread.\n", 0);
2144 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2145 		"<id>\n"
2146 		"Resumes the specified thread, if it is currently suspended.\n"
2147 		"  <id>  - The ID of the thread.\n", 0);
2148 	add_debugger_command_etc("drop", &drop_into_debugger,
2149 		"Drop a thread into the userland debugger",
2150 		"<id>\n"
2151 		"Drops the specified (userland) thread into the userland debugger\n"
2152 		"after leaving the kernel debugger.\n"
2153 		"  <id>  - The ID of the thread.\n", 0);
2154 	add_debugger_command_etc("priority", &set_thread_prio,
2155 		"Set a thread's priority",
2156 		"<priority> [ <id> ]\n"
2157 		"Sets the priority of the thread with the specified ID to the given\n"
2158 		"priority. If no thread ID is given, the current thread is selected.\n"
2159 		"  <priority>  - The thread's new priority (0 - 120)\n"
2160 		"  <id>        - The ID of the thread.\n", 0);
2161 
2162 	return B_OK;
2163 }
2164 
2165 
2166 status_t
2167 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2168 {
2169 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2170 	// so that get_current_cpu and friends will work, which is crucial for
2171 	// a lot of low level routines
2172 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2173 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2174 	return B_OK;
2175 }
2176 
2177 
2178 //	#pragma mark - thread blocking API
2179 
2180 
2181 static status_t
2182 thread_block_timeout(timer* timer)
2183 {
2184 	// The timer has been installed with B_TIMER_ACQUIRE_THREAD_LOCK, so
2185 	// we're holding the thread lock already. This makes things comfortably
2186 	// easy.
2187 
2188 	struct thread* thread = (struct thread*)timer->user_data;
2189 	if (thread_unblock_locked(thread, B_TIMED_OUT))
2190 		return B_INVOKE_SCHEDULER;
2191 
2192 	return B_HANDLED_INTERRUPT;
2193 }
2194 
2195 
2196 status_t
2197 thread_block()
2198 {
2199 	InterruptsSpinLocker _(gThreadSpinlock);
2200 	return thread_block_locked(thread_get_current_thread());
2201 }
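
/* Illustrative sketch, not part of the original source: the usual pattern for
   this blocking API, as used by snooze_etc() and _user_block_thread() further
   below, is to prepare the block and then block while holding the thread
   spinlock:

	InterruptsSpinLocker locker(gThreadSpinlock);
	struct thread* thread = thread_get_current_thread();
	thread_prepare_to_block(thread, B_CAN_INTERRUPT, THREAD_BLOCK_TYPE_OTHER,
		"example wait");
	status_t error = thread_block_locked(thread);
		// another thread wakes us via thread_unblock()/thread_unblock_locked()
*/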
2202 
2203 
2204 bool
2205 thread_unblock(thread_id threadID, status_t status)
2206 {
2207 	InterruptsSpinLocker _(gThreadSpinlock);
2208 
2209 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2210 	if (thread == NULL)
2211 		return false;
2212 	return thread_unblock_locked(thread, status);
2213 }
2214 
2215 
2216 status_t
2217 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2218 {
2219 	InterruptsSpinLocker _(gThreadSpinlock);
2220 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
2221 }
2222 
2223 
2224 status_t
2225 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
2226 {
2227 	struct thread* thread = thread_get_current_thread();
2228 
2229 	if (thread->wait.status != 1)
2230 		return thread->wait.status;
2231 
2232 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2233 		&& timeout != B_INFINITE_TIMEOUT;
2234 
2235 	if (useTimer) {
2236 		// Timer flags: absolute/relative + "acquire thread lock". The latter
2237 		// avoids nasty race conditions and deadlock problems that could
2238 		// otherwise occur between our cancel_timer() and a concurrently
2239 		// executing thread_block_timeout().
2240 		uint32 timerFlags;
2241 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2242 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2243 		} else {
2244 			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2245 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2246 				timeout -= rtc_boot_time();
2247 		}
2248 		timerFlags |= B_TIMER_ACQUIRE_THREAD_LOCK;
2249 
2250 		// install the timer
2251 		thread->wait.unblock_timer.user_data = thread;
2252 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2253 			timerFlags);
2254 	}
2255 
2256 	// block
2257 	status_t error = thread_block_locked(thread);
2258 
2259 	// cancel timer, if it didn't fire
2260 	if (error != B_TIMED_OUT && useTimer)
2261 		cancel_timer(&thread->wait.unblock_timer);
2262 
2263 	return error;
2264 }
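
/* Illustrative sketch, not part of the original source: like
   thread_block_locked(), this must be called with the thread spinlock held.
   An absolute timeout given with B_TIMEOUT_REAL_TIME_BASE is converted to the
   system time base above by subtracting rtc_boot_time(); a relative timeout
   is used as-is:

	thread_prepare_to_block(thread_get_current_thread(), B_CAN_INTERRUPT,
		THREAD_BLOCK_TYPE_SNOOZE, NULL);
	status_t error = thread_block_with_timeout_locked(B_RELATIVE_TIMEOUT,
		500000);
		// B_TIMED_OUT if nobody unblocked us within half a second
*/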
2265 
2266 
2267 /*!	Thread spinlock must be held.
2268 */
2269 static status_t
2270 user_unblock_thread(thread_id threadID, status_t status)
2271 {
2272 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2273 	if (thread == NULL)
2274 		return B_BAD_THREAD_ID;
2275 	if (thread->user_thread == NULL)
2276 		return B_NOT_ALLOWED;
2277 
2278 	thread_unblock_locked(thread, status);
2279 
2280 	return B_OK;
2281 }
2282 
2283 
2284 //	#pragma mark - public kernel API
2285 
2286 
2287 void
2288 exit_thread(status_t returnValue)
2289 {
2290 	struct thread *thread = thread_get_current_thread();
2291 
2292 	thread->exit.status = returnValue;
2293 	thread->exit.reason = THREAD_RETURN_EXIT;
2294 
2295 	// if called from a kernel thread, we don't deliver the signal,
2296 	// if called from a kernel thread, we don't deliver the signal;
2297 	// we just exit directly to keep the user-space behaviour of
2298 	// this function
2299 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2300 	else
2301 		thread_exit();
2302 }
2303 
2304 
2305 status_t
2306 kill_thread(thread_id id)
2307 {
2308 	if (id <= 0)
2309 		return B_BAD_VALUE;
2310 
2311 	return send_signal(id, SIGKILLTHR);
2312 }
2313 
2314 
2315 status_t
2316 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2317 {
2318 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2319 }
2320 
2321 
2322 int32
2323 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2324 {
2325 	return receive_data_etc(sender, buffer, bufferSize, 0);
2326 }
2327 
2328 
2329 bool
2330 has_data(thread_id thread)
2331 {
2332 	int32 count;
2333 
2334 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
2335 			&count) != B_OK)
2336 		return false;
2337 
2338 	return count != 0;
2339 }
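
/* Illustrative sketch, not part of the original source: send_data(),
   receive_data(), and has_data() form the classic BeOS thread message passing
   API; receive_data() blocks until a message arrives and returns its code:

	// sender
	int32 value = 42;
	send_data(targetThread, 'data', &value, sizeof(value));

	// receiver
	thread_id sender;
	int32 buffer;
	int32 code = receive_data(&sender, &buffer, sizeof(buffer));
*/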
2340 
2341 
2342 status_t
2343 _get_thread_info(thread_id id, thread_info *info, size_t size)
2344 {
2345 	status_t status = B_OK;
2346 	struct thread *thread;
2347 	cpu_status state;
2348 
2349 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2350 		return B_BAD_VALUE;
2351 
2352 	state = disable_interrupts();
2353 	GRAB_THREAD_LOCK();
2354 
2355 	thread = thread_get_thread_struct_locked(id);
2356 	if (thread == NULL) {
2357 		status = B_BAD_VALUE;
2358 		goto err;
2359 	}
2360 
2361 	fill_thread_info(thread, info, size);
2362 
2363 err:
2364 	RELEASE_THREAD_LOCK();
2365 	restore_interrupts(state);
2366 
2367 	return status;
2368 }
2369 
2370 
2371 status_t
2372 _get_next_thread_info(team_id team, int32 *_cookie, thread_info *info,
2373 	size_t size)
2374 {
2375 	status_t status = B_BAD_VALUE;
2376 	struct thread *thread = NULL;
2377 	cpu_status state;
2378 	int slot;
2379 	thread_id lastThreadID;
2380 
2381 	if (info == NULL || size != sizeof(thread_info) || team < B_OK)
2382 		return B_BAD_VALUE;
2383 
2384 	if (team == B_CURRENT_TEAM)
2385 		team = team_get_current_team_id();
2386 	else if (!team_is_valid(team))
2387 		return B_BAD_VALUE;
2388 
2389 	slot = *_cookie;
2390 
2391 	state = disable_interrupts();
2392 	GRAB_THREAD_LOCK();
2393 
2394 	lastThreadID = peek_next_thread_id();
2395 	if (slot >= lastThreadID)
2396 		goto err;
2397 
2398 	while (slot < lastThreadID
2399 		&& (!(thread = thread_get_thread_struct_locked(slot))
2400 			|| thread->team->id != team))
2401 		slot++;
2402 
2403 	if (thread != NULL && thread->team->id == team) {
2404 		fill_thread_info(thread, info, size);
2405 
2406 		*_cookie = slot + 1;
2407 		status = B_OK;
2408 	}
2409 
2410 err:
2411 	RELEASE_THREAD_LOCK();
2412 	restore_interrupts(state);
2413 
2414 	return status;
2415 }
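
/* Illustrative sketch, not part of the original source: the public
   get_next_thread_info() wrapper iterates a team's threads with a cookie
   starting at 0, mirroring the slot-based loop above:

	int32 cookie = 0;
	thread_info info;
	while (get_next_thread_info(team, &cookie, &info) == B_OK) {
		// inspect info.thread, info.name, info.state, ...
	}
*/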
2416 
2417 
2418 thread_id
2419 find_thread(const char *name)
2420 {
2421 	struct hash_iterator iterator;
2422 	struct thread *thread;
2423 	cpu_status state;
2424 
2425 	if (name == NULL)
2426 		return thread_get_current_thread_id();
2427 
2428 	state = disable_interrupts();
2429 	GRAB_THREAD_LOCK();
2430 
2431 	// ToDo: this might not be in the same order as find_thread() in BeOS,
2432 	//		which could theoretically be problematic.
2433 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2434 	//		cheap either - although this function is probably used very rarely.
2435 
2436 	hash_open(sThreadHash, &iterator);
2437 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
2438 			!= NULL) {
2439 		// Search through hash
2440 		if (thread->name != NULL && !strcmp(thread->name, name)) {
2441 			thread_id id = thread->id;
2442 
2443 			RELEASE_THREAD_LOCK();
2444 			restore_interrupts(state);
2445 			return id;
2446 		}
2447 	}
2448 
2449 	RELEASE_THREAD_LOCK();
2450 	restore_interrupts(state);
2451 
2452 	return B_NAME_NOT_FOUND;
2453 }
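
/* Illustrative sketch, not part of the original source: passing NULL returns
   the calling thread's own ID; any other name is looked up in the hash:

	thread_id self = find_thread(NULL);
	thread_id undertaker = find_thread("undertaker");
		// B_NAME_NOT_FOUND if no such thread exists
*/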
2454 
2455 
2456 status_t
2457 rename_thread(thread_id id, const char *name)
2458 {
2459 	struct thread *thread = thread_get_current_thread();
2460 	status_t status = B_BAD_THREAD_ID;
2461 	cpu_status state;
2462 
2463 	if (name == NULL)
2464 		return B_BAD_VALUE;
2465 
2466 	state = disable_interrupts();
2467 	GRAB_THREAD_LOCK();
2468 
2469 	if (thread->id != id)
2470 		thread = thread_get_thread_struct_locked(id);
2471 
2472 	if (thread != NULL) {
2473 		if (thread->team == thread_get_current_thread()->team) {
2474 			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2475 			status = B_OK;
2476 		} else
2477 			status = B_NOT_ALLOWED;
2478 	}
2479 
2480 	RELEASE_THREAD_LOCK();
2481 	restore_interrupts(state);
2482 
2483 	return status;
2484 }
2485 
2486 
2487 status_t
2488 set_thread_priority(thread_id id, int32 priority)
2489 {
2490 	struct thread *thread;
2491 	int32 oldPriority;
2492 
2493 	// make sure the passed in priority is within bounds
2494 	if (priority > B_MAX_PRIORITY)
2495 		priority = B_MAX_PRIORITY;
2496 	if (priority < B_MIN_PRIORITY)
2497 		priority = B_MIN_PRIORITY;
2498 
2499 	thread = thread_get_current_thread();
2500 	if (thread->id == id) {
2501 		// it's our own thread, so we know it isn't in the run queue, and we
2502 		// can manipulate its structure directly
2503 		oldPriority = thread->priority;
2504 			// note that this might not return the correct value if we are preempted
2505 			// here, and another thread changes our priority before the next line is
2506 			// executed
2507 		thread->priority = thread->next_priority = priority;
2508 	} else {
2509 		cpu_status state = disable_interrupts();
2510 		GRAB_THREAD_LOCK();
2511 
2512 		thread = thread_get_thread_struct_locked(id);
2513 		if (thread) {
2514 			oldPriority = thread->priority;
2515 			thread->next_priority = priority;
2516 			if (thread->state == B_THREAD_READY && thread->priority != priority) {
2517 				// if the thread is in the run queue, we reinsert it at a new position
2518 				scheduler_remove_from_run_queue(thread);
2519 				thread->priority = priority;
2520 				scheduler_enqueue_in_run_queue(thread);
2521 			} else
2522 				thread->priority = priority;
2523 		} else
2524 			oldPriority = B_BAD_THREAD_ID;
2525 
2526 		RELEASE_THREAD_LOCK();
2527 		restore_interrupts(state);
2528 	}
2529 
2530 	return oldPriority;
2531 }
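
/* Illustrative sketch, not part of the original source: the new priority is
   clamped to [B_MIN_PRIORITY, B_MAX_PRIORITY] and the previous priority (or
   B_BAD_THREAD_ID) is returned, so the old value can be restored later:

	int32 oldPriority = set_thread_priority(id, B_DISPLAY_PRIORITY);
	if (oldPriority >= 0)
		set_thread_priority(id, oldPriority);
*/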
2532 
2533 
2534 status_t
2535 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2536 {
2537 	status_t status;
2538 
2539 	if (timebase != B_SYSTEM_TIMEBASE)
2540 		return B_BAD_VALUE;
2541 
2542 	InterruptsSpinLocker _(gThreadSpinlock);
2543 	struct thread* thread = thread_get_current_thread();
2544 
2545 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, NULL);
2546 	status = thread_block_with_timeout_locked(flags, timeout);
2547 
2548 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2549 		return B_OK;
2550 
2551 	return status;
2552 }
2553 
2554 
2555 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2556 status_t
2557 snooze(bigtime_t timeout)
2558 {
2559 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2560 }
2561 
2562 
2563 /*!
2564 	snooze_until() for internal kernel use only; doesn't interrupt on
2565 	signals.
2566 */
2567 status_t
2568 snooze_until(bigtime_t timeout, int timebase)
2569 {
2570 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2571 }
2572 
2573 
2574 status_t
2575 wait_for_thread(thread_id thread, status_t *_returnCode)
2576 {
2577 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2578 }
2579 
2580 
2581 status_t
2582 suspend_thread(thread_id id)
2583 {
2584 	if (id <= 0)
2585 		return B_BAD_VALUE;
2586 
2587 	return send_signal(id, SIGSTOP);
2588 }
2589 
2590 
2591 status_t
2592 resume_thread(thread_id id)
2593 {
2594 	if (id <= 0)
2595 		return B_BAD_VALUE;
2596 
2597 	return send_signal_etc(id, SIGCONT, SIGNAL_FLAG_DONT_RESTART_SYSCALL);
2598 		// This retains compatibility with BeOS, which documents that the
2599 		// combination of suspend_thread() and resume_thread() interrupts
2600 		// threads waiting on semaphores.
2601 }
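
/* Illustrative sketch, not part of the original source: per the BeOS-
   compatible behaviour noted above, suspending and then resuming a thread
   that is blocked on a semaphore typically makes its acquire_sem() return
   B_INTERRUPTED:

	suspend_thread(id);
	resume_thread(id);
		// the target's pending acquire_sem() fails with B_INTERRUPTED
*/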
2602 
2603 
2604 thread_id
2605 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2606 	void *arg)
2607 {
2608 	thread_creation_attributes attributes;
2609 	attributes.entry = (thread_entry_func)function;
2610 	attributes.name = name;
2611 	attributes.priority = priority;
2612 	attributes.args1 = arg;
2613 	attributes.args2 = NULL;
2614 	attributes.stack_address = NULL;
2615 	attributes.stack_size = 0;
2616 	attributes.team = team_get_kernel_team()->id;
2617 	attributes.thread = -1;
2618 
2619 	return create_thread(attributes, true);
2620 }
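
/* Illustrative sketch, not part of the original source: kernel threads are
   created in the suspended state and have to be resumed explicitly, as done
   for the undertaker thread in thread_init() above. "my_worker" is a
   hypothetical thread function:

	static status_t
	my_worker(void* arg)
	{
		// ... do the actual work ...
		return B_OK;
	}

	thread_id worker = spawn_kernel_thread(&my_worker, "example worker",
		B_NORMAL_PRIORITY, NULL);
	if (worker >= B_OK)
		resume_thread(worker);
*/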
2621 
2622 
2623 int
2624 getrlimit(int resource, struct rlimit * rlp)
2625 {
2626 	status_t error = common_getrlimit(resource, rlp);
2627 	if (error != B_OK) {
2628 		errno = error;
2629 		return -1;
2630 	}
2631 
2632 	return 0;
2633 }
2634 
2635 
2636 int
2637 setrlimit(int resource, const struct rlimit * rlp)
2638 {
2639 	status_t error = common_setrlimit(resource, rlp);
2640 	if (error != B_OK) {
2641 		errno = error;
2642 		return -1;
2643 	}
2644 
2645 	return 0;
2646 }
2647 
2648 
2649 //	#pragma mark - syscalls
2650 
2651 
2652 void
2653 _user_exit_thread(status_t returnValue)
2654 {
2655 	exit_thread(returnValue);
2656 }
2657 
2658 
2659 status_t
2660 _user_kill_thread(thread_id thread)
2661 {
2662 	return kill_thread(thread);
2663 }
2664 
2665 
2666 status_t
2667 _user_resume_thread(thread_id thread)
2668 {
2669 	return resume_thread(thread);
2670 }
2671 
2672 
2673 status_t
2674 _user_suspend_thread(thread_id thread)
2675 {
2676 	return suspend_thread(thread);
2677 }
2678 
2679 
2680 status_t
2681 _user_rename_thread(thread_id thread, const char *userName)
2682 {
2683 	char name[B_OS_NAME_LENGTH];
2684 
2685 	if (!IS_USER_ADDRESS(userName)
2686 		|| userName == NULL
2687 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2688 		return B_BAD_ADDRESS;
2689 
2690 	return rename_thread(thread, name);
2691 }
2692 
2693 
2694 int32
2695 _user_set_thread_priority(thread_id thread, int32 newPriority)
2696 {
2697 	return set_thread_priority(thread, newPriority);
2698 }
2699 
2700 
2701 thread_id
2702 _user_spawn_thread(thread_creation_attributes* userAttributes)
2703 {
2704 	thread_creation_attributes attributes;
2705 	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
2706 		|| user_memcpy(&attributes, userAttributes,
2707 				sizeof(attributes)) != B_OK) {
2708 		return B_BAD_ADDRESS;
2709 	}
2710 
2711 	if (attributes.stack_size != 0
2712 		&& (attributes.stack_size < MIN_USER_STACK_SIZE
2713 			|| attributes.stack_size > MAX_USER_STACK_SIZE)) {
2714 		return B_BAD_VALUE;
2715 	}
2716 
2717 	char name[B_OS_NAME_LENGTH];
2718 	thread_id threadID;
2719 
2720 	if (!IS_USER_ADDRESS(attributes.entry) || attributes.entry == NULL
2721 		|| (attributes.stack_address != NULL
2722 			&& !IS_USER_ADDRESS(attributes.stack_address))
2723 		|| (attributes.name != NULL && (!IS_USER_ADDRESS(attributes.name)
2724 			|| user_strlcpy(name, attributes.name, B_OS_NAME_LENGTH) < 0)))
2725 		return B_BAD_ADDRESS;
2726 
2727 	attributes.name = attributes.name != NULL ? name : "user thread";
2728 	attributes.team = thread_get_current_thread()->team->id;
2729 	attributes.thread = -1;
2730 
2731 	threadID = create_thread(attributes, false);
2732 
2733 	if (threadID >= 0)
2734 		user_debug_thread_created(threadID);
2735 
2736 	return threadID;
2737 }
2738 
2739 
2740 status_t
2741 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2742 {
2743 	// NOTE: We only know the system timebase at the moment.
2744 	syscall_restart_handle_timeout_pre(flags, timeout);
2745 
2746 	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2747 
2748 	return syscall_restart_handle_timeout_post(error, timeout);
2749 }
2750 
2751 
2752 void
2753 _user_thread_yield(void)
2754 {
2755 	thread_yield(true);
2756 }
2757 
2758 
2759 status_t
2760 _user_get_thread_info(thread_id id, thread_info *userInfo)
2761 {
2762 	thread_info info;
2763 	status_t status;
2764 
2765 	if (!IS_USER_ADDRESS(userInfo))
2766 		return B_BAD_ADDRESS;
2767 
2768 	status = _get_thread_info(id, &info, sizeof(thread_info));
2769 
2770 	if (status >= B_OK
2771 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2772 		return B_BAD_ADDRESS;
2773 
2774 	return status;
2775 }
2776 
2777 
2778 status_t
2779 _user_get_next_thread_info(team_id team, int32 *userCookie,
2780 	thread_info *userInfo)
2781 {
2782 	status_t status;
2783 	thread_info info;
2784 	int32 cookie;
2785 
2786 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2787 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2788 		return B_BAD_ADDRESS;
2789 
2790 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2791 	if (status < B_OK)
2792 		return status;
2793 
2794 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2795 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2796 		return B_BAD_ADDRESS;
2797 
2798 	return status;
2799 }
2800 
2801 
2802 thread_id
2803 _user_find_thread(const char *userName)
2804 {
2805 	char name[B_OS_NAME_LENGTH];
2806 
2807 	if (userName == NULL)
2808 		return find_thread(NULL);
2809 
2810 	if (!IS_USER_ADDRESS(userName)
2811 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2812 		return B_BAD_ADDRESS;
2813 
2814 	return find_thread(name);
2815 }
2816 
2817 
2818 status_t
2819 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2820 {
2821 	status_t returnCode;
2822 	status_t status;
2823 
2824 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2825 		return B_BAD_ADDRESS;
2826 
2827 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2828 
2829 	if (status == B_OK && userReturnCode != NULL
2830 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
2831 		return B_BAD_ADDRESS;
2832 	}
2833 
2834 	return syscall_restart_handle_post(status);
2835 }
2836 
2837 
2838 bool
2839 _user_has_data(thread_id thread)
2840 {
2841 	return has_data(thread);
2842 }
2843 
2844 
2845 status_t
2846 _user_send_data(thread_id thread, int32 code, const void *buffer,
2847 	size_t bufferSize)
2848 {
2849 	if (!IS_USER_ADDRESS(buffer))
2850 		return B_BAD_ADDRESS;
2851 
2852 	return send_data_etc(thread, code, buffer, bufferSize,
2853 		B_KILL_CAN_INTERRUPT);
2854 		// supports userland buffers
2855 }
2856 
2857 
2858 status_t
2859 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2860 {
2861 	thread_id sender;
2862 	status_t code;
2863 
2864 	if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
2865 		|| !IS_USER_ADDRESS(buffer))
2866 		return B_BAD_ADDRESS;
2867 
2868 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2869 		// supports userland buffers
2870 
2871 	if (_userSender != NULL)
2872 		if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
2873 			return B_BAD_ADDRESS;
2874 
2875 	return code;
2876 }
2877 
2878 
2879 status_t
2880 _user_block_thread(uint32 flags, bigtime_t timeout)
2881 {
2882 	syscall_restart_handle_timeout_pre(flags, timeout);
2883 	flags |= B_CAN_INTERRUPT;
2884 
2885 	struct thread* thread = thread_get_current_thread();
2886 
2887 	InterruptsSpinLocker locker(gThreadSpinlock);
2888 
2889 	// check if we are already done
2890 	if (thread->user_thread->wait_status <= 0)
2891 		return thread->user_thread->wait_status;
2892 
2893 	// nope, so wait
2894 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");
2895 	status_t status = thread_block_with_timeout_locked(flags, timeout);
2896 	thread->user_thread->wait_status = status;
2897 
2898 	return syscall_restart_handle_timeout_post(status, timeout);
2899 }
2900 
2901 
2902 status_t
2903 _user_unblock_thread(thread_id threadID, status_t status)
2904 {
2905 	InterruptsSpinLocker locker(gThreadSpinlock);
2906 	return user_unblock_thread(threadID, status);
2907 }
2908 
2909 
2910 status_t
2911 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
2912 {
2913 	enum {
2914 		MAX_USER_THREADS_TO_UNBLOCK	= 128
2915 	};
2916 
2917 	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
2918 		return B_BAD_ADDRESS;
2919 	if (count > MAX_USER_THREADS_TO_UNBLOCK)
2920 		return B_BAD_VALUE;
2921 
2922 	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
2923 	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
2924 		return B_BAD_ADDRESS;
2925 
2926 	for (uint32 i = 0; i < count; i++)
2927 		user_unblock_thread(threads[i], status);
2928 
2929 	return B_OK;
2930 }
2931 
2932 
2933 // TODO: the following two functions don't belong here
2934 
2935 
2936 int
2937 _user_getrlimit(int resource, struct rlimit *urlp)
2938 {
2939 	struct rlimit rl;
2940 	int ret;
2941 
2942 	if (urlp == NULL)
2943 		return EINVAL;
2944 
2945 	if (!IS_USER_ADDRESS(urlp))
2946 		return B_BAD_ADDRESS;
2947 
2948 	ret = common_getrlimit(resource, &rl);
2949 
2950 	if (ret == 0) {
2951 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
2952 		if (ret < 0)
2953 			return ret;
2954 
2955 		return 0;
2956 	}
2957 
2958 	return ret;
2959 }
2960 
2961 
2962 int
2963 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
2964 {
2965 	struct rlimit resourceLimit;
2966 
2967 	if (userResourceLimit == NULL)
2968 		return EINVAL;
2969 
2970 	if (!IS_USER_ADDRESS(userResourceLimit)
2971 		|| user_memcpy(&resourceLimit, userResourceLimit,
2972 			sizeof(struct rlimit)) < B_OK)
2973 		return B_BAD_ADDRESS;
2974 
2975 	return common_setrlimit(resource, &resourceLimit);
2976 }
2977