xref: /haiku/src/system/kernel/thread.cpp (revision 4f2fd49bdc6078128b1391191e4edac647044c3d)
1 /*
2  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*! Threading routines */
10 
11 
12 #include <thread.h>
13 
14 #include <errno.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <sys/resource.h>
19 
20 #include <OS.h>
21 
22 #include <util/AutoLock.h>
23 #include <util/khash.h>
24 
25 #include <arch/debug.h>
26 #include <boot/kernel_args.h>
27 #include <condition_variable.h>
28 #include <cpu.h>
29 #include <int.h>
30 #include <kimage.h>
31 #include <kscheduler.h>
32 #include <ksignal.h>
33 #include <real_time_clock.h>
34 #include <smp.h>
35 #include <syscalls.h>
36 #include <syscall_restart.h>
37 #include <team.h>
38 #include <tls.h>
39 #include <user_runtime.h>
40 #include <user_thread.h>
41 #include <vfs.h>
42 #include <vm.h>
43 #include <vm_address_space.h>
44 #include <wait_for_objects.h>
45 
46 
47 //#define TRACE_THREAD
48 #ifdef TRACE_THREAD
49 #	define TRACE(x) dprintf x
50 #else
51 #	define TRACE(x) ;
52 #endif
53 
54 
55 #define THREAD_MAX_MESSAGE_SIZE		65536
56 
57 
58 struct thread_key {
59 	thread_id id;
60 };
61 
62 // global
63 spinlock gThreadSpinlock = B_SPINLOCK_INITIALIZER;
64 
65 // thread list
66 static struct thread sIdleThreads[B_MAX_CPU_COUNT];
67 static hash_table *sThreadHash = NULL;
68 static thread_id sNextThreadID = 1;
69 
70 // some arbitrarily chosen limits - should probably depend on the available
71 // memory (the limit is not yet enforced)
72 static int32 sMaxThreads = 4096;
73 static int32 sUsedThreads = 0;
74 
75 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
76 	struct thread*	thread;
77 	team_id			teamID;
78 	sem_id			deathSem;
79 
80 	UndertakerEntry(struct thread* thread, team_id teamID, sem_id deathSem)
81 		:
82 		thread(thread),
83 		teamID(teamID),
84 		deathSem(deathSem)
85 	{
86 	}
87 };
88 
89 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
90 static ConditionVariable sUndertakerCondition;
91 
92 // The dead queue is used as a pool from which to retrieve and reuse previously
93 // allocated thread structs when creating a new thread. It should be gone once
94 // the slab allocator is in.
95 static struct thread_queue dead_q;
96 
97 static void thread_kthread_entry(void);
98 static void thread_kthread_exit(void);
99 
100 
101 /*!
102 	Inserts a thread into a team.
103 	You must hold the team lock when you call this function.
104 */
105 static void
106 insert_thread_into_team(struct team *team, struct thread *thread)
107 {
108 	thread->team_next = team->thread_list;
109 	team->thread_list = thread;
110 	team->num_threads++;
111 
112 	if (team->num_threads == 1) {
113 		// this was the first thread
114 		team->main_thread = thread;
115 	}
116 	thread->team = team;
117 }
118 
119 
120 /*!
121 	Removes a thread from a team.
122 	You must hold the team lock when you call this function.
123 */
124 static void
125 remove_thread_from_team(struct team *team, struct thread *thread)
126 {
127 	struct thread *temp, *last = NULL;
128 
129 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
130 		if (temp == thread) {
131 			if (last == NULL)
132 				team->thread_list = temp->team_next;
133 			else
134 				last->team_next = temp->team_next;
135 
136 			team->num_threads--;
137 			break;
138 		}
139 		last = temp;
140 	}
141 }
142 
143 
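/*!	Callbacks for the global thread hash table (sThreadHash): threads are
	keyed by their ID. The compare function returns 0 on a match; the hash
	function maps a thread ID into the table's range.
*/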
144 static int
145 thread_struct_compare(void *_t, const void *_key)
146 {
147 	struct thread *thread = (struct thread*)_t;
148 	const struct thread_key *key = (const struct thread_key*)_key;
149 
150 	if (thread->id == key->id)
151 		return 0;
152 
153 	return 1;
154 }
155 
156 
157 static uint32
158 thread_struct_hash(void *_t, const void *_key, uint32 range)
159 {
160 	struct thread *thread = (struct thread*)_t;
161 	const struct thread_key *key = (const struct thread_key*)_key;
162 
163 	if (thread != NULL)
164 		return thread->id % range;
165 
166 	return (uint32)key->id % range;
167 }
168 
169 
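/*!	Clears all signal state of the given thread: pending signals, the signal
	block mask, the installed signal handlers, and the alternate signal stack.
*/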
170 static void
171 reset_signals(struct thread *thread)
172 {
173 	thread->sig_pending = 0;
174 	thread->sig_block_mask = 0;
175 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
176 	thread->signal_stack_base = 0;
177 	thread->signal_stack_size = 0;
178 	thread->signal_stack_enabled = false;
179 }
180 
181 
182 /*!
183 	Allocates and fills in a thread structure (or reuses one from the
184 	dead queue).
185 
186 	\param threadID The ID to be assigned to the new thread. If
187 		  \code < 0 \endcode a fresh one is allocated.
188 	\param inthread If non-NULL, initialize this thread structure instead of allocating one.
189 */
190 
191 static struct thread *
192 create_thread_struct(struct thread *inthread, const char *name,
193 	thread_id threadID, struct cpu_ent *cpu)
194 {
195 	struct thread *thread;
196 	cpu_status state;
197 	char temp[64];
198 
199 	if (inthread == NULL) {
200 		// try to recycle one from the dead queue first
201 		state = disable_interrupts();
202 		GRAB_THREAD_LOCK();
203 		thread = thread_dequeue(&dead_q);
204 		RELEASE_THREAD_LOCK();
205 		restore_interrupts(state);
206 
207 		// if not, create a new one
208 		if (thread == NULL) {
209 			thread = (struct thread *)malloc(sizeof(struct thread));
210 			if (thread == NULL)
211 				return NULL;
212 		}
213 	} else {
214 		thread = inthread;
215 	}
216 
217 	if (name != NULL)
218 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
219 	else
220 		strcpy(thread->name, "unnamed thread");
221 
222 	thread->flags = 0;
223 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
224 	thread->team = NULL;
225 	thread->cpu = cpu;
226 	thread->previous_cpu = NULL;
227 	thread->pinned_to_cpu = 0;
228 	thread->keep_scheduled = 0;
229 	thread->fault_handler = 0;
230 	thread->page_faults_allowed = 1;
231 	thread->kernel_stack_area = -1;
232 	thread->kernel_stack_base = 0;
233 	thread->user_stack_area = -1;
234 	thread->user_stack_base = 0;
235 	thread->user_local_storage = 0;
236 	thread->kernel_errno = 0;
237 	thread->team_next = NULL;
238 	thread->queue_next = NULL;
239 	thread->priority = thread->next_priority = -1;
240 	thread->io_priority = -1;
241 	thread->args1 = NULL;  thread->args2 = NULL;
242 	thread->alarm.period = 0;
243 	reset_signals(thread);
244 	thread->in_kernel = true;
245 	thread->was_yielded = false;
246 	thread->user_time = 0;
247 	thread->kernel_time = 0;
248 	thread->last_time = 0;
249 	thread->exit.status = 0;
250 	thread->exit.reason = 0;
251 	thread->exit.signal = 0;
252 	list_init(&thread->exit.waiters);
253 	thread->select_infos = NULL;
254 	thread->post_interrupt_callback = NULL;
255 	thread->post_interrupt_data = NULL;
256 
257 	sprintf(temp, "thread_%ld_retcode_sem", thread->id);
258 	thread->exit.sem = create_sem(0, temp);
259 	if (thread->exit.sem < B_OK)
260 		goto err1;
261 
262 	sprintf(temp, "%s send", thread->name);
263 	thread->msg.write_sem = create_sem(1, temp);
264 	if (thread->msg.write_sem < B_OK)
265 		goto err2;
266 
267 	sprintf(temp, "%s receive", thread->name);
268 	thread->msg.read_sem = create_sem(0, temp);
269 	if (thread->msg.read_sem < B_OK)
270 		goto err3;
271 
272 	if (arch_thread_init_thread_struct(thread) < B_OK)
273 		goto err4;
274 
275 	return thread;
276 
277 err4:
278 	delete_sem(thread->msg.read_sem);
279 err3:
280 	delete_sem(thread->msg.write_sem);
281 err2:
282 	delete_sem(thread->exit.sem);
283 err1:
284 	// ToDo: put them in the dead queue instead?
285 	if (inthread == NULL)
286 		free(thread);
287 	return NULL;
288 }
289 
290 
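/*!	Deletes the semaphores created by create_thread_struct() and frees the
	thread structure itself.
*/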
291 static void
292 delete_thread_struct(struct thread *thread)
293 {
294 	delete_sem(thread->exit.sem);
295 	delete_sem(thread->msg.write_sem);
296 	delete_sem(thread->msg.read_sem);
297 
298 	// ToDo: put them in the dead queue instead?
299 	free(thread);
300 }
301 
302 
303 /*! This function gets run by a new thread before anything else */
304 static void
305 thread_kthread_entry(void)
306 {
307 	struct thread *thread = thread_get_current_thread();
308 
309 	// The thread is new and has been scheduled the first time. Notify the user
310 	// debugger code.
311 	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
312 		user_debug_thread_scheduled(thread);
313 
314 	// simulates the thread spinlock release that would occur if the thread had
315 	// been rescheduled from. The reschedule didn't happen because the thread is new.
316 	RELEASE_THREAD_LOCK();
317 
318 	// start tracking time
319 	thread->last_time = system_time();
320 
321 	enable_interrupts(); // this essentially simulates a return-from-interrupt
322 }
323 
324 
325 static void
326 thread_kthread_exit(void)
327 {
328 	struct thread *thread = thread_get_current_thread();
329 
330 	thread->exit.reason = THREAD_RETURN_EXIT;
331 	thread_exit();
332 }
333 
334 
335 /*!
336 	Initializes the thread and jumps to its userspace entry point.
337 	This function is called at creation time of every user thread,
338 	but not for a team's main thread.
339 */
340 static int
341 _create_user_thread_kentry(void)
342 {
343 	struct thread *thread = thread_get_current_thread();
344 
345 	// jump to the entry point in user space
346 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
347 		thread->args1, thread->args2);
348 
349 	// only get here if the above call fails
350 	return 0;
351 }
352 
353 
354 /*! Initializes the thread and calls its kernel space entry point. */
355 static int
356 _create_kernel_thread_kentry(void)
357 {
358 	struct thread *thread = thread_get_current_thread();
359 	int (*func)(void *args) = (int (*)(void *))thread->entry;
360 
361 	// call the entry function with the appropriate args
362 	return func(thread->args1);
363 }
364 
365 
366 /*!
367 	Creates a new thread in the team with the specified team ID.
368 
369 	\param attributes The creation attributes. The ID to be assigned to the new
370 		  thread is taken from \c attributes.thread; if \code < 0 \endcode a fresh one is allocated.
371 */
372 static thread_id
373 create_thread(thread_creation_attributes& attributes, bool kernel)
374 {
375 	struct thread *thread, *currentThread;
376 	struct team *team;
377 	cpu_status state;
378 	char stack_name[B_OS_NAME_LENGTH];
379 	status_t status;
380 	bool abort = false;
381 	bool debugNewThread = false;
382 
383 	TRACE(("create_thread(%s, id = %ld, %s)\n", attributes.name,
384 		attributes.thread, kernel ? "kernel" : "user"));
385 
386 	thread = create_thread_struct(NULL, attributes.name, attributes.thread,
387 		NULL);
388 	if (thread == NULL)
389 		return B_NO_MEMORY;
390 
391 	thread->priority = attributes.priority == -1
392 		? B_NORMAL_PRIORITY : attributes.priority;
393 	thread->next_priority = thread->priority;
394 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
395 	thread->state = B_THREAD_SUSPENDED;
396 	thread->next_state = B_THREAD_SUSPENDED;
397 
398 	// init debug structure
399 	init_thread_debug_info(&thread->debug_info);
400 
401 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", attributes.name,
402 		thread->id);
403 	thread->kernel_stack_area = create_area(stack_name,
404 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
405 		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES  * B_PAGE_SIZE,
406 		B_FULL_LOCK,
407 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
408 
409 	if (thread->kernel_stack_area < 0) {
410 		// we're not yet part of a team, so we can just bail out
411 		status = thread->kernel_stack_area;
412 
413 		dprintf("create_thread: error creating kernel stack: %s!\n",
414 			strerror(status));
415 
416 		delete_thread_struct(thread);
417 		return status;
418 	}
419 
420 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
421 		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
422 
423 	state = disable_interrupts();
424 	GRAB_THREAD_LOCK();
425 
426 	// If the new thread belongs to the same team as the current thread,
427 	// it may inherit some of the thread debug flags.
428 	currentThread = thread_get_current_thread();
429 	if (currentThread && currentThread->team->id == attributes.team) {
430 		// inherit all user flags...
431 		int32 debugFlags = currentThread->debug_info.flags
432 			& B_THREAD_DEBUG_USER_FLAG_MASK;
433 
434 		// ... but clear the syscall tracing flags, unless explicitly specified
435 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
436 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
437 				| B_THREAD_DEBUG_POST_SYSCALL);
438 		}
439 
440 		thread->debug_info.flags = debugFlags;
441 
442 		// stop the new thread, if desired
443 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
444 	}
445 
446 	// insert into global list
447 	hash_insert(sThreadHash, thread);
448 	sUsedThreads++;
449 	RELEASE_THREAD_LOCK();
450 
451 	GRAB_TEAM_LOCK();
452 	// look at the team, make sure it's not being deleted
453 	team = team_get_team_struct_locked(attributes.team);
454 
455 	if (team == NULL || team->state == TEAM_STATE_DEATH)
456 		abort = true;
457 
458 	if (!abort && !kernel) {
459 		thread->user_thread = team_allocate_user_thread(team);
460 		abort = thread->user_thread == NULL;
461 	}
462 
463 	if (!abort) {
464 		// Debug the new thread, if the parent thread required that (see above),
465 		// or the respective global team debug flag is set. But only if a
466 		// debugger is installed for the team.
467 		debugNewThread |= (atomic_get(&team->debug_info.flags)
468 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
469 		if (debugNewThread
470 			&& (atomic_get(&team->debug_info.flags)
471 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
472 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
473 		}
474 
475 		insert_thread_into_team(team, thread);
476 	}
477 
478 	RELEASE_TEAM_LOCK();
479 	if (abort) {
480 		GRAB_THREAD_LOCK();
481 		hash_remove(sThreadHash, thread);
482 		RELEASE_THREAD_LOCK();
483 	}
484 	restore_interrupts(state);
485 	if (abort) {
486 		delete_area(thread->kernel_stack_area);
487 		delete_thread_struct(thread);
488 		return B_BAD_TEAM_ID;
489 	}
490 
491 	thread->args1 = attributes.args1;
492 	thread->args2 = attributes.args2;
493 	thread->entry = attributes.entry;
494 	status = thread->id;
495 
496 	if (kernel) {
497 		// this sets up an initial kthread stack that runs the entry
498 
499 		// Note: whatever function wants to set up a user stack later for this
500 		// thread must initialize the TLS for it
501 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
502 			&thread_kthread_entry, &thread_kthread_exit);
503 	} else {
504 		// create user stack
505 
506 		// the stack will be between USER_STACK_REGION and the main thread stack
507 		// area (the user stack of the main thread is created in
508 		// team_create_team())
509 		if (attributes.stack_address == NULL) {
510 			thread->user_stack_base = USER_STACK_REGION;
511 			if (attributes.stack_size <= 0)
512 				thread->user_stack_size = USER_STACK_SIZE;
513 			else
514 				thread->user_stack_size = PAGE_ALIGN(attributes.stack_size);
515 			thread->user_stack_size += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
516 
517 			snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack",
518 				attributes.name, thread->id);
519 			thread->user_stack_area = create_area_etc(team->id, stack_name,
520 					(void **)&thread->user_stack_base, B_BASE_ADDRESS,
521 					thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
522 					B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0);
523 			if (thread->user_stack_area < B_OK
524 				|| arch_thread_init_tls(thread) < B_OK) {
525 				// great, we have a fully running thread without a (usable)
526 				// stack
527 				dprintf("create_thread: unable to create proper user stack!\n");
528 				status = thread->user_stack_area;
529 				kill_thread(thread->id);
530 			}
531 		} else {
532 			thread->user_stack_base = (addr_t)attributes.stack_address;
533 			thread->user_stack_size = attributes.stack_size;
534 		}
535 
536 		user_debug_update_new_thread_flags(thread->id);
537 
538 		// copy the user entry over to the args field in the thread struct;
539 		// the function this will call immediately switches the thread into
540 		// user space.
541 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
542 			&thread_kthread_entry, &thread_kthread_exit);
543 	}
544 
545 	return status;
546 }
547 
548 
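/*!	The undertaker thread waits for exited threads to be queued and then
	buries them: it deletes their kernel stack area, removes them from the
	kernel team, releases the death semaphore of their former team (if any),
	and finally puts the thread structure into the dead queue for reuse.
*/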
549 static status_t
550 undertaker(void* /*args*/)
551 {
552 	while (true) {
553 		// wait for a thread to bury
554 		InterruptsSpinLocker locker(gThreadSpinlock);
555 
556 		while (sUndertakerEntries.IsEmpty()) {
557 			ConditionVariableEntry conditionEntry;
558 			sUndertakerCondition.Add(&conditionEntry);
559 			locker.Unlock();
560 
561 			conditionEntry.Wait();
562 
563 			locker.Lock();
564 		}
565 
566 		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
567 		locker.Unlock();
568 
569 		UndertakerEntry entry = *_entry;
570 			// we need a copy, since the original entry is on the thread's stack
571 
572 		// we've got an entry
573 		struct thread* thread = entry.thread;
574 
575 		// delete the old kernel stack area
576 		delete_area(thread->kernel_stack_area);
577 
578 		// remove this thread from all of the global lists
579 		disable_interrupts();
580 		GRAB_TEAM_LOCK();
581 
582 		remove_thread_from_team(team_get_kernel_team(), thread);
583 
584 		RELEASE_TEAM_LOCK();
585 		enable_interrupts();
586 			// needed for the debugger notification below
587 
588 		if (entry.deathSem >= 0)
589 			release_sem_etc(entry.deathSem, 1, B_DO_NOT_RESCHEDULE);
590 
591 		// free the thread structure
592 		locker.Lock();
593 		thread_enqueue(thread, &dead_q);
594 			// TODO: Use the slab allocator!
595 	}
596 }
597 
598 
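/*!	Returns the ID of the semaphore the given thread is currently blocked on,
	or -1 if it is not waiting on a semaphore.
*/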
599 static sem_id
600 get_thread_wait_sem(struct thread* thread)
601 {
602 	if (thread->state == B_THREAD_WAITING
603 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
604 		return (sem_id)(addr_t)thread->wait.object;
605 	}
606 	return -1;
607 }
608 
609 
610 /*!
611 	Fills the thread_info structure with information from the specified
612 	thread.
613 	The thread lock must be held when called.
614 */
615 static void
616 fill_thread_info(struct thread *thread, thread_info *info, size_t size)
617 {
618 	info->thread = thread->id;
619 	info->team = thread->team->id;
620 
621 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
622 
623 	if (thread->state == B_THREAD_WAITING) {
624 		info->state = B_THREAD_WAITING;
625 
626 		switch (thread->wait.type) {
627 			case THREAD_BLOCK_TYPE_SNOOZE:
628 				info->state = B_THREAD_ASLEEP;
629 				break;
630 
631 			case THREAD_BLOCK_TYPE_SEMAPHORE:
632 			{
633 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
634 				if (sem == thread->msg.read_sem)
635 					info->state = B_THREAD_RECEIVING;
636 				break;
637 			}
638 
639 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
640 			default:
641 				break;
642 		}
643 	} else
644 		info->state = (thread_state)thread->state;
645 
646 	info->priority = thread->priority;
647 	info->user_time = thread->user_time;
648 	info->kernel_time = thread->kernel_time;
649 	info->stack_base = (void *)thread->user_stack_base;
650 	info->stack_end = (void *)(thread->user_stack_base
651 		+ thread->user_stack_size);
652 	info->sem = get_thread_wait_sem(thread);
653 }
654 
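/*!	Delivers a message (a code plus an optional buffer of at most
	\c THREAD_MAX_MESSAGE_SIZE bytes) to the target thread. The caller blocks
	on the target's write semaphore until the single message slot is free,
	copies the buffer into a cbuf chain, stores the message in the target's
	thread structure, and releases the target's read semaphore.
*/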
655 static status_t
656 send_data_etc(thread_id id, int32 code, const void *buffer,
657 	size_t bufferSize, int32 flags)
658 {
659 	struct thread *target;
660 	sem_id cachedSem;
661 	cpu_status state;
662 	status_t status;
663 	cbuf *data;
664 
665 	state = disable_interrupts();
666 	GRAB_THREAD_LOCK();
667 	target = thread_get_thread_struct_locked(id);
668 	if (!target) {
669 		RELEASE_THREAD_LOCK();
670 		restore_interrupts(state);
671 		return B_BAD_THREAD_ID;
672 	}
673 	cachedSem = target->msg.write_sem;
674 	RELEASE_THREAD_LOCK();
675 	restore_interrupts(state);
676 
677 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
678 		return B_NO_MEMORY;
679 
680 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
681 	if (status == B_INTERRUPTED) {
682 		// We got interrupted by a signal
683 		return status;
684 	}
685 	if (status != B_OK) {
686 		// Any other acquisition problems may be due to thread deletion
687 		return B_BAD_THREAD_ID;
688 	}
689 
690 	if (bufferSize > 0) {
691 		data = cbuf_get_chain(bufferSize);
692 		if (data == NULL)
693 			return B_NO_MEMORY;
694 		status = cbuf_user_memcpy_to_chain(data, 0, buffer, bufferSize);
695 		if (status < B_OK) {
696 			cbuf_free_chain(data);
697 			return B_NO_MEMORY;
698 		}
699 	} else
700 		data = NULL;
701 
702 	state = disable_interrupts();
703 	GRAB_THREAD_LOCK();
704 
705 	// The target thread could have been deleted at this point
706 	target = thread_get_thread_struct_locked(id);
707 	if (target == NULL) {
708 		RELEASE_THREAD_LOCK();
709 		restore_interrupts(state);
710 		cbuf_free_chain(data);
711 		return B_BAD_THREAD_ID;
712 	}
713 
714 	// Save message information
715 	target->msg.sender = thread_get_current_thread()->id;
716 	target->msg.code = code;
717 	target->msg.size = bufferSize;
718 	target->msg.buffer = data;
719 	cachedSem = target->msg.read_sem;
720 
721 	RELEASE_THREAD_LOCK();
722 	restore_interrupts(state);
723 
724 	release_sem(cachedSem);
725 	return B_OK;
726 }
727 
728 
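/*!	Counterpart of send_data_etc(): blocks on the calling thread's read
	semaphore until a message arrives, copies the buffer to the caller (if
	requested), stores the sender's ID in \a _sender, releases the write
	semaphore so the next message can be sent, and returns the message code.
*/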
729 static int32
730 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
731 	int32 flags)
732 {
733 	struct thread *thread = thread_get_current_thread();
734 	status_t status;
735 	size_t size;
736 	int32 code;
737 
738 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
739 	if (status < B_OK) {
740 		// Actually, we're not supposed to return error codes
741 		// but since the only reason this can fail is that we
742 		// were killed, it's probably okay to do so (but also
743 		// meaningless).
744 		return status;
745 	}
746 
747 	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
748 		size = min_c(bufferSize, thread->msg.size);
749 		status = cbuf_user_memcpy_from_chain(buffer, thread->msg.buffer,
750 			0, size);
751 		if (status < B_OK) {
752 			cbuf_free_chain(thread->msg.buffer);
753 			release_sem(thread->msg.write_sem);
754 			return status;
755 		}
756 	}
757 
758 	*_sender = thread->msg.sender;
759 	code = thread->msg.code;
760 
761 	cbuf_free_chain(thread->msg.buffer);
762 	release_sem(thread->msg.write_sem);
763 
764 	return code;
765 }
766 
767 
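/*!	Common backend for getrlimit(): \c RLIMIT_NOFILE and \c RLIMIT_NOVMON are
	forwarded to the VFS, \c RLIMIT_CORE reports 0/0 (core files are not
	supported), and \c RLIMIT_STACK reports the current thread's user stack size.
*/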
768 static status_t
769 common_getrlimit(int resource, struct rlimit * rlp)
770 {
771 	if (!rlp)
772 		return B_BAD_ADDRESS;
773 
774 	switch (resource) {
775 		case RLIMIT_NOFILE:
776 		case RLIMIT_NOVMON:
777 			return vfs_getrlimit(resource, rlp);
778 
779 		case RLIMIT_CORE:
780 			rlp->rlim_cur = 0;
781 			rlp->rlim_max = 0;
782 			return B_OK;
783 
784 		case RLIMIT_STACK:
785 		{
786 			struct thread *thread = thread_get_current_thread();
787 			if (!thread)
788 				return B_ERROR;
789 			rlp->rlim_cur = thread->user_stack_size;
790 			rlp->rlim_max = thread->user_stack_size;
791 			return B_OK;
792 		}
793 
794 		default:
795 			return EINVAL;
796 	}
797 
798 	return B_OK;
799 }
800 
801 
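/*!	Common backend for setrlimit(): \c RLIMIT_NOFILE and \c RLIMIT_NOVMON are
	forwarded to the VFS; \c RLIMIT_CORE only accepts 0/0, since core files
	are not supported.
*/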
802 static status_t
803 common_setrlimit(int resource, const struct rlimit * rlp)
804 {
805 	if (!rlp)
806 		return B_BAD_ADDRESS;
807 
808 	switch (resource) {
809 		case RLIMIT_NOFILE:
810 		case RLIMIT_NOVMON:
811 			return vfs_setrlimit(resource, rlp);
812 
813 		case RLIMIT_CORE:
814 			// We don't support core files, so only allow setting this to 0/0.
815 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
816 				return EINVAL;
817 			return B_OK;
818 
819 		default:
820 			return EINVAL;
821 	}
822 
823 	return B_OK;
824 }
825 
826 
827 //	#pragma mark - debugger calls
828 
829 
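/*!	Debugger command: resets the priority of all threads (or only the given
	one) running above \c B_DISPLAY_PRIORITY back to \c B_NORMAL_PRIORITY.
*/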
830 static int
831 make_thread_unreal(int argc, char **argv)
832 {
833 	struct thread *thread;
834 	struct hash_iterator i;
835 	int32 id = -1;
836 
837 	if (argc > 2) {
838 		print_debugger_command_usage(argv[0]);
839 		return 0;
840 	}
841 
842 	if (argc > 1)
843 		id = strtoul(argv[1], NULL, 0);
844 
845 	hash_open(sThreadHash, &i);
846 
847 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
848 		if (id != -1 && thread->id != id)
849 			continue;
850 
851 		if (thread->priority > B_DISPLAY_PRIORITY) {
852 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
853 			kprintf("thread %ld made unreal\n", thread->id);
854 		}
855 	}
856 
857 	hash_close(sThreadHash, &i, false);
858 	return 0;
859 }
860 
861 
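/*!	Debugger command: sets the priority of the given thread (the current one
	by default) to the specified value, clamped to the allowed range.
*/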
862 static int
863 set_thread_prio(int argc, char **argv)
864 {
865 	struct thread *thread;
866 	struct hash_iterator i;
867 	int32 id;
868 	int32 prio;
869 
870 	if (argc > 3 || argc < 2) {
871 		print_debugger_command_usage(argv[0]);
872 		return 0;
873 	}
874 
875 	prio = strtoul(argv[1], NULL, 0);
876 	if (prio > THREAD_MAX_SET_PRIORITY)
877 		prio = THREAD_MAX_SET_PRIORITY;
878 	if (prio < THREAD_MIN_SET_PRIORITY)
879 		prio = THREAD_MIN_SET_PRIORITY;
880 
881 	if (argc > 2)
882 		id = strtoul(argv[2], NULL, 0);
883 	else
884 		id = thread_get_current_thread()->id;
885 
886 	hash_open(sThreadHash, &i);
887 
888 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
889 		if (thread->id != id)
890 			continue;
891 		thread->priority = thread->next_priority = prio;
892 		kprintf("thread %ld set to priority %ld\n", id, prio);
893 		break;
894 	}
895 	if (!thread)
896 		kprintf("thread %ld (%#lx) not found\n", id, id);
897 
898 	hash_close(sThreadHash, &i, false);
899 	return 0;
900 }
901 
902 
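/*!	Debugger command: marks the given thread (the current one by default) to
	be suspended at the next reschedule by setting its next state to
	\c B_THREAD_SUSPENDED.
*/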
903 static int
904 make_thread_suspended(int argc, char **argv)
905 {
906 	struct thread *thread;
907 	struct hash_iterator i;
908 	int32 id;
909 
910 	if (argc > 2) {
911 		print_debugger_command_usage(argv[0]);
912 		return 0;
913 	}
914 
915 	if (argc == 1)
916 		id = thread_get_current_thread()->id;
917 	else
918 		id = strtoul(argv[1], NULL, 0);
919 
920 	hash_open(sThreadHash, &i);
921 
922 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
923 		if (thread->id != id)
924 			continue;
925 
926 		thread->next_state = B_THREAD_SUSPENDED;
927 		kprintf("thread %ld suspended\n", id);
928 		break;
929 	}
930 	if (!thread)
931 		kprintf("thread %ld (%#lx) not found\n", id, id);
932 
933 	hash_close(sThreadHash, &i, false);
934 	return 0;
935 }
936 
937 
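/*!	Debugger command: puts the given thread back into the run queue if it is
	currently suspended. The thread ID must be given explicitly.
*/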
938 static int
939 make_thread_resumed(int argc, char **argv)
940 {
941 	struct thread *thread;
942 	struct hash_iterator i;
943 	int32 id;
944 
945 	if (argc != 2) {
946 		print_debugger_command_usage(argv[0]);
947 		return 0;
948 	}
949 
950 	// force user to enter a thread id, as using
951 	// the current thread is usually not intended
952 	id = strtoul(argv[1], NULL, 0);
953 
954 	hash_open(sThreadHash, &i);
955 
956 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
957 		if (thread->id != id)
958 			continue;
959 
960 		if (thread->state == B_THREAD_SUSPENDED) {
961 			scheduler_enqueue_in_run_queue(thread);
962 			kprintf("thread %ld resumed\n", thread->id);
963 		}
964 		break;
965 	}
966 	if (!thread)
967 		kprintf("thread %ld (%#lx) not found\n", id, id);
968 
969 	hash_close(sThreadHash, &i, false);
970 	return 0;
971 }
972 
973 
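/*!	Debugger command: lets the given thread (the current one by default) drop
	into the userland debugger via _user_debug_thread().
*/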
974 static int
975 drop_into_debugger(int argc, char **argv)
976 {
977 	status_t err;
978 	int32 id;
979 
980 	if (argc > 2) {
981 		print_debugger_command_usage(argv[0]);
982 		return 0;
983 	}
984 
985 	if (argc == 1)
986 		id = thread_get_current_thread()->id;
987 	else
988 		id = strtoul(argv[1], NULL, 0);
989 
990 	err = _user_debug_thread(id);
991 	if (err)
992 		kprintf("drop failed\n");
993 	else
994 		kprintf("thread %ld dropped into user debugger\n", id);
995 
996 	return 0;
997 }
998 
999 
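/*!	Returns a short human readable description of a thread state for the
	debugger output; for waiting threads the block type refines the result.
*/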
1000 static const char *
1001 state_to_text(struct thread *thread, int32 state)
1002 {
1003 	switch (state) {
1004 		case B_THREAD_READY:
1005 			return "ready";
1006 
1007 		case B_THREAD_RUNNING:
1008 			return "running";
1009 
1010 		case B_THREAD_WAITING:
1011 		{
1012 			if (thread != NULL) {
1013 				switch (thread->wait.type) {
1014 					case THREAD_BLOCK_TYPE_SNOOZE:
1015 						return "zzz";
1016 
1017 					case THREAD_BLOCK_TYPE_SEMAPHORE:
1018 					{
1019 						sem_id sem = (sem_id)(addr_t)thread->wait.object;
1020 						if (sem == thread->msg.read_sem)
1021 							return "receive";
1022 						break;
1023 					}
1024 				}
1025 			}
1026 
1027 			return "waiting";
1028 		}
1029 
1030 		case B_THREAD_SUSPENDED:
1031 			return "suspended";
1032 
1033 		case THREAD_STATE_FREE_ON_RESCHED:
1034 			return "death";
1035 
1036 		default:
1037 			return "UNKNOWN";
1038 	}
1039 }
1040 
1041 
1042 static void
1043 print_thread_list_table_head()
1044 {
1045 	kprintf("thread         id  state     wait for   object  cpu pri  stack    "
1046 		"  team  name\n");
1047 }
1048 
1049 
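/*!	Prints information about a thread to the kernel debugger, either as a
	single table row (\a shortInfo) or as a full multi-line dump.
*/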
1050 static void
1051 _dump_thread_info(struct thread *thread, bool shortInfo)
1052 {
1053 	if (shortInfo) {
1054 		kprintf("%p %6ld  %-10s", thread, thread->id, state_to_text(thread,
1055 			thread->state));
1056 
1057 		// does it block on a semaphore or a condition variable?
1058 		if (thread->state == B_THREAD_WAITING) {
1059 			switch (thread->wait.type) {
1060 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1061 				{
1062 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1063 					if (sem == thread->msg.read_sem)
1064 						kprintf("                    ");
1065 					else
1066 						kprintf("sem  %12ld   ", sem);
1067 					break;
1068 				}
1069 
1070 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1071 					kprintf("cvar   %p   ", thread->wait.object);
1072 					break;
1073 
1074 				case THREAD_BLOCK_TYPE_SNOOZE:
1075 					kprintf("                    ");
1076 					break;
1077 
1078 				case THREAD_BLOCK_TYPE_SIGNAL:
1079 					kprintf("signal              ");
1080 					break;
1081 
1082 				case THREAD_BLOCK_TYPE_MUTEX:
1083 					kprintf("mutex  %p   ", thread->wait.object);
1084 					break;
1085 
1086 				case THREAD_BLOCK_TYPE_RW_LOCK:
1087 					kprintf("rwlock %p   ", thread->wait.object);
1088 					break;
1089 
1090 				case THREAD_BLOCK_TYPE_OTHER:
1091 					kprintf("other               ");
1092 					break;
1093 
1094 				default:
1095 					kprintf("???    %p   ", thread->wait.object);
1096 					break;
1097 			}
1098 		} else
1099 			kprintf("        -           ");
1100 
1101 		// on which CPU does it run?
1102 		if (thread->cpu)
1103 			kprintf("%2d", thread->cpu->cpu_num);
1104 		else
1105 			kprintf(" -");
1106 
1107 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1108 			(void *)thread->kernel_stack_base, thread->team->id,
1109 			thread->name != NULL ? thread->name : "<NULL>");
1110 
1111 		return;
1112 	}
1113 
1114 	// print the long info
1115 
1116 	struct death_entry *death = NULL;
1117 
1118 	kprintf("THREAD: %p\n", thread);
1119 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1120 	kprintf("name:               \"%s\"\n", thread->name);
1121 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1122 		thread->all_next, thread->team_next, thread->queue_next);
1123 	kprintf("priority:           %ld (next %ld, I/O: %ld)\n", thread->priority,
1124 		thread->next_priority, thread->io_priority);
1125 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1126 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1127 	kprintf("cpu:                %p ", thread->cpu);
1128 	if (thread->cpu)
1129 		kprintf("(%d)\n", thread->cpu->cpu_num);
1130 	else
1131 		kprintf("\n");
1132 	kprintf("sig_pending:        %#lx (blocked: %#lx)\n", thread->sig_pending,
1133 		thread->sig_block_mask);
1134 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1135 
1136 	if (thread->state == B_THREAD_WAITING) {
1137 		kprintf("waiting for:        ");
1138 
1139 		switch (thread->wait.type) {
1140 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1141 			{
1142 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1143 				if (sem == thread->msg.read_sem)
1144 					kprintf("data\n");
1145 				else
1146 					kprintf("semaphore %ld\n", sem);
1147 				break;
1148 			}
1149 
1150 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1151 				kprintf("condition variable %p\n", thread->wait.object);
1152 				break;
1153 
1154 			case THREAD_BLOCK_TYPE_SNOOZE:
1155 				kprintf("snooze()\n");
1156 				break;
1157 
1158 			case THREAD_BLOCK_TYPE_SIGNAL:
1159 				kprintf("signal\n");
1160 				break;
1161 
1162 			case THREAD_BLOCK_TYPE_MUTEX:
1163 				kprintf("mutex %p\n", thread->wait.object);
1164 				break;
1165 
1166 			case THREAD_BLOCK_TYPE_RW_LOCK:
1167 				kprintf("rwlock %p\n", thread->wait.object);
1168 				break;
1169 
1170 			case THREAD_BLOCK_TYPE_OTHER:
1171 				kprintf("other (%s)\n", (char*)thread->wait.object);
1172 				break;
1173 
1174 			default:
1175 				kprintf("unknown (%p)\n", thread->wait.object);
1176 				break;
1177 		}
1178 	}
1179 
1180 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1181 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1182 	kprintf("entry:              %p\n", (void *)thread->entry);
1183 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1184 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1185 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1186 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1187 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1188 	kprintf("  exit.waiters:\n");
1189 	while ((death = (struct death_entry*)list_get_next_item(
1190 			&thread->exit.waiters, death)) != NULL) {
1191 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1192 	}
1193 
1194 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1195 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1196 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1197 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1198 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1199 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1200 		strerror(thread->kernel_errno));
1201 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1202 	kprintf("user_time:          %Ld\n", thread->user_time);
1203 	kprintf("flags:              0x%lx\n", thread->flags);
1204 	kprintf("architecture dependent section:\n");
1205 	arch_thread_dump_info(&thread->arch_info);
1206 }
1207 
1208 
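/*!	Debugger command: dumps information about the current thread, a thread
	given by structure address, or threads matched by ID or name; "-s"
	selects the short table format.
*/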
1209 static int
1210 dump_thread_info(int argc, char **argv)
1211 {
1212 	bool shortInfo = false;
1213 	int argi = 1;
1214 	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
1215 		shortInfo = true;
1216 		print_thread_list_table_head();
1217 		argi++;
1218 	}
1219 
1220 	if (argi == argc) {
1221 		_dump_thread_info(thread_get_current_thread(), shortInfo);
1222 		return 0;
1223 	}
1224 
1225 	for (; argi < argc; argi++) {
1226 		const char *name = argv[argi];
1227 		int32 id = strtoul(name, NULL, 0);
1228 
1229 		if (IS_KERNEL_ADDRESS(id)) {
1230 			// semi-hack
1231 			_dump_thread_info((struct thread *)id, shortInfo);
1232 			continue;
1233 		}
1234 
1235 		// walk through the thread list, trying to match name or id
1236 		bool found = false;
1237 		struct hash_iterator i;
1238 		hash_open(sThreadHash, &i);
1239 		struct thread *thread;
1240 		while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1241 			if (!strcmp(name, thread->name) || thread->id == id) {
1242 				_dump_thread_info(thread, shortInfo);
1243 				found = true;
1244 				break;
1245 			}
1246 		}
1247 		hash_close(sThreadHash, &i, false);
1248 
1249 		if (!found)
1250 			kprintf("thread \"%s\" (%ld) doesn't exist!\n", name, id);
1251 	}
1252 
1253 	return 0;
1254 }
1255 
1256 
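/*!	Debugger command backend shared by several list commands: depending on
	the command name it lists all threads or only those that are ready,
	running, waiting (optionally on a given semaphore), real-time, or
	currently calling a given symbol/address range, optionally filtered by team.
*/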
1257 static int
1258 dump_thread_list(int argc, char **argv)
1259 {
1260 	struct thread *thread;
1261 	struct hash_iterator i;
1262 	bool realTimeOnly = false;
1263 	bool calling = false;
1264 	const char *callSymbol = NULL;
1265 	addr_t callStart = 0;
1266 	addr_t callEnd = 0;
1267 	int32 requiredState = 0;
1268 	team_id team = -1;
1269 	sem_id sem = -1;
1270 
1271 	if (!strcmp(argv[0], "realtime"))
1272 		realTimeOnly = true;
1273 	else if (!strcmp(argv[0], "ready"))
1274 		requiredState = B_THREAD_READY;
1275 	else if (!strcmp(argv[0], "running"))
1276 		requiredState = B_THREAD_RUNNING;
1277 	else if (!strcmp(argv[0], "waiting")) {
1278 		requiredState = B_THREAD_WAITING;
1279 
1280 		if (argc > 1) {
1281 			sem = strtoul(argv[1], NULL, 0);
1282 			if (sem == 0)
1283 				kprintf("ignoring invalid semaphore argument.\n");
1284 		}
1285 	} else if (!strcmp(argv[0], "calling")) {
1286 		if (argc < 2) {
1287 			kprintf("Need to give a symbol name or start and end arguments.\n");
1288 			return 0;
1289 		} else if (argc == 3) {
1290 			callStart = parse_expression(argv[1]);
1291 			callEnd = parse_expression(argv[2]);
1292 		} else
1293 			callSymbol = argv[1];
1294 
1295 		calling = true;
1296 	} else if (argc > 1) {
1297 		team = strtoul(argv[1], NULL, 0);
1298 		if (team == 0)
1299 			kprintf("ignoring invalid team argument.\n");
1300 	}
1301 
1302 	print_thread_list_table_head();
1303 
1304 	hash_open(sThreadHash, &i);
1305 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1306 		// filter out threads not matching the search criteria
1307 		if ((requiredState && thread->state != requiredState)
1308 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1309 					callStart, callEnd))
1310 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1311 			|| (team > 0 && thread->team->id != team)
1312 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1313 			continue;
1314 
1315 		_dump_thread_info(thread, true);
1316 	}
1317 	hash_close(sThreadHash, &i, false);
1318 	return 0;
1319 }
1320 
1321 
1322 //	#pragma mark - private kernel API
1323 
1324 
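/*!	Terminates the calling thread. Deletes its user stack, moves it from its
	team into the kernel team (deleting the team, if it was the main thread),
	fills all registered death entries and deletes the exit semaphore to wake
	up waiters, shuts down thread messaging, notifies the debugger, and
	finally hands the thread to the undertaker before rescheduling for the
	last time. This function does not return.
*/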
1325 void
1326 thread_exit(void)
1327 {
1328 	cpu_status state;
1329 	struct thread *thread = thread_get_current_thread();
1330 	struct team *team = thread->team;
1331 	thread_id parentID = -1;
1332 	bool deleteTeam = false;
1333 	sem_id cachedDeathSem = -1;
1334 	status_t status;
1335 	struct thread_debug_info debugInfo;
1336 	team_id teamID = team->id;
1337 
1338 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1339 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1340 			? "due to signal" : "normally", thread->exit.status));
1341 
1342 	if (!are_interrupts_enabled())
1343 		panic("thread_exit() called with interrupts disabled!\n");
1344 
1345 	// boost our priority to get this over with
1346 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1347 
1348 	// Cancel previously installed alarm timer, if any
1349 	cancel_timer(&thread->alarm);
1350 
1351 	// delete the user stack area first, we won't need it anymore
1352 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1353 		area_id area = thread->user_stack_area;
1354 		thread->user_stack_area = -1;
1355 		vm_delete_area(team->id, area, true);
1356 	}
1357 
1358 	struct job_control_entry *death = NULL;
1359 	struct death_entry* threadDeathEntry = NULL;
1360 
1361 	if (team != team_get_kernel_team()) {
1362 		user_debug_thread_exiting(thread);
1363 
1364 		if (team->main_thread == thread) {
1365 			// this was the main thread in this team, so we will delete that as well
1366 			deleteTeam = true;
1367 		} else {
1368 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1369 			team_free_user_thread(thread);
1370 		}
1371 
1372 		// remove this thread from the current team and add it to the kernel
1373 		// team, where it stays until it dies
1374 		state = disable_interrupts();
1375 		GRAB_TEAM_LOCK();
1376 		GRAB_THREAD_LOCK();
1377 			// removing the thread and putting its death entry to the parent
1378 			// team needs to be an atomic operation
1379 
1380 		// remember how long this thread lasted
1381 		team->dead_threads_kernel_time += thread->kernel_time;
1382 		team->dead_threads_user_time += thread->user_time;
1383 
1384 		remove_thread_from_team(team, thread);
1385 		insert_thread_into_team(team_get_kernel_team(), thread);
1386 
1387 		cachedDeathSem = team->death_sem;
1388 
1389 		if (deleteTeam) {
1390 			struct team *parent = team->parent;
1391 
1392 			// remember who our parent was so we can send a signal
1393 			parentID = parent->id;
1394 
1395 			// Set the team job control state to "dead" and detach the job
1396 			// control entry from our team struct.
1397 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1398 			death = team->job_control_entry;
1399 			team->job_control_entry = NULL;
1400 
1401 			if (death != NULL) {
1402 				death->InitDeadState();
1403 
1404 				// team_set_job_control_state() already moved our entry
1405 				// into the parent's list. We just check the soft limit of
1406 				// death entries.
1407 				if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
1408 					death = parent->dead_children->entries.RemoveHead();
1409 					parent->dead_children->count--;
1410 				} else
1411 					death = NULL;
1412 
1413 				RELEASE_THREAD_LOCK();
1414 			} else
1415 				RELEASE_THREAD_LOCK();
1416 
1417 			team_remove_team(team);
1418 
1419 			send_signal_etc(parentID, SIGCHLD,
1420 				SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
1421 		} else {
1422 			// The thread is not the main thread. We store a thread death
1423 			// entry for it, unless someone is already waiting for it.
1424 			if (threadDeathEntry != NULL
1425 				&& list_is_empty(&thread->exit.waiters)) {
1426 				threadDeathEntry->thread = thread->id;
1427 				threadDeathEntry->status = thread->exit.status;
1428 				threadDeathEntry->reason = thread->exit.reason;
1429 				threadDeathEntry->signal = thread->exit.signal;
1430 
1431 				// add entry -- remove an old one, if we hit the limit
1432 				list_add_item(&team->dead_threads, threadDeathEntry);
1433 				team->dead_threads_count++;
1434 				threadDeathEntry = NULL;
1435 
1436 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1437 					threadDeathEntry = (death_entry*)list_remove_head_item(
1438 						&team->dead_threads);
1439 					team->dead_threads_count--;
1440 				}
1441 			}
1442 
1443 			RELEASE_THREAD_LOCK();
1444 		}
1445 
1446 		RELEASE_TEAM_LOCK();
1447 
1448 		// swap address spaces, to make sure we're running on the kernel's pgdir
1449 		vm_swap_address_space(team->address_space, vm_kernel_address_space());
1450 		restore_interrupts(state);
1451 
1452 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1453 	}
1454 
1455 	if (threadDeathEntry != NULL)
1456 		free(threadDeathEntry);
1457 
1458 	// delete the team if we're its main thread
1459 	if (deleteTeam) {
1460 		team_delete_team(team);
1461 
1462 		// we need to delete any death entry that made it to here
1463 		if (death != NULL)
1464 			delete death;
1465 
1466 		cachedDeathSem = -1;
1467 	}
1468 
1469 	state = disable_interrupts();
1470 	GRAB_THREAD_LOCK();
1471 
1472 	// remove thread from hash, so it's no longer accessible
1473 	hash_remove(sThreadHash, thread);
1474 	sUsedThreads--;
1475 
1476 	// Stop debugging for this thread
1477 	debugInfo = thread->debug_info;
1478 	clear_thread_debug_info(&thread->debug_info, true);
1479 
1480 	// Remove the select infos. We notify them a little later.
1481 	select_info* selectInfos = thread->select_infos;
1482 	thread->select_infos = NULL;
1483 
1484 	RELEASE_THREAD_LOCK();
1485 	restore_interrupts(state);
1486 
1487 	destroy_thread_debug_info(&debugInfo);
1488 
1489 	// notify select infos
1490 	select_info* info = selectInfos;
1491 	while (info != NULL) {
1492 		select_sync* sync = info->sync;
1493 
1494 		notify_select_events(info, B_EVENT_INVALID);
1495 		info = info->next;
1496 		put_select_sync(sync);
1497 	}
1498 
1499 	// shutdown the thread messaging
1500 
1501 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1502 	if (status == B_WOULD_BLOCK) {
1503 		// there is data waiting for us, so let us eat it
1504 		thread_id sender;
1505 
1506 		delete_sem(thread->msg.write_sem);
1507 			// first, let's remove all possibly waiting writers
1508 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1509 	} else {
1510 		// we probably own the semaphore here, and we're the last to do so
1511 		delete_sem(thread->msg.write_sem);
1512 	}
1513 	// now we can safely remove the msg.read_sem
1514 	delete_sem(thread->msg.read_sem);
1515 
1516 	// fill all death entries and delete the sem that others will use to wait on us
1517 	{
1518 		sem_id cachedExitSem = thread->exit.sem;
1519 		cpu_status state;
1520 
1521 		state = disable_interrupts();
1522 		GRAB_THREAD_LOCK();
1523 
1524 		// make sure no one will grab this semaphore again
1525 		thread->exit.sem = -1;
1526 
1527 		// fill all death entries
1528 		death_entry* entry = NULL;
1529 		while ((entry = (struct death_entry*)list_get_next_item(
1530 				&thread->exit.waiters, entry)) != NULL) {
1531 			entry->status = thread->exit.status;
1532 			entry->reason = thread->exit.reason;
1533 			entry->signal = thread->exit.signal;
1534 		}
1535 
1536 		RELEASE_THREAD_LOCK();
1537 		restore_interrupts(state);
1538 
1539 		delete_sem(cachedExitSem);
1540 	}
1541 
1542 	// notify the debugger
1543 	if (teamID != team_get_kernel_team_id())
1544 		user_debug_thread_deleted(teamID, thread->id);
1545 
1546 	// enqueue in the undertaker list and reschedule for the last time
1547 	UndertakerEntry undertakerEntry(thread, teamID, cachedDeathSem);
1548 
1549 	disable_interrupts();
1550 	GRAB_THREAD_LOCK();
1551 
1552 	sUndertakerEntries.Add(&undertakerEntry);
1553 	sUndertakerCondition.NotifyOne(true);
1554 
1555 	thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
1556 	scheduler_reschedule();
1557 
1558 	panic("never can get here\n");
1559 }
1560 
1561 
1562 struct thread *
1563 thread_get_thread_struct(thread_id id)
1564 {
1565 	struct thread *thread;
1566 	cpu_status state;
1567 
1568 	state = disable_interrupts();
1569 	GRAB_THREAD_LOCK();
1570 
1571 	thread = thread_get_thread_struct_locked(id);
1572 
1573 	RELEASE_THREAD_LOCK();
1574 	restore_interrupts(state);
1575 
1576 	return thread;
1577 }
1578 
1579 
1580 struct thread *
1581 thread_get_thread_struct_locked(thread_id id)
1582 {
1583 	struct thread_key key;
1584 
1585 	key.id = id;
1586 
1587 	return (struct thread*)hash_lookup(sThreadHash, &key);
1588 }
1589 
1590 
1591 /*!
1592 	Called in the interrupt handler code when a thread enters
1593 	the kernel for any reason.
1594 	Only tracks time for now.
1595 	Interrupts are disabled.
1596 */
1597 void
1598 thread_at_kernel_entry(bigtime_t now)
1599 {
1600 	struct thread *thread = thread_get_current_thread();
1601 
1602 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1603 
1604 	// track user time
1605 	thread->user_time += now - thread->last_time;
1606 	thread->last_time = now;
1607 
1608 	thread->in_kernel = true;
1609 }
1610 
1611 
1612 /*!
1613 	Called whenever a thread exits kernel space to user space.
1614 	Tracks time, handles signals, ...
1615 	Interrupts must be enabled. When the function returns, interrupts will be
1616 	disabled.
1617 */
1618 void
1619 thread_at_kernel_exit(void)
1620 {
1621 	struct thread *thread = thread_get_current_thread();
1622 
1623 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1624 
1625 	while (handle_signals(thread)) {
1626 		InterruptsSpinLocker _(gThreadSpinlock);
1627 		scheduler_reschedule();
1628 	}
1629 
1630 	disable_interrupts();
1631 
1632 	thread->in_kernel = false;
1633 
1634 	// track kernel time
1635 	bigtime_t now = system_time();
1636 	thread->kernel_time += now - thread->last_time;
1637 	thread->last_time = now;
1638 }
1639 
1640 
1641 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1642 	and no debugging shall be done.
1643 	Interrupts must be disabled.
1644 */
1645 void
1646 thread_at_kernel_exit_no_signals(void)
1647 {
1648 	struct thread *thread = thread_get_current_thread();
1649 
1650 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1651 
1652 	thread->in_kernel = false;
1653 
1654 	// track kernel time
1655 	bigtime_t now = system_time();
1656 	thread->kernel_time += now - thread->last_time;
1657 	thread->last_time = now;
1658 }
1659 
1660 
1661 void
1662 thread_reset_for_exec(void)
1663 {
1664 	struct thread *thread = thread_get_current_thread();
1665 
1666 	cancel_timer(&thread->alarm);
1667 	reset_signals(thread);
1668 }
1669 
1670 
1671 /*! Inserts a thread at the tail of a queue */
1672 void
1673 thread_enqueue(struct thread *thread, struct thread_queue *queue)
1674 {
1675 	thread->queue_next = NULL;
1676 	if (queue->head == NULL) {
1677 		queue->head = thread;
1678 		queue->tail = thread;
1679 	} else {
1680 		queue->tail->queue_next = thread;
1681 		queue->tail = thread;
1682 	}
1683 }
1684 
1685 
1686 struct thread *
1687 thread_lookat_queue(struct thread_queue *queue)
1688 {
1689 	return queue->head;
1690 }
1691 
1692 
1693 struct thread *
1694 thread_dequeue(struct thread_queue *queue)
1695 {
1696 	struct thread *thread = queue->head;
1697 
1698 	if (thread != NULL) {
1699 		queue->head = thread->queue_next;
1700 		if (queue->tail == thread)
1701 			queue->tail = NULL;
1702 	}
1703 	return thread;
1704 }
1705 
1706 
1707 struct thread *
1708 thread_dequeue_id(struct thread_queue *q, thread_id id)
1709 {
1710 	struct thread *thread;
1711 	struct thread *last = NULL;
1712 
1713 	thread = q->head;
1714 	while (thread != NULL) {
1715 		if (thread->id == id) {
1716 			if (last == NULL)
1717 				q->head = thread->queue_next;
1718 			else
1719 				last->queue_next = thread->queue_next;
1720 
1721 			if (q->tail == thread)
1722 				q->tail = last;
1723 			break;
1724 		}
1725 		last = thread;
1726 		thread = thread->queue_next;
1727 	}
1728 	return thread;
1729 }
1730 
1731 
1732 thread_id
1733 allocate_thread_id(void)
1734 {
1735 	return atomic_add(&sNextThreadID, 1);
1736 }
1737 
1738 
1739 thread_id
1740 peek_next_thread_id(void)
1741 {
1742 	return atomic_get(&sNextThreadID);
1743 }
1744 
1745 
1746 /*!	Yield the CPU to other threads.
1747 	If \a force is \c true, the thread will almost certainly be unscheduled.
1748 	If \c false, it may continue to run: if there is no other thread in the
1749 	ready state, or if it has a higher priority than the other ready threads,
1750 	it still has a good chance to continue.
1751 */
1752 void
1753 thread_yield(bool force)
1754 {
1755 	if (force) {
1756 		// snooze for roughly 3 thread quantums
1757 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1758 #if 0
1759 		cpu_status state;
1760 
1761 		struct thread *thread = thread_get_current_thread();
1762 		if (thread == NULL)
1763 			return;
1764 
1765 		state = disable_interrupts();
1766 		GRAB_THREAD_LOCK();
1767 
1768 		// mark the thread as yielded, so it will not be scheduled next
1769 		//thread->was_yielded = true;
1770 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1771 		scheduler_reschedule();
1772 
1773 		RELEASE_THREAD_LOCK();
1774 		restore_interrupts(state);
1775 #endif
1776 	} else {
1777 		struct thread *thread = thread_get_current_thread();
1778 		if (thread == NULL)
1779 			return;
1780 
1781 		// Don't force the thread off the CPU, just reschedule.
1782 		InterruptsSpinLocker _(gThreadSpinlock);
1783 		scheduler_reschedule();
1784 	}
1785 }
1786 
1787 
1788 /*!
1789 	Kernel private thread creation function.
1790 
1791 	\param threadID The ID to be assigned to the new thread. If
1792 		  \code < 0 \endcode a fresh one is allocated.
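
	A minimal usage sketch (assumed caller code, not part of this file;
	\c worker_func stands in for any thread function):
	\code
	thread_id worker = spawn_kernel_thread_etc(&worker_func, "worker",
		B_NORMAL_PRIORITY, NULL, team_get_kernel_team_id(), -1);
	if (worker >= B_OK)
		resume_thread(worker);
	\endcode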
1793 */
1794 thread_id
1795 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1796 	void *arg, team_id team, thread_id threadID)
1797 {
1798 	thread_creation_attributes attributes;
1799 	attributes.entry = (thread_entry_func)function;
1800 	attributes.name = name;
1801 	attributes.priority = priority;
1802 	attributes.args1 = arg;
1803 	attributes.args2 = NULL;
1804 	attributes.stack_address = NULL;
1805 	attributes.stack_size = 0;
1806 	attributes.team = team;
1807 	attributes.thread = threadID;
1808 
1809 	return create_thread(attributes, true);
1810 }
1811 
1812 
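/*!	Waits for the thread with the given ID to exit. A death entry is attached
	to the thread (or, if the thread is already gone, looked up in the current
	team's dead children/dead threads lists), the thread is resumed so that a
	suspended thread cannot block the waiter forever, and the caller then
	blocks on the thread's exit semaphore. The semaphore being deleted
	(\c B_BAD_SEM_ID) indicates the normal exit path and yields the thread's
	return code in \a _returnCode.
*/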
1813 status_t
1814 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1815 	status_t *_returnCode)
1816 {
1817 	sem_id exitSem = B_BAD_THREAD_ID;
1818 	struct death_entry death;
1819 	job_control_entry* freeDeath = NULL;
1820 	struct thread *thread;
1821 	cpu_status state;
1822 	status_t status = B_OK;
1823 
1824 	if (id < B_OK)
1825 		return B_BAD_THREAD_ID;
1826 
1827 	// we need to resume the thread we're waiting for first
1828 
1829 	state = disable_interrupts();
1830 	GRAB_THREAD_LOCK();
1831 
1832 	thread = thread_get_thread_struct_locked(id);
1833 	if (thread != NULL) {
1834 		// remember the semaphore we have to wait on and place our death entry
1835 		exitSem = thread->exit.sem;
1836 		list_add_link_to_head(&thread->exit.waiters, &death);
1837 	}
1838 
1839 	death_entry* threadDeathEntry = NULL;
1840 
1841 	RELEASE_THREAD_LOCK();
1842 
1843 	if (thread == NULL) {
1844 		// we couldn't find this thread - maybe it's already gone, and we'll
1845 		// find its death entry in our team
1846 		GRAB_TEAM_LOCK();
1847 
1848 		struct team* team = thread_get_current_thread()->team;
1849 
1850 		// check the child death entries first (i.e. main threads of child
1851 		// teams)
1852 		bool deleteEntry;
1853 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1854 		if (freeDeath != NULL) {
1855 			death.status = freeDeath->status;
1856 			if (!deleteEntry)
1857 				freeDeath = NULL;
1858 		} else {
1859 			// check the thread death entries of the team (non-main threads)
1860 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1861 					&team->dead_threads, threadDeathEntry)) != NULL) {
1862 				if (threadDeathEntry->thread == id) {
1863 					list_remove_item(&team->dead_threads, threadDeathEntry);
1864 					team->dead_threads_count--;
1865 					death.status = threadDeathEntry->status;
1866 					break;
1867 				}
1868 			}
1869 
1870 			if (threadDeathEntry == NULL)
1871 				status = B_BAD_THREAD_ID;
1872 		}
1873 
1874 		RELEASE_TEAM_LOCK();
1875 	}
1876 
1877 	restore_interrupts(state);
1878 
1879 	if (thread == NULL && status == B_OK) {
1880 		// we found the thread's death entry in our team
1881 		if (_returnCode)
1882 			*_returnCode = death.status;
1883 
1884 		delete freeDeath;
1885 		free(threadDeathEntry);
1886 		return B_OK;
1887 	}
1888 
1889 	// we need to wait for the death of the thread
1890 
1891 	if (exitSem < B_OK)
1892 		return B_BAD_THREAD_ID;
1893 
1894 	resume_thread(id);
1895 		// make sure we don't wait forever on a suspended thread
1896 
1897 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1898 
1899 	if (status == B_OK) {
1900 		// this should never happen as the thread deletes the semaphore on exit
1901 		panic("could acquire exit_sem for thread %ld\n", id);
1902 	} else if (status == B_BAD_SEM_ID) {
1903 		// this is the way the thread normally exits
1904 		status = B_OK;
1905 
1906 		if (_returnCode)
1907 			*_returnCode = death.status;
1908 	} else {
1909 		// We were probably interrupted; we need to remove our death entry now.
1910 		state = disable_interrupts();
1911 		GRAB_THREAD_LOCK();
1912 
1913 		thread = thread_get_thread_struct_locked(id);
1914 		if (thread != NULL)
1915 			list_remove_link(&death);
1916 
1917 		RELEASE_THREAD_LOCK();
1918 		restore_interrupts(state);
1919 
1920 		// If the thread is already gone, we need to wait for its exit semaphore
1921 		// to make sure our death entry stays valid - it won't take long
1922 		if (thread == NULL)
1923 			acquire_sem(exitSem);
1924 	}
1925 
1926 	return status;
1927 }
1928 
1929 
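/*!	Registers a select info with the given thread. Only \c B_EVENT_INVALID is
	currently supported; registered infos are notified when the thread exits
	(see thread_exit()).
*/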
1930 status_t
1931 select_thread(int32 id, struct select_info* info, bool kernel)
1932 {
1933 	InterruptsSpinLocker locker(gThreadSpinlock);
1934 
1935 	// get thread
1936 	struct thread* thread = thread_get_thread_struct_locked(id);
1937 	if (thread == NULL)
1938 		return B_BAD_THREAD_ID;
1939 
1940 	// We support only B_EVENT_INVALID at the moment.
1941 	info->selected_events &= B_EVENT_INVALID;
1942 
1943 	// add info to list
1944 	if (info->selected_events != 0) {
1945 		info->next = thread->select_infos;
1946 		thread->select_infos = info;
1947 
1948 		// we need a sync reference
1949 		atomic_add(&info->sync->ref_count, 1);
1950 	}
1951 
1952 	return B_OK;
1953 }
1954 
1955 
1956 status_t
1957 deselect_thread(int32 id, struct select_info* info, bool kernel)
1958 {
1959 	InterruptsSpinLocker locker(gThreadSpinlock);
1960 
1961 	// get thread
1962 	struct thread* thread = thread_get_thread_struct_locked(id);
1963 	if (thread == NULL)
1964 		return B_BAD_THREAD_ID;
1965 
1966 	// remove info from list
1967 	select_info** infoLocation = &thread->select_infos;
1968 	while (*infoLocation != NULL && *infoLocation != info)
1969 		infoLocation = &(*infoLocation)->next;
1970 
1971 	if (*infoLocation != info)
1972 		return B_OK;
1973 
1974 	*infoLocation = info->next;
1975 
1976 	locker.Unlock();
1977 
1978 	// surrender sync reference
1979 	put_select_sync(info->sync);
1980 
1981 	return B_OK;
1982 }
1983 
1984 
1985 int32
1986 thread_max_threads(void)
1987 {
1988 	return sMaxThreads;
1989 }
1990 
1991 
1992 int32
1993 thread_used_threads(void)
1994 {
1995 	return sUsedThreads;
1996 }
1997 
1998 
1999 const char*
2000 thread_state_to_text(struct thread* thread, int32 state)
2001 {
2002 	return state_to_text(thread, state);
2003 }
2004 
2005 
2006 int32
2007 thread_get_io_priority(thread_id id)
2008 {
2009 	// take a shortcut, if it is the current thread
2010 	struct thread* thread = thread_get_current_thread();
2011 	int32 priority;
2012 	if (id == thread->id) {
		priority = thread->io_priority;
2014 		return priority < 0 ? thread->priority : priority;
2015 	}
2016 
2017 	// not the current thread -- get it
2018 	InterruptsSpinLocker locker(gThreadSpinlock);
2019 
2020 	thread = thread_get_thread_struct_locked(id);
2021 	if (thread == NULL)
2022 		return B_BAD_THREAD_ID;
2023 
2024 	priority = thread->io_priority;
2025 	return priority < 0 ? thread->priority : priority;
2026 }
2027 
2028 
2029 void
2030 thread_set_io_priority(int32 priority)
2031 {
2032 	struct thread* thread = thread_get_current_thread();
2033 	thread->io_priority = priority;
2034 }
2035 
2036 
2037 status_t
2038 thread_init(kernel_args *args)
2039 {
2040 	uint32 i;
2041 
2042 	TRACE(("thread_init: entry\n"));
2043 
2044 	// create the thread hash table
2045 	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
2046 		&thread_struct_compare, &thread_struct_hash);
2047 
	// zero out the dead thread structure queue
2049 	memset(&dead_q, 0, sizeof(dead_q));
2050 
2051 	if (arch_thread_init(args) < B_OK)
2052 		panic("arch_thread_init() failed!\n");
2053 
	// skip all thread IDs up to and including the reserved B_SYSTEM_TEAM
2055 	sNextThreadID = B_SYSTEM_TEAM + 1;
2056 
2057 	// create an idle thread for each cpu
2058 
2059 	for (i = 0; i < args->num_cpus; i++) {
2060 		struct thread *thread;
2061 		area_info info;
2062 		char name[64];
2063 
2064 		sprintf(name, "idle thread %lu", i + 1);
2065 		thread = create_thread_struct(&sIdleThreads[i], name,
2066 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
2067 		if (thread == NULL) {
2068 			panic("error creating idle thread struct\n");
2069 			return B_NO_MEMORY;
2070 		}
2071 
2072 		thread->team = team_get_kernel_team();
2073 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
2074 		thread->state = B_THREAD_RUNNING;
2075 		thread->next_state = B_THREAD_READY;
2076 		sprintf(name, "idle thread %lu kstack", i + 1);
2077 		thread->kernel_stack_area = find_area(name);
2078 		thread->entry = NULL;
2079 
2080 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2081 			panic("error finding idle kstack area\n");
2082 
2083 		thread->kernel_stack_base = (addr_t)info.address;
2084 		thread->kernel_stack_top = thread->kernel_stack_base + info.size;
2085 
2086 		hash_insert(sThreadHash, thread);
2087 		insert_thread_into_team(thread->team, thread);
2088 	}
2089 	sUsedThreads = args->num_cpus;
2090 
	// set up the undertaker data and start the undertaker thread
2092 	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2093 	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2094 
2095 	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2096 		B_DISPLAY_PRIORITY, NULL);
2097 	if (undertakerThread < 0)
2098 		panic("Failed to create undertaker thread!");
2099 	resume_thread(undertakerThread);
2100 
2101 	// set up some debugger commands
2102 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2103 		"[ <team> ]\n"
2104 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2105 		"all threads of the specified team.\n"
2106 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2107 	add_debugger_command_etc("ready", &dump_thread_list,
2108 		"List all ready threads",
2109 		"\n"
2110 		"Prints a list of all threads in ready state.\n", 0);
2111 	add_debugger_command_etc("running", &dump_thread_list,
2112 		"List all running threads",
2113 		"\n"
2114 		"Prints a list of all threads in running state.\n", 0);
2115 	add_debugger_command_etc("waiting", &dump_thread_list,
2116 		"List all waiting threads (optionally for a specific semaphore)",
2117 		"[ <sem> ]\n"
2118 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2119 		"specified, only the threads waiting on that semaphore are listed.\n"
2120 		"  <sem>  - ID of the semaphore.\n", 0);
2121 	add_debugger_command_etc("realtime", &dump_thread_list,
2122 		"List all realtime threads",
2123 		"\n"
2124 		"Prints a list of all threads with realtime priority.\n", 0);
2125 	add_debugger_command_etc("thread", &dump_thread_info,
2126 		"Dump info about a particular thread",
2127 		"[ -s ] ( <id> | <address> | <name> )*\n"
2128 		"Prints information about the specified thread. If no argument is\n"
2129 		"given the current thread is selected.\n"
2130 		"  -s         - Print info in compact table form (like \"threads\").\n"
2131 		"  <id>       - The ID of the thread.\n"
2132 		"  <address>  - The address of the thread structure.\n"
2133 		"  <name>     - The thread's name.\n", 0);
2134 	add_debugger_command_etc("calling", &dump_thread_list,
2135 		"Show all threads that have a specific address in their call chain",
2136 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2137 	add_debugger_command_etc("unreal", &make_thread_unreal,
2138 		"Set realtime priority threads to normal priority",
2139 		"[ <id> ]\n"
2140 		"Sets the priority of all realtime threads or, if given, the one\n"
2141 		"with the specified ID to \"normal\" priority.\n"
2142 		"  <id>  - The ID of the thread.\n", 0);
2143 	add_debugger_command_etc("suspend", &make_thread_suspended,
2144 		"Suspend a thread",
2145 		"[ <id> ]\n"
2146 		"Suspends the thread with the given ID. If no ID argument is given\n"
2147 		"the current thread is selected.\n"
2148 		"  <id>  - The ID of the thread.\n", 0);
2149 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2150 		"<id>\n"
2151 		"Resumes the specified thread, if it is currently suspended.\n"
2152 		"  <id>  - The ID of the thread.\n", 0);
2153 	add_debugger_command_etc("drop", &drop_into_debugger,
2154 		"Drop a thread into the userland debugger",
2155 		"<id>\n"
2156 		"Drops the specified (userland) thread into the userland debugger\n"
2157 		"after leaving the kernel debugger.\n"
2158 		"  <id>  - The ID of the thread.\n", 0);
2159 	add_debugger_command_etc("priority", &set_thread_prio,
2160 		"Set a thread's priority",
2161 		"<priority> [ <id> ]\n"
2162 		"Sets the priority of the thread with the specified ID to the given\n"
2163 		"priority. If no thread ID is given, the current thread is selected.\n"
2164 		"  <priority>  - The thread's new priority (0 - 120)\n"
2165 		"  <id>        - The ID of the thread.\n", 0);
2166 
2167 	return B_OK;
2168 }
2169 
2170 
2171 status_t
2172 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2173 {
2174 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2175 	// so that get_current_cpu and friends will work, which is crucial for
2176 	// a lot of low level routines
2177 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2178 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2179 	return B_OK;
2180 }
2181 
2182 
2183 //	#pragma mark - thread blocking API
2184 
2185 
2186 static status_t
2187 thread_block_timeout(timer* timer)
2188 {
2189 	// The timer has been installed with B_TIMER_ACQUIRE_THREAD_LOCK, so
2190 	// we're holding the thread lock already. This makes things comfortably
2191 	// easy.
2192 
2193 	struct thread* thread = (struct thread*)timer->user_data;
2194 	if (thread_unblock_locked(thread, B_TIMED_OUT)) {
2195 		// We actually woke up the thread. If it has a higher priority than the
2196 		// currently running thread, we invoke the scheduler.
2197 		// TODO: Is this really such a good idea or should we do that only when
2198 		// the woken up thread has realtime priority?
2199 		if (thread->priority > thread_get_current_thread()->priority)
2200 			return B_INVOKE_SCHEDULER;
2201 	}
2202 
2203 	return B_HANDLED_INTERRUPT;
2204 }
2205 
2206 
2207 status_t
2208 thread_block()
2209 {
2210 	InterruptsSpinLocker _(gThreadSpinlock);
2211 	return thread_block_locked(thread_get_current_thread());
2212 }
2213 
2214 
2215 bool
2216 thread_unblock(status_t threadID, status_t status)
2217 {
2218 	InterruptsSpinLocker _(gThreadSpinlock);
2219 
2220 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2221 	if (thread == NULL)
2222 		return false;
2223 	return thread_unblock_locked(thread, status);
2224 }
2225 
2226 
2227 status_t
2228 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2229 {
2230 	InterruptsSpinLocker _(gThreadSpinlock);
2231 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
2232 }
2233 
2234 
2235 status_t
2236 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
2237 {
2238 	struct thread* thread = thread_get_current_thread();
2239 
	// If someone has already unblocked us (i.e. wait.status is no longer the
	// initial 1), return the wakeup status right away.
	if (thread->wait.status != 1)
		return thread->wait.status;
2242 
2243 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2244 		&& timeout != B_INFINITE_TIMEOUT;
2245 
2246 	if (useTimer) {
2247 		// Timer flags: absolute/relative + "acquire thread lock". The latter
2248 		// avoids nasty race conditions and deadlock problems that could
2249 		// otherwise occur between our cancel_timer() and a concurrently
2250 		// executing thread_block_timeout().
2251 		uint32 timerFlags;
2252 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2253 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2254 		} else {
2255 			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2256 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2257 				timeout -= rtc_boot_time();
2258 		}
2259 		timerFlags |= B_TIMER_ACQUIRE_THREAD_LOCK;
2260 
2261 		// install the timer
2262 		thread->wait.unblock_timer.user_data = thread;
2263 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2264 			timerFlags);
2265 	}
2266 
2267 	// block
2268 	status_t error = thread_block_locked(thread);
2269 
2270 	// cancel timer, if it didn't fire
2271 	if (error != B_TIMED_OUT && useTimer)
2272 		cancel_timer(&thread->wait.unblock_timer);
2273 
2274 	return error;
2275 }
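

/*!
	Usage sketch for the blocking API (illustrative only): the waiting side
	prepares to block and then blocks while holding the thread spinlock; the
	waking side hands in the wakeup status via thread_unblock(). The
	"waiterID" variable below is hypothetical.

		// waiting side
		InterruptsSpinLocker locker(gThreadSpinlock);
		struct thread* waiter = thread_get_current_thread();
		thread_prepare_to_block(waiter, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "example wait");
		status_t error = thread_block_with_timeout_locked(B_RELATIVE_TIMEOUT,
			1000000);
			// error is B_OK, B_TIMED_OUT, or B_INTERRUPTED

		// waking side (some other thread or interrupt handler)
		thread_unblock(waiterID, B_OK);
*/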
2276 
2277 
2278 /*!	Thread spinlock must be held.
2279 */
2280 static status_t
2281 user_unblock_thread(thread_id threadID, status_t status)
2282 {
2283 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2284 	if (thread == NULL)
2285 		return B_BAD_THREAD_ID;
2286 	if (thread->user_thread == NULL)
2287 		return B_NOT_ALLOWED;
2288 
2289 	thread_unblock_locked(thread, status);
2290 
2291 	return B_OK;
2292 }
2293 
2294 
2295 //	#pragma mark - public kernel API
2296 
2297 
2298 void
2299 exit_thread(status_t returnValue)
2300 {
2301 	struct thread *thread = thread_get_current_thread();
2302 
2303 	thread->exit.status = returnValue;
2304 	thread->exit.reason = THREAD_RETURN_EXIT;
2305 
	// If called from a kernel thread we don't deliver the signal but exit
	// directly instead, to keep the user space behaviour of this function.
2309 	if (thread->team != team_get_kernel_team())
2310 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2311 	else
2312 		thread_exit();
2313 }
2314 
2315 
2316 status_t
2317 kill_thread(thread_id id)
2318 {
2319 	if (id <= 0)
2320 		return B_BAD_VALUE;
2321 
2322 	return send_signal(id, SIGKILLTHR);
2323 }
2324 
2325 
2326 status_t
2327 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2328 {
2329 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2330 }
2331 
2332 
2333 int32
2334 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2335 {
2336 	return receive_data_etc(sender, buffer, bufferSize, 0);
2337 }
2338 
2339 
2340 bool
2341 has_data(thread_id thread)
2342 {
	int32 count;

	// TODO: The thread parameter is currently ignored; this only checks the
	// current thread's message queue.
	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
			&count) != B_OK)
		return false;

	return count != 0;
2350 }
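

/*!
	Usage sketch (illustrative only): simple inter-thread messaging via
	send_data()/receive_data(); has_data() can be used to poll whether a
	message is pending before blocking in receive_data(). The "targetThread"
	variable below is hypothetical.

		// sender
		int32 value = 42;
		send_data(targetThread, 'exmp', &value, sizeof(value));

		// receiver (running as targetThread)
		thread_id sender;
		int32 received;
		int32 code = receive_data(&sender, &received, sizeof(received));
			// code == 'exmp', received == 42
*/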
2351 
2352 
2353 status_t
2354 _get_thread_info(thread_id id, thread_info *info, size_t size)
2355 {
2356 	status_t status = B_OK;
2357 	struct thread *thread;
2358 	cpu_status state;
2359 
2360 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2361 		return B_BAD_VALUE;
2362 
2363 	state = disable_interrupts();
2364 	GRAB_THREAD_LOCK();
2365 
2366 	thread = thread_get_thread_struct_locked(id);
2367 	if (thread == NULL) {
2368 		status = B_BAD_VALUE;
2369 		goto err;
2370 	}
2371 
2372 	fill_thread_info(thread, info, size);
2373 
2374 err:
2375 	RELEASE_THREAD_LOCK();
2376 	restore_interrupts(state);
2377 
2378 	return status;
2379 }
2380 
2381 
2382 status_t
2383 _get_next_thread_info(team_id team, int32 *_cookie, thread_info *info,
2384 	size_t size)
2385 {
2386 	status_t status = B_BAD_VALUE;
2387 	struct thread *thread = NULL;
2388 	cpu_status state;
2389 	int slot;
2390 	thread_id lastThreadID;
2391 
2392 	if (info == NULL || size != sizeof(thread_info) || team < B_OK)
2393 		return B_BAD_VALUE;
2394 
2395 	if (team == B_CURRENT_TEAM)
2396 		team = team_get_current_team_id();
2397 	else if (!team_is_valid(team))
2398 		return B_BAD_VALUE;
2399 
2400 	slot = *_cookie;
2401 
2402 	state = disable_interrupts();
2403 	GRAB_THREAD_LOCK();
2404 
2405 	lastThreadID = peek_next_thread_id();
2406 	if (slot >= lastThreadID)
2407 		goto err;
2408 
2409 	while (slot < lastThreadID
2410 		&& (!(thread = thread_get_thread_struct_locked(slot))
2411 			|| thread->team->id != team))
2412 		slot++;
2413 
2414 	if (thread != NULL && thread->team->id == team) {
2415 		fill_thread_info(thread, info, size);
2416 
2417 		*_cookie = slot + 1;
2418 		status = B_OK;
2419 	}
2420 
2421 err:
2422 	RELEASE_THREAD_LOCK();
2423 	restore_interrupts(state);
2424 
2425 	return status;
2426 }
2427 
2428 
2429 thread_id
2430 find_thread(const char *name)
2431 {
2432 	struct hash_iterator iterator;
2433 	struct thread *thread;
2434 	cpu_status state;
2435 
2436 	if (name == NULL)
2437 		return thread_get_current_thread_id();
2438 
2439 	state = disable_interrupts();
2440 	GRAB_THREAD_LOCK();
2441 
2442 	// ToDo: this might not be in the same order as find_thread() in BeOS
2443 	//		which could be theoretically problematic.
2444 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2445 	//		cheap either - although this function is probably used very rarely.
2446 
2447 	hash_open(sThreadHash, &iterator);
2448 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
2449 			!= NULL) {
2450 		// Search through hash
2451 		if (thread->name != NULL && !strcmp(thread->name, name)) {
2452 			thread_id id = thread->id;
2453 
2454 			RELEASE_THREAD_LOCK();
2455 			restore_interrupts(state);
2456 			return id;
2457 		}
2458 	}
2459 
2460 	RELEASE_THREAD_LOCK();
2461 	restore_interrupts(state);
2462 
2463 	return B_NAME_NOT_FOUND;
2464 }
2465 
2466 
2467 status_t
2468 rename_thread(thread_id id, const char *name)
2469 {
2470 	struct thread *thread = thread_get_current_thread();
2471 	status_t status = B_BAD_THREAD_ID;
2472 	cpu_status state;
2473 
2474 	if (name == NULL)
2475 		return B_BAD_VALUE;
2476 
2477 	state = disable_interrupts();
2478 	GRAB_THREAD_LOCK();
2479 
2480 	if (thread->id != id)
2481 		thread = thread_get_thread_struct_locked(id);
2482 
2483 	if (thread != NULL) {
2484 		if (thread->team == thread_get_current_thread()->team) {
2485 			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2486 			status = B_OK;
2487 		} else
2488 			status = B_NOT_ALLOWED;
2489 	}
2490 
2491 	RELEASE_THREAD_LOCK();
2492 	restore_interrupts(state);
2493 
2494 	return status;
2495 }
2496 
2497 
2498 status_t
2499 set_thread_priority(thread_id id, int32 priority)
2500 {
2501 	struct thread *thread;
2502 	int32 oldPriority;
2503 
2504 	// make sure the passed in priority is within bounds
2505 	if (priority > THREAD_MAX_SET_PRIORITY)
2506 		priority = THREAD_MAX_SET_PRIORITY;
2507 	if (priority < THREAD_MIN_SET_PRIORITY)
2508 		priority = THREAD_MIN_SET_PRIORITY;
2509 
2510 	thread = thread_get_current_thread();
2511 	if (thread->id == id) {
2512 		if (thread_is_idle_thread(thread))
2513 			return B_NOT_ALLOWED;
2514 
		// We are changing our own thread's priority, so we know it isn't in
		// the run queue and we can manipulate its structure directly.
2517 		oldPriority = thread->priority;
2518 			// Note that this might not return the correct value if we are
2519 			// preempted here, and another thread changes our priority before
2520 			// the next line is executed.
2521 		thread->priority = thread->next_priority = priority;
2522 	} else {
2523 		InterruptsSpinLocker _(gThreadSpinlock);
2524 
2525 		thread = thread_get_thread_struct_locked(id);
2526 		if (thread == NULL)
2527 			return B_BAD_THREAD_ID;
2528 
2529 		if (thread_is_idle_thread(thread))
2530 			return B_NOT_ALLOWED;
2531 
2532 		oldPriority = thread->priority;
2533 		scheduler_set_thread_priority(thread, priority);
2534 	}
2535 
2536 	return oldPriority;
2537 }
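

/*!
	Usage sketch (illustrative only): temporarily raising the current thread's
	priority and restoring it afterwards. As on BeOS, the return value is the
	previous priority (or a negative error code).

		int32 oldPriority = set_thread_priority(find_thread(NULL),
			B_URGENT_DISPLAY_PRIORITY);
		// ... latency sensitive work ...
		if (oldPriority >= 0)
			set_thread_priority(find_thread(NULL), oldPriority);
*/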
2538 
2539 
2540 status_t
2541 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2542 {
2543 	status_t status;
2544 
2545 	if (timebase != B_SYSTEM_TIMEBASE)
2546 		return B_BAD_VALUE;
2547 
2548 	InterruptsSpinLocker _(gThreadSpinlock);
2549 	struct thread* thread = thread_get_current_thread();
2550 
2551 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, NULL);
2552 	status = thread_block_with_timeout_locked(flags, timeout);
2553 
2554 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2555 		return B_OK;
2556 
2557 	return status;
2558 }
2559 
2560 
2561 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2562 status_t
2563 snooze(bigtime_t timeout)
2564 {
2565 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2566 }
2567 
2568 
2569 /*!
2570 	snooze_until() for internal kernel use only; doesn't interrupt on
2571 	signals.
2572 */
2573 status_t
2574 snooze_until(bigtime_t timeout, int timebase)
2575 {
2576 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2577 }
2578 
2579 
2580 status_t
2581 wait_for_thread(thread_id thread, status_t *_returnCode)
2582 {
2583 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2584 }
2585 
2586 
2587 status_t
2588 suspend_thread(thread_id id)
2589 {
2590 	if (id <= 0)
2591 		return B_BAD_VALUE;
2592 
2593 	return send_signal(id, SIGSTOP);
2594 }
2595 
2596 
2597 status_t
2598 resume_thread(thread_id id)
2599 {
2600 	if (id <= 0)
2601 		return B_BAD_VALUE;
2602 
2603 	return send_signal_etc(id, SIGCONT, SIGNAL_FLAG_DONT_RESTART_SYSCALL);
		// This retains compatibility with BeOS, which documents the
		// combination of suspend_thread() and resume_thread() as a way to
		// interrupt threads waiting on semaphores.
2607 }
2608 
2609 
2610 thread_id
2611 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2612 	void *arg)
2613 {
2614 	thread_creation_attributes attributes;
2615 	attributes.entry = (thread_entry_func)function;
2616 	attributes.name = name;
2617 	attributes.priority = priority;
2618 	attributes.args1 = arg;
2619 	attributes.args2 = NULL;
2620 	attributes.stack_address = NULL;
2621 	attributes.stack_size = 0;
2622 	attributes.team = team_get_kernel_team()->id;
2623 	attributes.thread = -1;
2624 
2625 	return create_thread(attributes, true);
2626 }
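

/*!
	Usage sketch (illustrative only): spawning a kernel worker thread and
	collecting its exit status. The worker_entry() function below is
	hypothetical.

		static int32
		worker_entry(void* data)
		{
			// ... do the actual work ...
			return 0;
		}

		thread_id worker = spawn_kernel_thread(&worker_entry, "example worker",
			B_NORMAL_PRIORITY, NULL);
		if (worker >= 0) {
			resume_thread(worker);
				// newly spawned threads start out suspended
			status_t returnCode;
			wait_for_thread(worker, &returnCode);
		}
*/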
2627 
2628 
2629 int
2630 getrlimit(int resource, struct rlimit * rlp)
2631 {
2632 	status_t error = common_getrlimit(resource, rlp);
2633 	if (error != B_OK) {
2634 		errno = error;
2635 		return -1;
2636 	}
2637 
2638 	return 0;
2639 }
2640 
2641 
2642 int
2643 setrlimit(int resource, const struct rlimit * rlp)
2644 {
2645 	status_t error = common_setrlimit(resource, rlp);
2646 	if (error != B_OK) {
2647 		errno = error;
2648 		return -1;
2649 	}
2650 
2651 	return 0;
2652 }
2653 
2654 
2655 //	#pragma mark - syscalls
2656 
2657 
2658 void
2659 _user_exit_thread(status_t returnValue)
2660 {
2661 	exit_thread(returnValue);
2662 }
2663 
2664 
2665 status_t
2666 _user_kill_thread(thread_id thread)
2667 {
2668 	return kill_thread(thread);
2669 }
2670 
2671 
2672 status_t
2673 _user_resume_thread(thread_id thread)
2674 {
2675 	return resume_thread(thread);
2676 }
2677 
2678 
2679 status_t
2680 _user_suspend_thread(thread_id thread)
2681 {
2682 	return suspend_thread(thread);
2683 }
2684 
2685 
2686 status_t
2687 _user_rename_thread(thread_id thread, const char *userName)
2688 {
2689 	char name[B_OS_NAME_LENGTH];
2690 
2691 	if (!IS_USER_ADDRESS(userName)
2692 		|| userName == NULL
2693 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2694 		return B_BAD_ADDRESS;
2695 
2696 	return rename_thread(thread, name);
2697 }
2698 
2699 
2700 int32
2701 _user_set_thread_priority(thread_id thread, int32 newPriority)
2702 {
2703 	return set_thread_priority(thread, newPriority);
2704 }
2705 
2706 
2707 thread_id
2708 _user_spawn_thread(thread_creation_attributes* userAttributes)
2709 {
2710 	thread_creation_attributes attributes;
2711 	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
2712 		|| user_memcpy(&attributes, userAttributes,
2713 				sizeof(attributes)) != B_OK) {
2714 		return B_BAD_ADDRESS;
2715 	}
2716 
2717 	if (attributes.stack_size != 0
2718 		&& (attributes.stack_size < MIN_USER_STACK_SIZE
2719 			|| attributes.stack_size > MAX_USER_STACK_SIZE)) {
2720 		return B_BAD_VALUE;
2721 	}
2722 
2723 	char name[B_OS_NAME_LENGTH];
2724 	thread_id threadID;
2725 
	if (!IS_USER_ADDRESS(attributes.entry) || attributes.entry == NULL
		|| (attributes.stack_address != NULL
			&& !IS_USER_ADDRESS(attributes.stack_address))
		|| (attributes.name != NULL && (!IS_USER_ADDRESS(attributes.name)
			|| user_strlcpy(name, attributes.name, B_OS_NAME_LENGTH) < 0)))
2731 		return B_BAD_ADDRESS;
2732 
2733 	attributes.name = attributes.name != NULL ? name : "user thread";
2734 	attributes.team = thread_get_current_thread()->team->id;
2735 	attributes.thread = -1;
2736 
2737 	threadID = create_thread(attributes, false);
2738 
2739 	if (threadID >= 0)
2740 		user_debug_thread_created(threadID);
2741 
2742 	return threadID;
2743 }
2744 
2745 
2746 status_t
2747 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2748 {
2749 	// NOTE: We only know the system timebase at the moment.
2750 	syscall_restart_handle_timeout_pre(flags, timeout);
2751 
2752 	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2753 
2754 	return syscall_restart_handle_timeout_post(error, timeout);
2755 }
2756 
2757 
2758 void
2759 _user_thread_yield(void)
2760 {
2761 	thread_yield(true);
2762 }
2763 
2764 
2765 status_t
2766 _user_get_thread_info(thread_id id, thread_info *userInfo)
2767 {
2768 	thread_info info;
2769 	status_t status;
2770 
2771 	if (!IS_USER_ADDRESS(userInfo))
2772 		return B_BAD_ADDRESS;
2773 
2774 	status = _get_thread_info(id, &info, sizeof(thread_info));
2775 
2776 	if (status >= B_OK
2777 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2778 		return B_BAD_ADDRESS;
2779 
2780 	return status;
2781 }
2782 
2783 
2784 status_t
2785 _user_get_next_thread_info(team_id team, int32 *userCookie,
2786 	thread_info *userInfo)
2787 {
2788 	status_t status;
2789 	thread_info info;
2790 	int32 cookie;
2791 
2792 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2793 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2794 		return B_BAD_ADDRESS;
2795 
2796 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2797 	if (status < B_OK)
2798 		return status;
2799 
2800 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2801 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2802 		return B_BAD_ADDRESS;
2803 
2804 	return status;
2805 }
2806 
2807 
2808 thread_id
2809 _user_find_thread(const char *userName)
2810 {
2811 	char name[B_OS_NAME_LENGTH];
2812 
2813 	if (userName == NULL)
2814 		return find_thread(NULL);
2815 
2816 	if (!IS_USER_ADDRESS(userName)
2817 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2818 		return B_BAD_ADDRESS;
2819 
2820 	return find_thread(name);
2821 }
2822 
2823 
2824 status_t
2825 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2826 {
2827 	status_t returnCode;
2828 	status_t status;
2829 
2830 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2831 		return B_BAD_ADDRESS;
2832 
2833 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2834 
2835 	if (status == B_OK && userReturnCode != NULL
2836 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
2837 		return B_BAD_ADDRESS;
2838 	}
2839 
2840 	return syscall_restart_handle_post(status);
2841 }
2842 
2843 
2844 bool
2845 _user_has_data(thread_id thread)
2846 {
2847 	return has_data(thread);
2848 }
2849 
2850 
2851 status_t
2852 _user_send_data(thread_id thread, int32 code, const void *buffer,
2853 	size_t bufferSize)
2854 {
2855 	if (!IS_USER_ADDRESS(buffer))
2856 		return B_BAD_ADDRESS;
2857 
2858 	return send_data_etc(thread, code, buffer, bufferSize,
2859 		B_KILL_CAN_INTERRUPT);
2860 		// supports userland buffers
2861 }
2862 
2863 
2864 status_t
2865 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2866 {
2867 	thread_id sender;
2868 	status_t code;
2869 
2870 	if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
2871 		|| !IS_USER_ADDRESS(buffer))
2872 		return B_BAD_ADDRESS;
2873 
2874 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2875 		// supports userland buffers
2876 
	if (_userSender != NULL
		&& user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
		return B_BAD_ADDRESS;
2880 
2881 	return code;
2882 }
2883 
2884 
2885 status_t
2886 _user_block_thread(uint32 flags, bigtime_t timeout)
2887 {
2888 	syscall_restart_handle_timeout_pre(flags, timeout);
2889 	flags |= B_CAN_INTERRUPT;
2890 
2891 	struct thread* thread = thread_get_current_thread();
2892 
2893 	InterruptsSpinLocker locker(gThreadSpinlock);
2894 
	// check whether we were already unblocked
2896 	if (thread->user_thread->wait_status <= 0)
2897 		return thread->user_thread->wait_status;
2898 
2899 	// nope, so wait
2900 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");
2901 	status_t status = thread_block_with_timeout_locked(flags, timeout);
2902 	thread->user_thread->wait_status = status;
2903 
2904 	return syscall_restart_handle_timeout_post(status, timeout);
2905 }
2906 
2907 
2908 status_t
2909 _user_unblock_thread(thread_id threadID, status_t status)
2910 {
2911 	InterruptsSpinLocker locker(gThreadSpinlock);
2912 	return user_unblock_thread(threadID, status);
2913 }
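

/*!
	Usage sketch (illustrative only, userland perspective, under the
	assumption that the syscall wrappers follow the usual _kern_* naming):
	_user_block_thread() and _user_unblock_thread() implement a simple
	one-shot wait protocol on top of the per-thread user_thread area. The
	"waiterID" variable below is hypothetical.

		// waiter (userland)
		get_user_thread()->wait_status = 1;
		status_t status = _kern_block_thread(B_RELATIVE_TIMEOUT, 1000000);

		// waker (userland, a different thread)
		_kern_unblock_thread(waiterID, B_OK);
*/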
2914 
2915 
2916 status_t
2917 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
2918 {
2919 	enum {
2920 		MAX_USER_THREADS_TO_UNBLOCK	= 128
2921 	};
2922 
2923 	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
2924 		return B_BAD_ADDRESS;
2925 	if (count > MAX_USER_THREADS_TO_UNBLOCK)
2926 		return B_BAD_VALUE;
2927 
2928 	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
2929 	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
2930 		return B_BAD_ADDRESS;
2931 
2932 	for (uint32 i = 0; i < count; i++)
2933 		user_unblock_thread(threads[i], status);
2934 
2935 	return B_OK;
2936 }
2937 
2938 
2939 // TODO: the following two functions don't belong here
2940 
2941 
2942 int
2943 _user_getrlimit(int resource, struct rlimit *urlp)
2944 {
2945 	struct rlimit rl;
2946 	int ret;
2947 
2948 	if (urlp == NULL)
2949 		return EINVAL;
2950 
2951 	if (!IS_USER_ADDRESS(urlp))
2952 		return B_BAD_ADDRESS;
2953 
2954 	ret = common_getrlimit(resource, &rl);
2955 
2956 	if (ret == 0) {
2957 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
2958 		if (ret < 0)
2959 			return ret;
2960 
2961 		return 0;
2962 	}
2963 
2964 	return ret;
2965 }
2966 
2967 
2968 int
2969 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
2970 {
2971 	struct rlimit resourceLimit;
2972 
2973 	if (userResourceLimit == NULL)
2974 		return EINVAL;
2975 
2976 	if (!IS_USER_ADDRESS(userResourceLimit)
2977 		|| user_memcpy(&resourceLimit, userResourceLimit,
2978 			sizeof(struct rlimit)) < B_OK)
2979 		return B_BAD_ADDRESS;
2980 
2981 	return common_setrlimit(resource, &resourceLimit);
2982 }
2983