xref: /haiku/src/system/kernel/thread.cpp (revision 0562493379cd52eb7103531f895f10bb8e77c085)
1 /*
2  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*! Threading routines */
10 
11 
12 #include <thread.h>
13 
14 #include <errno.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <sys/resource.h>
19 
20 #include <OS.h>
21 
22 #include <util/AutoLock.h>
23 #include <util/khash.h>
24 
25 #include <arch/debug.h>
26 #include <boot/kernel_args.h>
27 #include <condition_variable.h>
28 #include <cpu.h>
29 #include <int.h>
30 #include <kimage.h>
31 #include <kscheduler.h>
32 #include <ksignal.h>
33 #include <real_time_clock.h>
34 #include <smp.h>
35 #include <syscalls.h>
36 #include <syscall_restart.h>
37 #include <team.h>
38 #include <tls.h>
39 #include <user_runtime.h>
40 #include <user_thread.h>
41 #include <vfs.h>
42 #include <vm.h>
43 #include <vm_address_space.h>
44 #include <wait_for_objects.h>
45 
46 
47 //#define TRACE_THREAD
48 #ifdef TRACE_THREAD
49 #	define TRACE(x) dprintf x
50 #else
51 #	define TRACE(x) ;
52 #endif
53 
54 
55 #define THREAD_MAX_MESSAGE_SIZE		65536
56 
57 
58 struct thread_key {
59 	thread_id id;
60 };
61 
62 // global
63 spinlock gThreadSpinlock = B_SPINLOCK_INITIALIZER;
64 
65 // thread list
66 static struct thread sIdleThreads[B_MAX_CPU_COUNT];
67 static hash_table *sThreadHash = NULL;
68 static thread_id sNextThreadID = 1;
69 
70 // some arbitrarily chosen limits - should probably depend on the available
71 // memory (the limit is not yet enforced)
72 static int32 sMaxThreads = 4096;
73 static int32 sUsedThreads = 0;
74 
75 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
76 	struct thread*	thread;
77 	team_id			teamID;
78 	sem_id			deathSem;
79 
80 	UndertakerEntry(struct thread* thread, team_id teamID, sem_id deathSem)
81 		:
82 		thread(thread),
83 		teamID(teamID),
84 		deathSem(deathSem)
85 	{
86 	}
87 };
88 
89 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
90 static ConditionVariable sUndertakerCondition;
91 
92 // The dead queue is used as a pool from which to retrieve and reuse previously
93 // allocated thread structs when creating a new thread. It should be gone once
94 // the slab allocator is in.
95 static struct thread_queue dead_q;
96 
97 static void thread_kthread_entry(void);
98 static void thread_kthread_exit(void);
99 
100 
101 /*!
102 	Inserts a thread into a team.
103 	You must hold the team lock when you call this function.
104 */
105 static void
106 insert_thread_into_team(struct team *team, struct thread *thread)
107 {
108 	thread->team_next = team->thread_list;
109 	team->thread_list = thread;
110 	team->num_threads++;
111 
112 	if (team->num_threads == 1) {
113 		// this was the first thread
114 		team->main_thread = thread;
115 	}
116 	thread->team = team;
117 }
118 
119 
120 /*!
121 	Removes a thread from a team.
122 	You must hold the team lock when you call this function.
123 */
124 static void
125 remove_thread_from_team(struct team *team, struct thread *thread)
126 {
127 	struct thread *temp, *last = NULL;
128 
129 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
130 		if (temp == thread) {
131 			if (last == NULL)
132 				team->thread_list = temp->team_next;
133 			else
134 				last->team_next = temp->team_next;
135 
136 			team->num_threads--;
137 			break;
138 		}
139 		last = temp;
140 	}
141 }
142 
143 
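/*!	Hash table callback: compares a thread structure against a thread_key.
	Returns 0 if the IDs match, 1 otherwise.
*/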
144 static int
145 thread_struct_compare(void *_t, const void *_key)
146 {
147 	struct thread *thread = (struct thread*)_t;
148 	const struct thread_key *key = (const struct thread_key*)_key;
149 
150 	if (thread->id == key->id)
151 		return 0;
152 
153 	return 1;
154 }
155 
156 
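/*!	Hash table callback: computes the hash bucket for a thread structure, or
	for the lookup key if \a _t is NULL.
*/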
157 static uint32
158 thread_struct_hash(void *_t, const void *_key, uint32 range)
159 {
160 	struct thread *thread = (struct thread*)_t;
161 	const struct thread_key *key = (const struct thread_key*)_key;
162 
163 	if (thread != NULL)
164 		return thread->id % range;
165 
166 	return (uint32)key->id % range;
167 }
168 
169 
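/*!	Resets the thread's signal state: pending and blocked signals, the signal
	handler table, and the alternate signal stack.
*/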
170 static void
171 reset_signals(struct thread *thread)
172 {
173 	thread->sig_pending = 0;
174 	thread->sig_block_mask = 0;
175 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
176 	thread->signal_stack_base = 0;
177 	thread->signal_stack_size = 0;
178 	thread->signal_stack_enabled = false;
179 }
180 
181 
182 /*!
183 	Allocates and fills in a thread structure (or reuses one from the
184 	dead queue).
185 
186 	\param threadID The ID to be assigned to the new thread. If
187 		  \code < 0 \endcode a fresh one is allocated.
188 	\param inthread If non-NULL, initialize this thread struct instead of allocating one.
189 */
190 
191 static struct thread *
192 create_thread_struct(struct thread *inthread, const char *name,
193 	thread_id threadID, struct cpu_ent *cpu)
194 {
195 	struct thread *thread;
196 	cpu_status state;
197 	char temp[64];
198 	bool recycled = false;
199 
200 	if (inthread == NULL) {
201 		// try to recycle one from the dead queue first
202 		state = disable_interrupts();
203 		GRAB_THREAD_LOCK();
204 		thread = thread_dequeue(&dead_q);
205 		RELEASE_THREAD_LOCK();
206 		restore_interrupts(state);
207 
208 		// if not, create a new one
209 		if (thread == NULL) {
210 			thread = (struct thread *)malloc(sizeof(struct thread));
211 			if (thread == NULL)
212 				return NULL;
213 		} else {
214 			recycled = true;
215 		}
216 	} else {
217 		thread = inthread;
218 	}
219 
220 	if (!recycled)
221 		scheduler_on_thread_create(thread);
222 
223 	if (name != NULL)
224 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
225 	else
226 		strcpy(thread->name, "unnamed thread");
227 
228 	thread->flags = 0;
229 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
230 	thread->team = NULL;
231 	thread->cpu = cpu;
232 	thread->previous_cpu = NULL;
233 	thread->pinned_to_cpu = 0;
234 	thread->keep_scheduled = 0;
235 	thread->fault_handler = 0;
236 	thread->page_faults_allowed = 1;
237 	thread->kernel_stack_area = -1;
238 	thread->kernel_stack_base = 0;
239 	thread->user_stack_area = -1;
240 	thread->user_stack_base = 0;
241 	thread->user_local_storage = 0;
242 	thread->kernel_errno = 0;
243 	thread->team_next = NULL;
244 	thread->queue_next = NULL;
245 	thread->priority = thread->next_priority = -1;
246 	thread->io_priority = -1;
247 	thread->args1 = NULL;  thread->args2 = NULL;
248 	thread->alarm.period = 0;
249 	reset_signals(thread);
250 	thread->in_kernel = true;
251 	thread->was_yielded = false;
252 	thread->user_time = 0;
253 	thread->kernel_time = 0;
254 	thread->last_time = 0;
255 	thread->exit.status = 0;
256 	thread->exit.reason = 0;
257 	thread->exit.signal = 0;
258 	list_init(&thread->exit.waiters);
259 	thread->select_infos = NULL;
260 	thread->post_interrupt_callback = NULL;
261 	thread->post_interrupt_data = NULL;
262 
263 	sprintf(temp, "thread_%ld_retcode_sem", thread->id);
264 	thread->exit.sem = create_sem(0, temp);
265 	if (thread->exit.sem < B_OK)
266 		goto err1;
267 
268 	sprintf(temp, "%s send", thread->name);
269 	thread->msg.write_sem = create_sem(1, temp);
270 	if (thread->msg.write_sem < B_OK)
271 		goto err2;
272 
273 	sprintf(temp, "%s receive", thread->name);
274 	thread->msg.read_sem = create_sem(0, temp);
275 	if (thread->msg.read_sem < B_OK)
276 		goto err3;
277 
278 	if (arch_thread_init_thread_struct(thread) < B_OK)
279 		goto err4;
280 
281 	return thread;
282 
283 err4:
284 	delete_sem(thread->msg.read_sem);
285 err3:
286 	delete_sem(thread->msg.write_sem);
287 err2:
288 	delete_sem(thread->exit.sem);
289 err1:
290 	// ToDo: put them in the dead queue instead?
291 	if (inthread == NULL) {
292 		scheduler_on_thread_destroy(thread);
293 		free(thread);
294 	}
295 	return NULL;
296 }
297 
298 
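/*!	Counterpart to create_thread_struct(): deletes the thread's semaphores,
	notifies the scheduler, and frees the structure.
*/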
299 static void
300 delete_thread_struct(struct thread *thread)
301 {
302 	delete_sem(thread->exit.sem);
303 	delete_sem(thread->msg.write_sem);
304 	delete_sem(thread->msg.read_sem);
305 
306 	scheduler_on_thread_destroy(thread);
307 
308 	// ToDo: put them in the dead queue instead?
309 	free(thread);
310 }
311 
312 
313 /*! This function gets run by a new thread before anything else */
314 static void
315 thread_kthread_entry(void)
316 {
317 	struct thread *thread = thread_get_current_thread();
318 
319 	// The thread is new and has been scheduled the first time. Notify the user
320 	// debugger code.
321 	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
322 		user_debug_thread_scheduled(thread);
323 
324 	// Simulates the thread spinlock release that would occur if the thread had
325 	// been rescheduled from. The reschedule didn't happen because the thread is new.
326 	RELEASE_THREAD_LOCK();
327 
328 	// start tracking time
329 	thread->last_time = system_time();
330 
331 	enable_interrupts(); // this essentially simulates a return-from-interrupt
332 }
333 
334 
335 static void
336 thread_kthread_exit(void)
337 {
338 	struct thread *thread = thread_get_current_thread();
339 
340 	thread->exit.reason = THREAD_RETURN_EXIT;
341 	thread_exit();
342 }
343 
344 
345 /*!
346 	Initializes the thread and jumps to its userspace entry point.
347 	This function is called at creation time of every user thread,
348 	but not for a team's main thread.
349 */
350 static int
351 _create_user_thread_kentry(void)
352 {
353 	struct thread *thread = thread_get_current_thread();
354 
355 	// jump to the entry point in user space
356 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
357 		thread->args1, thread->args2);
358 
359 	// only get here if the above call fails
360 	return 0;
361 }
362 
363 
364 /*! Initializes the thread and calls its kernel space entry point. */
365 static int
366 _create_kernel_thread_kentry(void)
367 {
368 	struct thread *thread = thread_get_current_thread();
369 	int (*func)(void *args) = (int (*)(void *))thread->entry;
370 
371 	// call the entry function with the appropriate args
372 	return func(thread->args1);
373 }
374 
375 
376 /*!
377 	Creates a new thread in the team with the specified team ID.
378 
379 	\param attributes The creation attributes for the thread. If
380 		  \c attributes.thread is \code < 0 \endcode, a fresh ID is allocated.
381 */
382 static thread_id
383 create_thread(thread_creation_attributes& attributes, bool kernel)
384 {
385 	struct thread *thread, *currentThread;
386 	struct team *team;
387 	cpu_status state;
388 	char stack_name[B_OS_NAME_LENGTH];
389 	status_t status;
390 	bool abort = false;
391 	bool debugNewThread = false;
392 
393 	TRACE(("create_thread(%s, id = %ld, %s)\n", attributes.name,
394 		attributes.thread, kernel ? "kernel" : "user"));
395 
396 	thread = create_thread_struct(NULL, attributes.name, attributes.thread,
397 		NULL);
398 	if (thread == NULL)
399 		return B_NO_MEMORY;
400 
401 	thread->priority = attributes.priority == -1
402 		? B_NORMAL_PRIORITY : attributes.priority;
403 	thread->next_priority = thread->priority;
404 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
405 	thread->state = B_THREAD_SUSPENDED;
406 	thread->next_state = B_THREAD_SUSPENDED;
407 
408 	// init debug structure
409 	init_thread_debug_info(&thread->debug_info);
410 
411 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", attributes.name,
412 		thread->id);
413 	thread->kernel_stack_area = create_area(stack_name,
414 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
415 		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
416 		B_FULL_LOCK,
417 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
418 
419 	if (thread->kernel_stack_area < 0) {
420 		// we're not yet part of a team, so we can just bail out
421 		status = thread->kernel_stack_area;
422 
423 		dprintf("create_thread: error creating kernel stack: %s!\n",
424 			strerror(status));
425 
426 		delete_thread_struct(thread);
427 		return status;
428 	}
429 
430 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
431 		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
432 
433 	state = disable_interrupts();
434 	GRAB_THREAD_LOCK();
435 
436 	// If the new thread belongs to the same team as the current thread,
437 	// it may inherit some of the thread debug flags.
438 	currentThread = thread_get_current_thread();
439 	if (currentThread && currentThread->team->id == attributes.team) {
440 		// inherit all user flags...
441 		int32 debugFlags = currentThread->debug_info.flags
442 			& B_THREAD_DEBUG_USER_FLAG_MASK;
443 
444 		// ... except the syscall tracing flags, unless explicitly specified
445 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
446 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
447 				| B_THREAD_DEBUG_POST_SYSCALL);
448 		}
449 
450 		thread->debug_info.flags = debugFlags;
451 
452 		// stop the new thread, if desired
453 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
454 	}
455 
456 	// insert into global list
457 	hash_insert(sThreadHash, thread);
458 	sUsedThreads++;
459 	scheduler_on_thread_init(thread);
460 	RELEASE_THREAD_LOCK();
461 
462 	GRAB_TEAM_LOCK();
463 	// look at the team, make sure it's not being deleted
464 	team = team_get_team_struct_locked(attributes.team);
465 
466 	if (team == NULL || team->state == TEAM_STATE_DEATH)
467 		abort = true;
468 
469 	if (!abort && !kernel) {
470 		thread->user_thread = team_allocate_user_thread(team);
471 		abort = thread->user_thread == NULL;
472 	}
473 
474 	if (!abort) {
475 		// Debug the new thread, if the parent thread required that (see above),
476 		// or the respective global team debug flag is set. But only if a
477 		// debugger is installed for the team.
478 		debugNewThread |= (atomic_get(&team->debug_info.flags)
479 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
480 		if (debugNewThread
481 			&& (atomic_get(&team->debug_info.flags)
482 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
483 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
484 		}
485 
486 		insert_thread_into_team(team, thread);
487 	}
488 
489 	RELEASE_TEAM_LOCK();
490 	if (abort) {
491 		GRAB_THREAD_LOCK();
492 		hash_remove(sThreadHash, thread);
493 		RELEASE_THREAD_LOCK();
494 	}
495 	restore_interrupts(state);
496 	if (abort) {
497 		delete_area(thread->kernel_stack_area);
498 		delete_thread_struct(thread);
499 		return B_BAD_TEAM_ID;
500 	}
501 
502 	thread->args1 = attributes.args1;
503 	thread->args2 = attributes.args2;
504 	thread->entry = attributes.entry;
505 	status = thread->id;
506 
507 	if (kernel) {
508 		// this sets up an initial kthread stack that runs the entry
509 
510 		// Note: whatever function wants to set up a user stack later for this
511 		// thread must initialize the TLS for it
512 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
513 			&thread_kthread_entry, &thread_kthread_exit);
514 	} else {
515 		// create user stack
516 
517 		// the stack will be between USER_STACK_REGION and the main thread stack
518 		// area (the user stack of the main thread is created in
519 		// team_create_team())
520 		if (attributes.stack_address == NULL) {
521 			thread->user_stack_base = USER_STACK_REGION;
522 			if (attributes.stack_size <= 0)
523 				thread->user_stack_size = USER_STACK_SIZE;
524 			else
525 				thread->user_stack_size = PAGE_ALIGN(attributes.stack_size);
526 			thread->user_stack_size += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
527 
528 			snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack",
529 				attributes.name, thread->id);
530 			thread->user_stack_area = create_area_etc(team->id, stack_name,
531 					(void **)&thread->user_stack_base, B_BASE_ADDRESS,
532 					thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
533 					B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0);
534 			if (thread->user_stack_area < B_OK
535 				|| arch_thread_init_tls(thread) < B_OK) {
536 				// great, we have a fully running thread without a (usable)
537 				// stack
538 				dprintf("create_thread: unable to create proper user stack!\n");
539 				status = thread->user_stack_area;
540 				kill_thread(thread->id);
541 			}
542 		} else {
543 			thread->user_stack_base = (addr_t)attributes.stack_address;
544 			thread->user_stack_size = attributes.stack_size;
545 		}
546 
547 		user_debug_update_new_thread_flags(thread->id);
548 
549 		// copy the user entry over to the args field in the thread struct
550 		// the function this will call will immediately switch the thread into
551 		// user space.
552 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
553 			&thread_kthread_entry, &thread_kthread_exit);
554 	}
555 
556 	return status;
557 }
558 
559 
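/*!	Entry point of the "undertaker" thread, which buries dead threads: it
	waits for entries on the undertaker list, deletes each thread's kernel
	stack area, removes the thread from the kernel team, releases the
	associated death semaphore (if any), and recycles the thread structure
	via the dead queue.
*/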
560 static status_t
561 undertaker(void* /*args*/)
562 {
563 	while (true) {
564 		// wait for a thread to bury
565 		InterruptsSpinLocker locker(gThreadSpinlock);
566 
567 		while (sUndertakerEntries.IsEmpty()) {
568 			ConditionVariableEntry conditionEntry;
569 			sUndertakerCondition.Add(&conditionEntry);
570 			locker.Unlock();
571 
572 			conditionEntry.Wait();
573 
574 			locker.Lock();
575 		}
576 
577 		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
578 		locker.Unlock();
579 
580 		UndertakerEntry entry = *_entry;
581 			// we need a copy, since the original entry is on the thread's stack
582 
583 		// we've got an entry
584 		struct thread* thread = entry.thread;
585 
586 		// delete the old kernel stack area
587 		delete_area(thread->kernel_stack_area);
588 
589 		// remove this thread from all of the global lists
590 		disable_interrupts();
591 		GRAB_TEAM_LOCK();
592 
593 		remove_thread_from_team(team_get_kernel_team(), thread);
594 
595 		RELEASE_TEAM_LOCK();
596 		enable_interrupts();
597 			// needed for the debugger notification below
598 
599 		if (entry.deathSem >= 0)
600 			release_sem_etc(entry.deathSem, 1, B_DO_NOT_RESCHEDULE);
601 
602 		// free the thread structure
603 		locker.Lock();
604 		thread_enqueue(thread, &dead_q);
605 			// TODO: Use the slab allocator!
606 	}
607 }
608 
609 
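/*!	Returns the ID of the semaphore the given thread is currently blocked on,
	or -1 if it is not waiting on a semaphore.
*/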
610 static sem_id
611 get_thread_wait_sem(struct thread* thread)
612 {
613 	if (thread->state == B_THREAD_WAITING
614 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
615 		return (sem_id)(addr_t)thread->wait.object;
616 	}
617 	return -1;
618 }
619 
620 
621 /*!
622 	Fills the thread_info structure with information from the specified
623 	thread.
624 	The thread lock must be held when called.
625 */
626 static void
627 fill_thread_info(struct thread *thread, thread_info *info, size_t size)
628 {
629 	info->thread = thread->id;
630 	info->team = thread->team->id;
631 
632 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
633 
634 	if (thread->state == B_THREAD_WAITING) {
635 		info->state = B_THREAD_WAITING;
636 
637 		switch (thread->wait.type) {
638 			case THREAD_BLOCK_TYPE_SNOOZE:
639 				info->state = B_THREAD_ASLEEP;
640 				break;
641 
642 			case THREAD_BLOCK_TYPE_SEMAPHORE:
643 			{
644 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
645 				if (sem == thread->msg.read_sem)
646 					info->state = B_THREAD_RECEIVING;
647 				break;
648 			}
649 
650 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
651 			default:
652 				break;
653 		}
654 	} else
655 		info->state = (thread_state)thread->state;
656 
657 	info->priority = thread->priority;
658 	info->user_time = thread->user_time;
659 	info->kernel_time = thread->kernel_time;
660 	info->stack_base = (void *)thread->user_stack_base;
661 	info->stack_end = (void *)(thread->user_stack_base
662 		+ thread->user_stack_size);
663 	info->sem = get_thread_wait_sem(thread);
664 }
665 
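
/*!	Sends a message consisting of a code and an optional buffer to the given
	thread. Blocks (subject to \a flags) until the target can accept a new
	message. Returns \c B_OK on success or an error code on failure.
*/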
666 static status_t
667 send_data_etc(thread_id id, int32 code, const void *buffer,
668 	size_t bufferSize, int32 flags)
669 {
670 	struct thread *target;
671 	sem_id cachedSem;
672 	cpu_status state;
673 	status_t status;
674 	cbuf *data;
675 
676 	state = disable_interrupts();
677 	GRAB_THREAD_LOCK();
678 	target = thread_get_thread_struct_locked(id);
679 	if (!target) {
680 		RELEASE_THREAD_LOCK();
681 		restore_interrupts(state);
682 		return B_BAD_THREAD_ID;
683 	}
684 	cachedSem = target->msg.write_sem;
685 	RELEASE_THREAD_LOCK();
686 	restore_interrupts(state);
687 
688 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
689 		return B_NO_MEMORY;
690 
691 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
692 	if (status == B_INTERRUPTED) {
693 		// We got interrupted by a signal
694 		return status;
695 	}
696 	if (status != B_OK) {
697 		// Any other acquisition problems may be due to thread deletion
698 		return B_BAD_THREAD_ID;
699 	}
700 
701 	if (bufferSize > 0) {
702 		data = cbuf_get_chain(bufferSize);
703 		if (data == NULL)
704 			return B_NO_MEMORY;
705 		status = cbuf_user_memcpy_to_chain(data, 0, buffer, bufferSize);
706 		if (status < B_OK) {
707 			cbuf_free_chain(data);
708 			return B_NO_MEMORY;
709 		}
710 	} else
711 		data = NULL;
712 
713 	state = disable_interrupts();
714 	GRAB_THREAD_LOCK();
715 
716 	// The target thread could have been deleted at this point
717 	target = thread_get_thread_struct_locked(id);
718 	if (target == NULL) {
719 		RELEASE_THREAD_LOCK();
720 		restore_interrupts(state);
721 		cbuf_free_chain(data);
722 		return B_BAD_THREAD_ID;
723 	}
724 
725 	// Save message information
726 	target->msg.sender = thread_get_current_thread()->id;
727 	target->msg.code = code;
728 	target->msg.size = bufferSize;
729 	target->msg.buffer = data;
730 	cachedSem = target->msg.read_sem;
731 
732 	RELEASE_THREAD_LOCK();
733 	restore_interrupts(state);
734 
735 	release_sem(cachedSem);
736 	return B_OK;
737 }
738 
739 
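/*!	Receives a message sent to the current thread, copying up to
	\a bufferSize bytes of its payload into \a buffer and storing the sender
	in \a _sender. Blocks (subject to \a flags) until a message arrives and
	returns the message code, or an error if the wait failed.
*/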
740 static int32
741 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
742 	int32 flags)
743 {
744 	struct thread *thread = thread_get_current_thread();
745 	status_t status;
746 	size_t size;
747 	int32 code;
748 
749 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
750 	if (status < B_OK) {
751 		// Actually, we're not supposed to return error codes
752 		// but since the only reason this can fail is that we
753 		// were killed, it's probably okay to do so (but also
754 		// meaningless).
755 		return status;
756 	}
757 
758 	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
759 		size = min_c(bufferSize, thread->msg.size);
760 		status = cbuf_user_memcpy_from_chain(buffer, thread->msg.buffer,
761 			0, size);
762 		if (status < B_OK) {
763 			cbuf_free_chain(thread->msg.buffer);
764 			release_sem(thread->msg.write_sem);
765 			return status;
766 		}
767 	}
768 
769 	*_sender = thread->msg.sender;
770 	code = thread->msg.code;
771 
772 	cbuf_free_chain(thread->msg.buffer);
773 	release_sem(thread->msg.write_sem);
774 
775 	return code;
776 }
777 
778 
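/*!	Common backend for getrlimit(). RLIMIT_NOFILE and RLIMIT_NOVMON are
	forwarded to the VFS; the stack limit is taken from the current thread.
*/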
779 static status_t
780 common_getrlimit(int resource, struct rlimit * rlp)
781 {
782 	if (!rlp)
783 		return B_BAD_ADDRESS;
784 
785 	switch (resource) {
786 		case RLIMIT_NOFILE:
787 		case RLIMIT_NOVMON:
788 			return vfs_getrlimit(resource, rlp);
789 
790 		case RLIMIT_CORE:
791 			rlp->rlim_cur = 0;
792 			rlp->rlim_max = 0;
793 			return B_OK;
794 
795 		case RLIMIT_STACK:
796 		{
797 			struct thread *thread = thread_get_current_thread();
798 			if (!thread)
799 				return B_ERROR;
800 			rlp->rlim_cur = thread->user_stack_size;
801 			rlp->rlim_max = thread->user_stack_size;
802 			return B_OK;
803 		}
804 
805 		default:
806 			return EINVAL;
807 	}
808 
809 	return B_OK;
810 }
811 
812 
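/*!	Common backend for setrlimit(). RLIMIT_NOFILE and RLIMIT_NOVMON are
	forwarded to the VFS; since core files are not supported, RLIMIT_CORE may
	only be set to 0/0.
*/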
813 static status_t
814 common_setrlimit(int resource, const struct rlimit * rlp)
815 {
816 	if (!rlp)
817 		return B_BAD_ADDRESS;
818 
819 	switch (resource) {
820 		case RLIMIT_NOFILE:
821 		case RLIMIT_NOVMON:
822 			return vfs_setrlimit(resource, rlp);
823 
824 		case RLIMIT_CORE:
825 			// We don't support core files, so only allow setting the limits to 0/0.
826 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
827 				return EINVAL;
828 			return B_OK;
829 
830 		default:
831 			return EINVAL;
832 	}
833 
834 	return B_OK;
835 }
836 
837 
838 //	#pragma mark - debugger calls
839 
840 
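/*!	Kernel debugger command: lowers the priority of the given thread (or of
	all threads, if no ID is given) to B_NORMAL_PRIORITY, if it currently is
	above B_DISPLAY_PRIORITY.
*/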
841 static int
842 make_thread_unreal(int argc, char **argv)
843 {
844 	struct thread *thread;
845 	struct hash_iterator i;
846 	int32 id = -1;
847 
848 	if (argc > 2) {
849 		print_debugger_command_usage(argv[0]);
850 		return 0;
851 	}
852 
853 	if (argc > 1)
854 		id = strtoul(argv[1], NULL, 0);
855 
856 	hash_open(sThreadHash, &i);
857 
858 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
859 		if (id != -1 && thread->id != id)
860 			continue;
861 
862 		if (thread->priority > B_DISPLAY_PRIORITY) {
863 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
864 			kprintf("thread %ld made unreal\n", thread->id);
865 		}
866 	}
867 
868 	hash_close(sThreadHash, &i, false);
869 	return 0;
870 }
871 
872 
873 static int
874 set_thread_prio(int argc, char **argv)
875 {
876 	struct thread *thread;
877 	struct hash_iterator i;
878 	int32 id;
879 	int32 prio;
880 
881 	if (argc > 3 || argc < 2) {
882 		print_debugger_command_usage(argv[0]);
883 		return 0;
884 	}
885 
886 	prio = strtoul(argv[1], NULL, 0);
887 	if (prio > THREAD_MAX_SET_PRIORITY)
888 		prio = THREAD_MAX_SET_PRIORITY;
889 	if (prio < THREAD_MIN_SET_PRIORITY)
890 		prio = THREAD_MIN_SET_PRIORITY;
891 
892 	if (argc > 2)
893 		id = strtoul(argv[2], NULL, 0);
894 	else
895 		id = thread_get_current_thread()->id;
896 
897 	hash_open(sThreadHash, &i);
898 
899 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
900 		if (thread->id != id)
901 			continue;
902 		thread->priority = thread->next_priority = prio;
903 		kprintf("thread %ld set to priority %ld\n", id, prio);
904 		break;
905 	}
906 	if (!thread)
907 		kprintf("thread %ld (%#lx) not found\n", id, id);
908 
909 	hash_close(sThreadHash, &i, false);
910 	return 0;
911 }
912 
913 
914 static int
915 make_thread_suspended(int argc, char **argv)
916 {
917 	struct thread *thread;
918 	struct hash_iterator i;
919 	int32 id;
920 
921 	if (argc > 2) {
922 		print_debugger_command_usage(argv[0]);
923 		return 0;
924 	}
925 
926 	if (argc == 1)
927 		id = thread_get_current_thread()->id;
928 	else
929 		id = strtoul(argv[1], NULL, 0);
930 
931 	hash_open(sThreadHash, &i);
932 
933 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
934 		if (thread->id != id)
935 			continue;
936 
937 		thread->next_state = B_THREAD_SUSPENDED;
938 		kprintf("thread %ld suspended\n", id);
939 		break;
940 	}
941 	if (!thread)
942 		kprintf("thread %ld (%#lx) not found\n", id, id);
943 
944 	hash_close(sThreadHash, &i, false);
945 	return 0;
946 }
947 
948 
949 static int
950 make_thread_resumed(int argc, char **argv)
951 {
952 	struct thread *thread;
953 	struct hash_iterator i;
954 	int32 id;
955 
956 	if (argc != 2) {
957 		print_debugger_command_usage(argv[0]);
958 		return 0;
959 	}
960 
961 	// force user to enter a thread id, as using
962 	// the current thread is usually not intended
963 	id = strtoul(argv[1], NULL, 0);
964 
965 	hash_open(sThreadHash, &i);
966 
967 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
968 		if (thread->id != id)
969 			continue;
970 
971 		if (thread->state == B_THREAD_SUSPENDED) {
972 			scheduler_enqueue_in_run_queue(thread);
973 			kprintf("thread %ld resumed\n", thread->id);
974 		}
975 		break;
976 	}
977 	if (!thread)
978 		kprintf("thread %ld (%#lx) not found\n", id, id);
979 
980 	hash_close(sThreadHash, &i, false);
981 	return 0;
982 }
983 
984 
985 static int
986 drop_into_debugger(int argc, char **argv)
987 {
988 	status_t err;
989 	int32 id;
990 
991 	if (argc > 2) {
992 		print_debugger_command_usage(argv[0]);
993 		return 0;
994 	}
995 
996 	if (argc == 1)
997 		id = thread_get_current_thread()->id;
998 	else
999 		id = strtoul(argv[1], NULL, 0);
1000 
1001 	err = _user_debug_thread(id);
1002 	if (err)
1003 		kprintf("drop failed\n");
1004 	else
1005 		kprintf("thread %ld dropped into user debugger\n", id);
1006 
1007 	return 0;
1008 }
1009 
1010 
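/*!	Returns a short human readable description of the given thread state,
	taking the wait object into account for waiting threads.
*/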
1011 static const char *
1012 state_to_text(struct thread *thread, int32 state)
1013 {
1014 	switch (state) {
1015 		case B_THREAD_READY:
1016 			return "ready";
1017 
1018 		case B_THREAD_RUNNING:
1019 			return "running";
1020 
1021 		case B_THREAD_WAITING:
1022 		{
1023 			if (thread != NULL) {
1024 				switch (thread->wait.type) {
1025 					case THREAD_BLOCK_TYPE_SNOOZE:
1026 						return "zzz";
1027 
1028 					case THREAD_BLOCK_TYPE_SEMAPHORE:
1029 					{
1030 						sem_id sem = (sem_id)(addr_t)thread->wait.object;
1031 						if (sem == thread->msg.read_sem)
1032 							return "receive";
1033 						break;
1034 					}
1035 				}
1036 			}
1037 
1038 			return "waiting";
1039 		}
1040 
1041 		case B_THREAD_SUSPENDED:
1042 			return "suspended";
1043 
1044 		case THREAD_STATE_FREE_ON_RESCHED:
1045 			return "death";
1046 
1047 		default:
1048 			return "UNKNOWN";
1049 	}
1050 }
1051 
1052 
1053 static void
1054 print_thread_list_table_head()
1055 {
1056 	kprintf("thread         id  state     wait for   object  cpu pri  stack    "
1057 		"  team  name\n");
1058 }
1059 
1060 
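/*!	Prints information about the given thread to the kernel debugger, either
	as a single row for the thread list (\a shortInfo) or as a full dump.
*/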
1061 static void
1062 _dump_thread_info(struct thread *thread, bool shortInfo)
1063 {
1064 	if (shortInfo) {
1065 		kprintf("%p %6ld  %-10s", thread, thread->id, state_to_text(thread,
1066 			thread->state));
1067 
1068 		// does it block on a semaphore or a condition variable?
1069 		if (thread->state == B_THREAD_WAITING) {
1070 			switch (thread->wait.type) {
1071 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1072 				{
1073 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1074 					if (sem == thread->msg.read_sem)
1075 						kprintf("                    ");
1076 					else
1077 						kprintf("sem  %12ld   ", sem);
1078 					break;
1079 				}
1080 
1081 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1082 					kprintf("cvar   %p   ", thread->wait.object);
1083 					break;
1084 
1085 				case THREAD_BLOCK_TYPE_SNOOZE:
1086 					kprintf("                    ");
1087 					break;
1088 
1089 				case THREAD_BLOCK_TYPE_SIGNAL:
1090 					kprintf("signal              ");
1091 					break;
1092 
1093 				case THREAD_BLOCK_TYPE_MUTEX:
1094 					kprintf("mutex  %p   ", thread->wait.object);
1095 					break;
1096 
1097 				case THREAD_BLOCK_TYPE_RW_LOCK:
1098 					kprintf("rwlock %p   ", thread->wait.object);
1099 					break;
1100 
1101 				case THREAD_BLOCK_TYPE_OTHER:
1102 					kprintf("other               ");
1103 					break;
1104 
1105 				default:
1106 					kprintf("???    %p   ", thread->wait.object);
1107 					break;
1108 			}
1109 		} else
1110 			kprintf("        -           ");
1111 
1112 		// on which CPU does it run?
1113 		if (thread->cpu)
1114 			kprintf("%2d", thread->cpu->cpu_num);
1115 		else
1116 			kprintf(" -");
1117 
1118 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1119 			(void *)thread->kernel_stack_base, thread->team->id,
1120 			thread->name != NULL ? thread->name : "<NULL>");
1121 
1122 		return;
1123 	}
1124 
1125 	// print the long info
1126 
1127 	struct death_entry *death = NULL;
1128 
1129 	kprintf("THREAD: %p\n", thread);
1130 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1131 	kprintf("name:               \"%s\"\n", thread->name);
1132 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1133 		thread->all_next, thread->team_next, thread->queue_next);
1134 	kprintf("priority:           %ld (next %ld, I/O: %ld)\n", thread->priority,
1135 		thread->next_priority, thread->io_priority);
1136 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1137 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1138 	kprintf("cpu:                %p ", thread->cpu);
1139 	if (thread->cpu)
1140 		kprintf("(%d)\n", thread->cpu->cpu_num);
1141 	else
1142 		kprintf("\n");
1143 	kprintf("sig_pending:        %#lx (blocked: %#lx)\n", thread->sig_pending,
1144 		thread->sig_block_mask);
1145 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1146 
1147 	if (thread->state == B_THREAD_WAITING) {
1148 		kprintf("waiting for:        ");
1149 
1150 		switch (thread->wait.type) {
1151 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1152 			{
1153 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1154 				if (sem == thread->msg.read_sem)
1155 					kprintf("data\n");
1156 				else
1157 					kprintf("semaphore %ld\n", sem);
1158 				break;
1159 			}
1160 
1161 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1162 				kprintf("condition variable %p\n", thread->wait.object);
1163 				break;
1164 
1165 			case THREAD_BLOCK_TYPE_SNOOZE:
1166 				kprintf("snooze()\n");
1167 				break;
1168 
1169 			case THREAD_BLOCK_TYPE_SIGNAL:
1170 				kprintf("signal\n");
1171 				break;
1172 
1173 			case THREAD_BLOCK_TYPE_MUTEX:
1174 				kprintf("mutex %p\n", thread->wait.object);
1175 				break;
1176 
1177 			case THREAD_BLOCK_TYPE_RW_LOCK:
1178 				kprintf("rwlock %p\n", thread->wait.object);
1179 				break;
1180 
1181 			case THREAD_BLOCK_TYPE_OTHER:
1182 				kprintf("other (%s)\n", (char*)thread->wait.object);
1183 				break;
1184 
1185 			default:
1186 				kprintf("unknown (%p)\n", thread->wait.object);
1187 				break;
1188 		}
1189 	}
1190 
1191 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1192 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1193 	kprintf("entry:              %p\n", (void *)thread->entry);
1194 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1195 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1196 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1197 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1198 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1199 	kprintf("  exit.waiters:\n");
1200 	while ((death = (struct death_entry*)list_get_next_item(
1201 			&thread->exit.waiters, death)) != NULL) {
1202 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1203 	}
1204 
1205 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1206 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1207 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1208 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1209 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1210 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1211 		strerror(thread->kernel_errno));
1212 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1213 	kprintf("user_time:          %Ld\n", thread->user_time);
1214 	kprintf("flags:              0x%lx\n", thread->flags);
1215 	kprintf("architecture dependant section:\n");
1216 	arch_thread_dump_info(&thread->arch_info);
1217 }
1218 
1219 
1220 static int
1221 dump_thread_info(int argc, char **argv)
1222 {
1223 	bool shortInfo = false;
1224 	int argi = 1;
1225 	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
1226 		shortInfo = true;
1227 		print_thread_list_table_head();
1228 		argi++;
1229 	}
1230 
1231 	if (argi == argc) {
1232 		_dump_thread_info(thread_get_current_thread(), shortInfo);
1233 		return 0;
1234 	}
1235 
1236 	for (; argi < argc; argi++) {
1237 		const char *name = argv[argi];
1238 		int32 id = strtoul(name, NULL, 0);
1239 
1240 		if (IS_KERNEL_ADDRESS(id)) {
1241 			// semi-hack
1242 			_dump_thread_info((struct thread *)id, shortInfo);
1243 			continue;
1244 		}
1245 
1246 		// walk through the thread list, trying to match name or id
1247 		bool found = false;
1248 		struct hash_iterator i;
1249 		hash_open(sThreadHash, &i);
1250 		struct thread *thread;
1251 		while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1252 			if (!strcmp(name, thread->name) || thread->id == id) {
1253 				_dump_thread_info(thread, shortInfo);
1254 				found = true;
1255 				break;
1256 			}
1257 		}
1258 		hash_close(sThreadHash, &i, false);
1259 
1260 		if (!found)
1261 			kprintf("thread \"%s\" (%ld) doesn't exist!\n", name, id);
1262 	}
1263 
1264 	return 0;
1265 }
1266 
1267 
1268 static int
1269 dump_thread_list(int argc, char **argv)
1270 {
1271 	struct thread *thread;
1272 	struct hash_iterator i;
1273 	bool realTimeOnly = false;
1274 	bool calling = false;
1275 	const char *callSymbol = NULL;
1276 	addr_t callStart = 0;
1277 	addr_t callEnd = 0;
1278 	int32 requiredState = 0;
1279 	team_id team = -1;
1280 	sem_id sem = -1;
1281 
1282 	if (!strcmp(argv[0], "realtime"))
1283 		realTimeOnly = true;
1284 	else if (!strcmp(argv[0], "ready"))
1285 		requiredState = B_THREAD_READY;
1286 	else if (!strcmp(argv[0], "running"))
1287 		requiredState = B_THREAD_RUNNING;
1288 	else if (!strcmp(argv[0], "waiting")) {
1289 		requiredState = B_THREAD_WAITING;
1290 
1291 		if (argc > 1) {
1292 			sem = strtoul(argv[1], NULL, 0);
1293 			if (sem == 0)
1294 				kprintf("ignoring invalid semaphore argument.\n");
1295 		}
1296 	} else if (!strcmp(argv[0], "calling")) {
1297 		if (argc < 2) {
1298 			kprintf("Need to give a symbol name or start and end arguments.\n");
1299 			return 0;
1300 		} else if (argc == 3) {
1301 			callStart = parse_expression(argv[1]);
1302 			callEnd = parse_expression(argv[2]);
1303 		} else
1304 			callSymbol = argv[1];
1305 
1306 		calling = true;
1307 	} else if (argc > 1) {
1308 		team = strtoul(argv[1], NULL, 0);
1309 		if (team == 0)
1310 			kprintf("ignoring invalid team argument.\n");
1311 	}
1312 
1313 	print_thread_list_table_head();
1314 
1315 	hash_open(sThreadHash, &i);
1316 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1317 		// filter out threads not matching the search criteria
1318 		if ((requiredState && thread->state != requiredState)
1319 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1320 					callStart, callEnd))
1321 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1322 			|| (team > 0 && thread->team->id != team)
1323 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1324 			continue;
1325 
1326 		_dump_thread_info(thread, true);
1327 	}
1328 	hash_close(sThreadHash, &i, false);
1329 	return 0;
1330 }
1331 
1332 
1333 //	#pragma mark - private kernel API
1334 
1335 
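/*!	Terminates the calling thread. Deletes its user stack, detaches it from
	its team (deleting the team as well, if it is the team's main thread),
	notifies waiters and the debugger, and finally hands the thread over to
	the undertaker. This function does not return.
*/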
1336 void
1337 thread_exit(void)
1338 {
1339 	cpu_status state;
1340 	struct thread *thread = thread_get_current_thread();
1341 	struct team *team = thread->team;
1342 	thread_id parentID = -1;
1343 	bool deleteTeam = false;
1344 	sem_id cachedDeathSem = -1;
1345 	status_t status;
1346 	struct thread_debug_info debugInfo;
1347 	team_id teamID = team->id;
1348 
1349 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1350 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1351 			? "due to signal" : "normally", thread->exit.status));
1352 
1353 	if (!are_interrupts_enabled())
1354 		panic("thread_exit() called with interrupts disabled!\n");
1355 
1356 	// boost our priority to get this over with
1357 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1358 
1359 	// Cancel previously installed alarm timer, if any
1360 	cancel_timer(&thread->alarm);
1361 
1362 	// delete the user stack area first, we won't need it anymore
1363 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1364 		area_id area = thread->user_stack_area;
1365 		thread->user_stack_area = -1;
1366 		vm_delete_area(team->id, area, true);
1367 	}
1368 
1369 	struct job_control_entry *death = NULL;
1370 	struct death_entry* threadDeathEntry = NULL;
1371 
1372 	if (team != team_get_kernel_team()) {
1373 		user_debug_thread_exiting(thread);
1374 
1375 		if (team->main_thread == thread) {
1376 			// this was the main thread in this team, so we will delete that as well
1377 			deleteTeam = true;
1378 		} else {
1379 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1380 			team_free_user_thread(thread);
1381 		}
1382 
1383 		// remove this thread from the current team and put it into the
1384 		// kernel team until it dies
1385 		state = disable_interrupts();
1386 		GRAB_TEAM_LOCK();
1387 		GRAB_THREAD_LOCK();
1388 			// removing the thread and putting its death entry to the parent
1389 			// team needs to be an atomic operation
1390 
1391 		// remember how long this thread lasted
1392 		team->dead_threads_kernel_time += thread->kernel_time;
1393 		team->dead_threads_user_time += thread->user_time;
1394 
1395 		remove_thread_from_team(team, thread);
1396 		insert_thread_into_team(team_get_kernel_team(), thread);
1397 
1398 		cachedDeathSem = team->death_sem;
1399 
1400 		if (deleteTeam) {
1401 			struct team *parent = team->parent;
1402 
1403 			// remember who our parent was so we can send a signal
1404 			parentID = parent->id;
1405 
1406 			// Set the team job control state to "dead" and detach the job
1407 			// control entry from our team struct.
1408 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1409 			death = team->job_control_entry;
1410 			team->job_control_entry = NULL;
1411 
1412 			if (death != NULL) {
1413 				death->InitDeadState();
1414 
1415 				// team_set_job_control_state() already moved our entry
1416 				// into the parent's list. We just check the soft limit of
1417 				// death entries.
1418 				if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
1419 					death = parent->dead_children->entries.RemoveHead();
1420 					parent->dead_children->count--;
1421 				} else
1422 					death = NULL;
1423 
1424 				RELEASE_THREAD_LOCK();
1425 			} else
1426 				RELEASE_THREAD_LOCK();
1427 
1428 			team_remove_team(team);
1429 
1430 			send_signal_etc(parentID, SIGCHLD,
1431 				SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
1432 		} else {
1433 			// The thread is not the main thread. We store a thread death
1434 			// entry for it, unless someone is already waiting for it.
1435 			if (threadDeathEntry != NULL
1436 				&& list_is_empty(&thread->exit.waiters)) {
1437 				threadDeathEntry->thread = thread->id;
1438 				threadDeathEntry->status = thread->exit.status;
1439 				threadDeathEntry->reason = thread->exit.reason;
1440 				threadDeathEntry->signal = thread->exit.signal;
1441 
1442 				// add entry -- remove an old one, if we hit the limit
1443 				list_add_item(&team->dead_threads, threadDeathEntry);
1444 				team->dead_threads_count++;
1445 				threadDeathEntry = NULL;
1446 
1447 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1448 					threadDeathEntry = (death_entry*)list_remove_head_item(
1449 						&team->dead_threads);
1450 					team->dead_threads_count--;
1451 				}
1452 			}
1453 
1454 			RELEASE_THREAD_LOCK();
1455 		}
1456 
1457 		RELEASE_TEAM_LOCK();
1458 
1459 		// swap address spaces, to make sure we're running on the kernel's pgdir
1460 		vm_swap_address_space(team->address_space, vm_kernel_address_space());
1461 		restore_interrupts(state);
1462 
1463 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1464 	}
1465 
1466 	if (threadDeathEntry != NULL)
1467 		free(threadDeathEntry);
1468 
1469 	// delete the team if we're its main thread
1470 	if (deleteTeam) {
1471 		team_delete_team(team);
1472 
1473 		// we need to delete any death entry that made it to here
1474 		if (death != NULL)
1475 			delete death;
1476 
1477 		cachedDeathSem = -1;
1478 	}
1479 
1480 	state = disable_interrupts();
1481 	GRAB_THREAD_LOCK();
1482 
1483 	// remove thread from hash, so it's no longer accessible
1484 	hash_remove(sThreadHash, thread);
1485 	sUsedThreads--;
1486 
1487 	// Stop debugging for this thread
1488 	debugInfo = thread->debug_info;
1489 	clear_thread_debug_info(&thread->debug_info, true);
1490 
1491 	// Remove the select infos. We notify them a little later.
1492 	select_info* selectInfos = thread->select_infos;
1493 	thread->select_infos = NULL;
1494 
1495 	RELEASE_THREAD_LOCK();
1496 	restore_interrupts(state);
1497 
1498 	destroy_thread_debug_info(&debugInfo);
1499 
1500 	// notify select infos
1501 	select_info* info = selectInfos;
1502 	while (info != NULL) {
1503 		select_sync* sync = info->sync;
1504 
1505 		notify_select_events(info, B_EVENT_INVALID);
1506 		info = info->next;
1507 		put_select_sync(sync);
1508 	}
1509 
1510 	// shutdown the thread messaging
1511 
1512 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1513 	if (status == B_WOULD_BLOCK) {
1514 		// there is data waiting for us, so let us eat it
1515 		thread_id sender;
1516 
1517 		delete_sem(thread->msg.write_sem);
1518 			// first, let's remove all possibly waiting writers
1519 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1520 	} else {
1521 		// we probably own the semaphore here, and we're the last to do so
1522 		delete_sem(thread->msg.write_sem);
1523 	}
1524 	// now we can safely remove the msg.read_sem
1525 	delete_sem(thread->msg.read_sem);
1526 
1527 	// fill all death entries and delete the sem that others will use to wait on us
1528 	{
1529 		sem_id cachedExitSem = thread->exit.sem;
1530 		cpu_status state;
1531 
1532 		state = disable_interrupts();
1533 		GRAB_THREAD_LOCK();
1534 
1535 		// make sure no one will grab this semaphore again
1536 		thread->exit.sem = -1;
1537 
1538 		// fill all death entries
1539 		death_entry* entry = NULL;
1540 		while ((entry = (struct death_entry*)list_get_next_item(
1541 				&thread->exit.waiters, entry)) != NULL) {
1542 			entry->status = thread->exit.status;
1543 			entry->reason = thread->exit.reason;
1544 			entry->signal = thread->exit.signal;
1545 		}
1546 
1547 		RELEASE_THREAD_LOCK();
1548 		restore_interrupts(state);
1549 
1550 		delete_sem(cachedExitSem);
1551 	}
1552 
1553 	// notify the debugger
1554 	if (teamID != team_get_kernel_team_id())
1555 		user_debug_thread_deleted(teamID, thread->id);
1556 
1557 	// enqueue in the undertaker list and reschedule for the last time
1558 	UndertakerEntry undertakerEntry(thread, teamID, cachedDeathSem);
1559 
1560 	disable_interrupts();
1561 	GRAB_THREAD_LOCK();
1562 
1563 	sUndertakerEntries.Add(&undertakerEntry);
1564 	sUndertakerCondition.NotifyOne(true);
1565 
1566 	thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
1567 	scheduler_reschedule();
1568 
1569 	panic("never can get here\n");
1570 }
1571 
1572 
1573 struct thread *
1574 thread_get_thread_struct(thread_id id)
1575 {
1576 	struct thread *thread;
1577 	cpu_status state;
1578 
1579 	state = disable_interrupts();
1580 	GRAB_THREAD_LOCK();
1581 
1582 	thread = thread_get_thread_struct_locked(id);
1583 
1584 	RELEASE_THREAD_LOCK();
1585 	restore_interrupts(state);
1586 
1587 	return thread;
1588 }
1589 
1590 
1591 struct thread *
1592 thread_get_thread_struct_locked(thread_id id)
1593 {
1594 	struct thread_key key;
1595 
1596 	key.id = id;
1597 
1598 	return (struct thread*)hash_lookup(sThreadHash, &key);
1599 }
1600 
1601 
1602 /*!
1603 	Called in the interrupt handler code when a thread enters
1604 	the kernel for any reason.
1605 	Only tracks time for now.
1606 	Interrupts are disabled.
1607 */
1608 void
1609 thread_at_kernel_entry(bigtime_t now)
1610 {
1611 	struct thread *thread = thread_get_current_thread();
1612 
1613 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1614 
1615 	// track user time
1616 	thread->user_time += now - thread->last_time;
1617 	thread->last_time = now;
1618 
1619 	thread->in_kernel = true;
1620 }
1621 
1622 
1623 /*!
1624 	Called whenever a thread exits kernel space to user space.
1625 	Tracks time, handles signals, ...
1626 	Interrupts must be enabled. When the function returns, interrupts will be
1627 	disabled.
1628 */
1629 void
1630 thread_at_kernel_exit(void)
1631 {
1632 	struct thread *thread = thread_get_current_thread();
1633 
1634 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1635 
1636 	while (handle_signals(thread)) {
1637 		InterruptsSpinLocker _(gThreadSpinlock);
1638 		scheduler_reschedule();
1639 	}
1640 
1641 	disable_interrupts();
1642 
1643 	thread->in_kernel = false;
1644 
1645 	// track kernel time
1646 	bigtime_t now = system_time();
1647 	thread->kernel_time += now - thread->last_time;
1648 	thread->last_time = now;
1649 }
1650 
1651 
1652 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1653 	and no debugging shall be done.
1654 	Interrupts must be disabled.
1655 */
1656 void
1657 thread_at_kernel_exit_no_signals(void)
1658 {
1659 	struct thread *thread = thread_get_current_thread();
1660 
1661 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1662 
1663 	thread->in_kernel = false;
1664 
1665 	// track kernel time
1666 	bigtime_t now = system_time();
1667 	thread->kernel_time += now - thread->last_time;
1668 	thread->last_time = now;
1669 }
1670 
1671 
1672 void
1673 thread_reset_for_exec(void)
1674 {
1675 	struct thread *thread = thread_get_current_thread();
1676 
1677 	cancel_timer(&thread->alarm);
1678 	reset_signals(thread);
1679 }
1680 
1681 
1682 /*! Insert a thread to the tail of a queue */
1683 void
1684 thread_enqueue(struct thread *thread, struct thread_queue *queue)
1685 {
1686 	thread->queue_next = NULL;
1687 	if (queue->head == NULL) {
1688 		queue->head = thread;
1689 		queue->tail = thread;
1690 	} else {
1691 		queue->tail->queue_next = thread;
1692 		queue->tail = thread;
1693 	}
1694 }
1695 
1696 
1697 struct thread *
1698 thread_lookat_queue(struct thread_queue *queue)
1699 {
1700 	return queue->head;
1701 }
1702 
1703 
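/*!	Removes and returns the thread at the head of the given queue, or NULL if
	the queue is empty.
*/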
1704 struct thread *
1705 thread_dequeue(struct thread_queue *queue)
1706 {
1707 	struct thread *thread = queue->head;
1708 
1709 	if (thread != NULL) {
1710 		queue->head = thread->queue_next;
1711 		if (queue->tail == thread)
1712 			queue->tail = NULL;
1713 	}
1714 	return thread;
1715 }
1716 
1717 
1718 struct thread *
1719 thread_dequeue_id(struct thread_queue *q, thread_id id)
1720 {
1721 	struct thread *thread;
1722 	struct thread *last = NULL;
1723 
1724 	thread = q->head;
1725 	while (thread != NULL) {
1726 		if (thread->id == id) {
1727 			if (last == NULL)
1728 				q->head = thread->queue_next;
1729 			else
1730 				last->queue_next = thread->queue_next;
1731 
1732 			if (q->tail == thread)
1733 				q->tail = last;
1734 			break;
1735 		}
1736 		last = thread;
1737 		thread = thread->queue_next;
1738 	}
1739 	return thread;
1740 }
1741 
1742 
1743 thread_id
1744 allocate_thread_id(void)
1745 {
1746 	return atomic_add(&sNextThreadID, 1);
1747 }
1748 
1749 
1750 thread_id
1751 peek_next_thread_id(void)
1752 {
1753 	return atomic_get(&sNextThreadID);
1754 }
1755 
1756 
1757 /*!	Yield the CPU to other threads.
1758 	If \a force is \c true, the thread will almost certainly be unscheduled.
1759 	If \c false, it will continue to run if there is no other thread in the
1760 	ready state; and even if there is, it still has a good chance to continue
1761 	as long as its priority is higher than that of the other ready threads.
1762 */
1763 void
1764 thread_yield(bool force)
1765 {
1766 	if (force) {
1767 		// snooze for roughly 3 thread quantums
1768 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1769 #if 0
1770 		cpu_status state;
1771 
1772 		struct thread *thread = thread_get_current_thread();
1773 		if (thread == NULL)
1774 			return;
1775 
1776 		state = disable_interrupts();
1777 		GRAB_THREAD_LOCK();
1778 
1779 		// mark the thread as yielded, so it will not be scheduled next
1780 		//thread->was_yielded = true;
1781 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1782 		scheduler_reschedule();
1783 
1784 		RELEASE_THREAD_LOCK();
1785 		restore_interrupts(state);
1786 #endif
1787 	} else {
1788 		struct thread *thread = thread_get_current_thread();
1789 		if (thread == NULL)
1790 			return;
1791 
1792 		// Don't force the thread off the CPU, just reschedule.
1793 		InterruptsSpinLocker _(gThreadSpinlock);
1794 		scheduler_reschedule();
1795 	}
1796 }
1797 
1798 
1799 /*!
1800 	Kernel private thread creation function.
1801 
1802 	\param threadID The ID to be assigned to the new thread. If
1803 		  \code < 0 \endcode a fresh one is allocated.
1804 */
1805 thread_id
1806 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1807 	void *arg, team_id team, thread_id threadID)
1808 {
1809 	thread_creation_attributes attributes;
1810 	attributes.entry = (thread_entry_func)function;
1811 	attributes.name = name;
1812 	attributes.priority = priority;
1813 	attributes.args1 = arg;
1814 	attributes.args2 = NULL;
1815 	attributes.stack_address = NULL;
1816 	attributes.stack_size = 0;
1817 	attributes.team = team;
1818 	attributes.thread = threadID;
1819 
1820 	return create_thread(attributes, true);
1821 }
1822 
1823 
1824 status_t
1825 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1826 	status_t *_returnCode)
1827 {
1828 	sem_id exitSem = B_BAD_THREAD_ID;
1829 	struct death_entry death;
1830 	job_control_entry* freeDeath = NULL;
1831 	struct thread *thread;
1832 	cpu_status state;
1833 	status_t status = B_OK;
1834 
1835 	if (id < B_OK)
1836 		return B_BAD_THREAD_ID;
1837 
1838 	// we need to resume the thread we're waiting for first
1839 
1840 	state = disable_interrupts();
1841 	GRAB_THREAD_LOCK();
1842 
1843 	thread = thread_get_thread_struct_locked(id);
1844 	if (thread != NULL) {
1845 		// remember the semaphore we have to wait on and place our death entry
1846 		exitSem = thread->exit.sem;
1847 		list_add_link_to_head(&thread->exit.waiters, &death);
1848 	}
1849 
1850 	death_entry* threadDeathEntry = NULL;
1851 
1852 	RELEASE_THREAD_LOCK();
1853 
1854 	if (thread == NULL) {
1855 		// we couldn't find this thread - maybe it's already gone, and we'll
1856 		// find its death entry in our team
1857 		GRAB_TEAM_LOCK();
1858 
1859 		struct team* team = thread_get_current_thread()->team;
1860 
1861 		// check the child death entries first (i.e. main threads of child
1862 		// teams)
1863 		bool deleteEntry;
1864 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1865 		if (freeDeath != NULL) {
1866 			death.status = freeDeath->status;
1867 			if (!deleteEntry)
1868 				freeDeath = NULL;
1869 		} else {
1870 			// check the thread death entries of the team (non-main threads)
1871 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1872 					&team->dead_threads, threadDeathEntry)) != NULL) {
1873 				if (threadDeathEntry->thread == id) {
1874 					list_remove_item(&team->dead_threads, threadDeathEntry);
1875 					team->dead_threads_count--;
1876 					death.status = threadDeathEntry->status;
1877 					break;
1878 				}
1879 			}
1880 
1881 			if (threadDeathEntry == NULL)
1882 				status = B_BAD_THREAD_ID;
1883 		}
1884 
1885 		RELEASE_TEAM_LOCK();
1886 	}
1887 
1888 	restore_interrupts(state);
1889 
1890 	if (thread == NULL && status == B_OK) {
1891 		// we found the thread's death entry in our team
1892 		if (_returnCode)
1893 			*_returnCode = death.status;
1894 
1895 		delete freeDeath;
1896 		free(threadDeathEntry);
1897 		return B_OK;
1898 	}
1899 
1900 	// we need to wait for the death of the thread
1901 
1902 	if (exitSem < B_OK)
1903 		return B_BAD_THREAD_ID;
1904 
1905 	resume_thread(id);
1906 		// make sure we don't wait forever on a suspended thread
1907 
1908 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1909 
1910 	if (status == B_OK) {
1911 		// this should never happen as the thread deletes the semaphore on exit
1912 		panic("could acquire exit_sem for thread %ld\n", id);
1913 	} else if (status == B_BAD_SEM_ID) {
1914 		// this is the way the thread normally exits
1915 		status = B_OK;
1916 
1917 		if (_returnCode)
1918 			*_returnCode = death.status;
1919 	} else {
1920 		// We were probably interrupted; we need to remove our death entry now.
1921 		state = disable_interrupts();
1922 		GRAB_THREAD_LOCK();
1923 
1924 		thread = thread_get_thread_struct_locked(id);
1925 		if (thread != NULL)
1926 			list_remove_link(&death);
1927 
1928 		RELEASE_THREAD_LOCK();
1929 		restore_interrupts(state);
1930 
1931 		// If the thread is already gone, we need to wait for its exit semaphore
1932 		// to make sure our death entry stays valid - it won't take long
1933 		if (thread == NULL)
1934 			acquire_sem(exitSem);
1935 	}
1936 
1937 	return status;
1938 }
1939 
1940 
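/*!	Adds a select info to the given thread, so that the caller will be
	notified with B_EVENT_INVALID when the thread exits.
*/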
1941 status_t
1942 select_thread(int32 id, struct select_info* info, bool kernel)
1943 {
1944 	InterruptsSpinLocker locker(gThreadSpinlock);
1945 
1946 	// get thread
1947 	struct thread* thread = thread_get_thread_struct_locked(id);
1948 	if (thread == NULL)
1949 		return B_BAD_THREAD_ID;
1950 
1951 	// We support only B_EVENT_INVALID at the moment.
1952 	info->selected_events &= B_EVENT_INVALID;
1953 
1954 	// add info to list
1955 	if (info->selected_events != 0) {
1956 		info->next = thread->select_infos;
1957 		thread->select_infos = info;
1958 
1959 		// we need a sync reference
1960 		atomic_add(&info->sync->ref_count, 1);
1961 	}
1962 
1963 	return B_OK;
1964 }
1965 
1966 
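/*!	Counterpart to select_thread(): removes the given select_info from the
	thread's list, if it is still queued there, and surrenders the sync
	reference acquired by select_thread().
*/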
1967 status_t
1968 deselect_thread(int32 id, struct select_info* info, bool kernel)
1969 {
1970 	InterruptsSpinLocker locker(gThreadSpinlock);
1971 
1972 	// get thread
1973 	struct thread* thread = thread_get_thread_struct_locked(id);
1974 	if (thread == NULL)
1975 		return B_BAD_THREAD_ID;
1976 
1977 	// remove info from list
1978 	select_info** infoLocation = &thread->select_infos;
1979 	while (*infoLocation != NULL && *infoLocation != info)
1980 		infoLocation = &(*infoLocation)->next;
1981 
1982 	if (*infoLocation != info)
1983 		return B_OK;
1984 
1985 	*infoLocation = info->next;
1986 
1987 	locker.Unlock();
1988 
1989 	// surrender sync reference
1990 	put_select_sync(info->sync);
1991 
1992 	return B_OK;
1993 }
1994 
1995 
1996 int32
1997 thread_max_threads(void)
1998 {
1999 	return sMaxThreads;
2000 }
2001 
2002 
2003 int32
2004 thread_used_threads(void)
2005 {
2006 	return sUsedThreads;
2007 }
2008 
2009 
2010 const char*
2011 thread_state_to_text(struct thread* thread, int32 state)
2012 {
2013 	return state_to_text(thread, state);
2014 }
2015 
2016 
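/*!	Returns the I/O priority of the thread with the given ID. If no I/O
	priority has been set (i.e. io_priority is negative), the thread's
	scheduling priority is returned instead. Returns B_BAD_THREAD_ID if no
	thread with that ID exists.
*/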
2017 int32
2018 thread_get_io_priority(thread_id id)
2019 {
2020 	// take a shortcut if it is the current thread
2021 	struct thread* thread = thread_get_current_thread();
2022 	int32 priority;
2023 	if (id == thread->id) {
2024 		priority = thread->io_priority;
2025 		return priority < 0 ? thread->priority : priority;
2026 	}
2027 
2028 	// not the current thread -- get it
2029 	InterruptsSpinLocker locker(gThreadSpinlock);
2030 
2031 	thread = thread_get_thread_struct_locked(id);
2032 	if (thread == NULL)
2033 		return B_BAD_THREAD_ID;
2034 
2035 	priority = thread->io_priority;
2036 	return priority < 0 ? thread->priority : priority;
2037 }
2038 
2039 
2040 void
2041 thread_set_io_priority(int32 priority)
2042 {
2043 	struct thread* thread = thread_get_current_thread();
2044 	thread->io_priority = priority;
2045 }
2046 
2047 
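/*!	Initializes the threading subsystem during boot: creates the thread hash
	table, sets up a thread structure for each CPU's idle thread, spawns the
	undertaker thread, and registers the thread related kernel debugger
	commands.
*/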
2048 status_t
2049 thread_init(kernel_args *args)
2050 {
2051 	uint32 i;
2052 
2053 	TRACE(("thread_init: entry\n"));
2054 
2055 	// create the thread hash table
2056 	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
2057 		&thread_struct_compare, &thread_struct_hash);
2058 
2059 	// zero out the dead thread structure queue
2060 	memset(&dead_q, 0, sizeof(dead_q));
2061 
2062 	if (arch_thread_init(args) < B_OK)
2063 		panic("arch_thread_init() failed!\n");
2064 
2065 	// skip all thread IDs including B_SYSTEM_TEAM, which is reserved
2066 	sNextThreadID = B_SYSTEM_TEAM + 1;
2067 
2068 	// create an idle thread for each cpu
2069 
2070 	for (i = 0; i < args->num_cpus; i++) {
2071 		struct thread *thread;
2072 		area_info info;
2073 		char name[64];
2074 
2075 		sprintf(name, "idle thread %lu", i + 1);
2076 		thread = create_thread_struct(&sIdleThreads[i], name,
2077 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
2078 		if (thread == NULL) {
2079 			panic("error creating idle thread struct\n");
2080 			return B_NO_MEMORY;
2081 		}
2082 
2083 		thread->team = team_get_kernel_team();
2084 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
2085 		thread->state = B_THREAD_RUNNING;
2086 		thread->next_state = B_THREAD_READY;
2087 		sprintf(name, "idle thread %lu kstack", i + 1);
2088 		thread->kernel_stack_area = find_area(name);
2089 		thread->entry = NULL;
2090 
2091 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2092 			panic("error finding idle kstack area\n");
2093 
2094 		thread->kernel_stack_base = (addr_t)info.address;
2095 		thread->kernel_stack_top = thread->kernel_stack_base + info.size;
2096 
2097 		hash_insert(sThreadHash, thread);
2098 		insert_thread_into_team(thread->team, thread);
2099 	}
2100 	sUsedThreads = args->num_cpus;
2101 
2102 	// set up the undertaker data and start the undertaker thread
2103 	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2104 	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2105 
2106 	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2107 		B_DISPLAY_PRIORITY, NULL);
2108 	if (undertakerThread < 0)
2109 		panic("Failed to create undertaker thread!");
2110 	resume_thread(undertakerThread);
2111 
2112 	// set up some debugger commands
2113 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2114 		"[ <team> ]\n"
2115 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2116 		"all threads of the specified team.\n"
2117 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2118 	add_debugger_command_etc("ready", &dump_thread_list,
2119 		"List all ready threads",
2120 		"\n"
2121 		"Prints a list of all threads in ready state.\n", 0);
2122 	add_debugger_command_etc("running", &dump_thread_list,
2123 		"List all running threads",
2124 		"\n"
2125 		"Prints a list of all threads in running state.\n", 0);
2126 	add_debugger_command_etc("waiting", &dump_thread_list,
2127 		"List all waiting threads (optionally for a specific semaphore)",
2128 		"[ <sem> ]\n"
2129 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2130 		"specified, only the threads waiting on that semaphore are listed.\n"
2131 		"  <sem>  - ID of the semaphore.\n", 0);
2132 	add_debugger_command_etc("realtime", &dump_thread_list,
2133 		"List all realtime threads",
2134 		"\n"
2135 		"Prints a list of all threads with realtime priority.\n", 0);
2136 	add_debugger_command_etc("thread", &dump_thread_info,
2137 		"Dump info about a particular thread",
2138 		"[ -s ] ( <id> | <address> | <name> )*\n"
2139 		"Prints information about the specified thread. If no argument is\n"
2140 		"given the current thread is selected.\n"
2141 		"  -s         - Print info in compact table form (like \"threads\").\n"
2142 		"  <id>       - The ID of the thread.\n"
2143 		"  <address>  - The address of the thread structure.\n"
2144 		"  <name>     - The thread's name.\n", 0);
2145 	add_debugger_command_etc("calling", &dump_thread_list,
2146 		"Show all threads that have a specific address in their call chain",
2147 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2148 	add_debugger_command_etc("unreal", &make_thread_unreal,
2149 		"Set realtime priority threads to normal priority",
2150 		"[ <id> ]\n"
2151 		"Sets the priority of all realtime threads or, if given, the one\n"
2152 		"with the specified ID to \"normal\" priority.\n"
2153 		"  <id>  - The ID of the thread.\n", 0);
2154 	add_debugger_command_etc("suspend", &make_thread_suspended,
2155 		"Suspend a thread",
2156 		"[ <id> ]\n"
2157 		"Suspends the thread with the given ID. If no ID argument is given\n"
2158 		"the current thread is selected.\n"
2159 		"  <id>  - The ID of the thread.\n", 0);
2160 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2161 		"<id>\n"
2162 		"Resumes the specified thread, if it is currently suspended.\n"
2163 		"  <id>  - The ID of the thread.\n", 0);
2164 	add_debugger_command_etc("drop", &drop_into_debugger,
2165 		"Drop a thread into the userland debugger",
2166 		"<id>\n"
2167 		"Drops the specified (userland) thread into the userland debugger\n"
2168 		"after leaving the kernel debugger.\n"
2169 		"  <id>  - The ID of the thread.\n", 0);
2170 	add_debugger_command_etc("priority", &set_thread_prio,
2171 		"Set a thread's priority",
2172 		"<priority> [ <id> ]\n"
2173 		"Sets the priority of the thread with the specified ID to the given\n"
2174 		"priority. If no thread ID is given, the current thread is selected.\n"
2175 		"  <priority>  - The thread's new priority (0 - 120)\n"
2176 		"  <id>        - The ID of the thread.\n", 0);
2177 
2178 	return B_OK;
2179 }
2180 
2181 
2182 status_t
2183 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2184 {
2185 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2186 	// so that get_current_cpu and friends will work, which is crucial for
2187 	// a lot of low level routines
2188 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2189 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2190 	return B_OK;
2191 }
2192 
2193 
2194 //	#pragma mark - thread blocking API
2195 
2196 
2197 static status_t
2198 thread_block_timeout(timer* timer)
2199 {
2200 	// The timer has been installed with B_TIMER_ACQUIRE_THREAD_LOCK, so
2201 	// we're holding the thread lock already. This makes things comfortably
2202 	// easy.
2203 
2204 	struct thread* thread = (struct thread*)timer->user_data;
2205 	if (thread_unblock_locked(thread, B_TIMED_OUT)) {
2206 		// We actually woke up the thread. If it has a higher priority than the
2207 		// currently running thread, we invoke the scheduler.
2208 		// TODO: Is this really such a good idea or should we do that only when
2209 		// the woken up thread has realtime priority?
2210 		if (thread->priority > thread_get_current_thread()->priority)
2211 			return B_INVOKE_SCHEDULER;
2212 	}
2213 
2214 	return B_HANDLED_INTERRUPT;
2215 }
2216 
2217 
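/*!	Blocks the current thread. This is meant to be used together with a
	preceding thread_prepare_to_block() call; see snooze_etc() below for the
	usual pattern. The return value is the status the thread was unblocked
	with.
*/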
2218 status_t
2219 thread_block()
2220 {
2221 	InterruptsSpinLocker _(gThreadSpinlock);
2222 	return thread_block_locked(thread_get_current_thread());
2223 }
2224 
2225 
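/*!	Unblocks the thread with the given ID using the given status. Returns
	whether the thread was actually woken up by this call; false is also
	returned if no thread with that ID exists.
*/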
2226 bool
2227 thread_unblock(thread_id threadID, status_t status)
2228 {
2229 	InterruptsSpinLocker _(gThreadSpinlock);
2230 
2231 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2232 	if (thread == NULL)
2233 		return false;
2234 	return thread_unblock_locked(thread, status);
2235 }
2236 
2237 
2238 status_t
2239 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2240 {
2241 	InterruptsSpinLocker _(gThreadSpinlock);
2242 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
2243 }
2244 
2245 
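/*!	Blocks the current thread with a timeout. The thread spinlock must be
	held when calling this function. For a finite relative or absolute
	timeout a one-shot timer is installed that unblocks the thread with
	B_TIMED_OUT when it fires; if the thread is unblocked for any other
	reason first, the timer is cancelled again.
*/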
2246 status_t
2247 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
2248 {
2249 	struct thread* thread = thread_get_current_thread();
2250 
2251 	if (thread->wait.status != 1)
2252 		return thread->wait.status;
2253 
2254 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2255 		&& timeout != B_INFINITE_TIMEOUT;
2256 
2257 	if (useTimer) {
2258 		// Timer flags: absolute/relative + "acquire thread lock". The latter
2259 		// avoids nasty race conditions and deadlock problems that could
2260 		// otherwise occur between our cancel_timer() and a concurrently
2261 		// executing thread_block_timeout().
2262 		uint32 timerFlags;
2263 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2264 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2265 		} else {
2266 			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2267 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2268 				timeout -= rtc_boot_time();
2269 		}
2270 		timerFlags |= B_TIMER_ACQUIRE_THREAD_LOCK;
2271 
2272 		// install the timer
2273 		thread->wait.unblock_timer.user_data = thread;
2274 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2275 			timerFlags);
2276 	}
2277 
2278 	// block
2279 	status_t error = thread_block_locked(thread);
2280 
2281 	// cancel timer, if it didn't fire
2282 	if (error != B_TIMED_OUT && useTimer)
2283 		cancel_timer(&thread->wait.unblock_timer);
2284 
2285 	return error;
2286 }
2287 
2288 
2289 /*!	Thread spinlock must be held.
2290 */
2291 static status_t
2292 user_unblock_thread(thread_id threadID, status_t status)
2293 {
2294 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2295 	if (thread == NULL)
2296 		return B_BAD_THREAD_ID;
2297 	if (thread->user_thread == NULL)
2298 		return B_NOT_ALLOWED;
2299 
2300 	if (thread->user_thread->wait_status > 0) {
2301 		thread->user_thread->wait_status = status;
2302 		thread_unblock_locked(thread, status);
2303 	}
2304 
2305 	return B_OK;
2306 }
2307 
2308 
2309 //	#pragma mark - public kernel API
2310 
2311 
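/*!	Terminates the calling thread with the given return value. For threads
	of userland teams this is done by sending the thread a SIGKILLTHR
	signal; kernel threads call thread_exit() directly.
*/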
2312 void
2313 exit_thread(status_t returnValue)
2314 {
2315 	struct thread *thread = thread_get_current_thread();
2316 
2317 	thread->exit.status = returnValue;
2318 	thread->exit.reason = THREAD_RETURN_EXIT;
2319 
2320 	// if called from a kernel thread, we don't deliver the signal,
2321 	// we just exit directly to keep the user space behaviour of
2322 	// this function
2323 	if (thread->team != team_get_kernel_team())
2324 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2325 	else
2326 		thread_exit();
2327 }
2328 
2329 
2330 status_t
2331 kill_thread(thread_id id)
2332 {
2333 	if (id <= 0)
2334 		return B_BAD_VALUE;
2335 
2336 	return send_signal(id, SIGKILLTHR);
2337 }
2338 
2339 
2340 status_t
2341 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2342 {
2343 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2344 }
2345 
2346 
2347 int32
2348 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2349 {
2350 	return receive_data_etc(sender, buffer, bufferSize, 0);
2351 }
2352 
2353 
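/*!	Returns whether there are messages pending in the thread's message
	queue, i.e. whether the read semaphore has a non-zero count. Note that,
	as currently implemented, the given thread ID is ignored and the calling
	thread's queue is checked.
*/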
2354 bool
2355 has_data(thread_id thread)
2356 {
2357 	int32 count;
2358 
2359 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
2360 			&count) != B_OK)
2361 		return false;
2362 
2363 	return count != 0;
2364 }
2365 
2366 
2367 status_t
2368 _get_thread_info(thread_id id, thread_info *info, size_t size)
2369 {
2370 	status_t status = B_OK;
2371 	struct thread *thread;
2372 	cpu_status state;
2373 
2374 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2375 		return B_BAD_VALUE;
2376 
2377 	state = disable_interrupts();
2378 	GRAB_THREAD_LOCK();
2379 
2380 	thread = thread_get_thread_struct_locked(id);
2381 	if (thread == NULL) {
2382 		status = B_BAD_VALUE;
2383 		goto err;
2384 	}
2385 
2386 	fill_thread_info(thread, info, size);
2387 
2388 err:
2389 	RELEASE_THREAD_LOCK();
2390 	restore_interrupts(state);
2391 
2392 	return status;
2393 }
2394 
2395 
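/*!	Iterates over the threads of the given team. The cookie holds the next
	thread ID to examine and is advanced past the thread that was returned;
	an iteration is started with a cookie of 0. Returns B_BAD_VALUE when
	there are no more threads (or when the team is invalid).
*/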
2396 status_t
2397 _get_next_thread_info(team_id team, int32 *_cookie, thread_info *info,
2398 	size_t size)
2399 {
2400 	status_t status = B_BAD_VALUE;
2401 	struct thread *thread = NULL;
2402 	cpu_status state;
2403 	int slot;
2404 	thread_id lastThreadID;
2405 
2406 	if (info == NULL || size != sizeof(thread_info) || team < B_OK)
2407 		return B_BAD_VALUE;
2408 
2409 	if (team == B_CURRENT_TEAM)
2410 		team = team_get_current_team_id();
2411 	else if (!team_is_valid(team))
2412 		return B_BAD_VALUE;
2413 
2414 	slot = *_cookie;
2415 
2416 	state = disable_interrupts();
2417 	GRAB_THREAD_LOCK();
2418 
2419 	lastThreadID = peek_next_thread_id();
2420 	if (slot >= lastThreadID)
2421 		goto err;
2422 
2423 	while (slot < lastThreadID
2424 		&& (!(thread = thread_get_thread_struct_locked(slot))
2425 			|| thread->team->id != team))
2426 		slot++;
2427 
2428 	if (thread != NULL && thread->team->id == team) {
2429 		fill_thread_info(thread, info, size);
2430 
2431 		*_cookie = slot + 1;
2432 		status = B_OK;
2433 	}
2434 
2435 err:
2436 	RELEASE_THREAD_LOCK();
2437 	restore_interrupts(state);
2438 
2439 	return status;
2440 }
2441 
2442 
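/*!	Returns the ID of the first thread found with the given name, or
	B_NAME_NOT_FOUND if there is none. A NULL name returns the ID of the
	calling thread. Since the whole thread hash is scanned with the thread
	lock held, this is a comparatively expensive operation.
*/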
2443 thread_id
2444 find_thread(const char *name)
2445 {
2446 	struct hash_iterator iterator;
2447 	struct thread *thread;
2448 	cpu_status state;
2449 
2450 	if (name == NULL)
2451 		return thread_get_current_thread_id();
2452 
2453 	state = disable_interrupts();
2454 	GRAB_THREAD_LOCK();
2455 
2456 	// ToDo: this might not be in the same order as find_thread() in BeOS
2457 	//		which could be theoretically problematic.
2458 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2459 	//		cheap either - although this function is probably used very rarely.
2460 
2461 	hash_open(sThreadHash, &iterator);
2462 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
2463 			!= NULL) {
2464 		// Search through hash
2465 		if (thread->name != NULL && !strcmp(thread->name, name)) {
2466 			thread_id id = thread->id;
2467 
2468 			RELEASE_THREAD_LOCK();
2469 			restore_interrupts(state);
2470 			return id;
2471 		}
2472 	}
2473 
2474 	RELEASE_THREAD_LOCK();
2475 	restore_interrupts(state);
2476 
2477 	return B_NAME_NOT_FOUND;
2478 }
2479 
2480 
2481 status_t
2482 rename_thread(thread_id id, const char *name)
2483 {
2484 	struct thread *thread = thread_get_current_thread();
2485 	status_t status = B_BAD_THREAD_ID;
2486 	cpu_status state;
2487 
2488 	if (name == NULL)
2489 		return B_BAD_VALUE;
2490 
2491 	state = disable_interrupts();
2492 	GRAB_THREAD_LOCK();
2493 
2494 	if (thread->id != id)
2495 		thread = thread_get_thread_struct_locked(id);
2496 
2497 	if (thread != NULL) {
2498 		if (thread->team == thread_get_current_thread()->team) {
2499 			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2500 			status = B_OK;
2501 		} else
2502 			status = B_NOT_ALLOWED;
2503 	}
2504 
2505 	RELEASE_THREAD_LOCK();
2506 	restore_interrupts(state);
2507 
2508 	return status;
2509 }
2510 
2511 
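/*!	Sets the priority of the thread with the given ID, clamping the value to
	the allowed range, and returns the thread's previous priority. Idle
	threads may not be re-prioritized (B_NOT_ALLOWED); an unknown ID yields
	B_BAD_THREAD_ID.
*/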
2512 status_t
2513 set_thread_priority(thread_id id, int32 priority)
2514 {
2515 	struct thread *thread;
2516 	int32 oldPriority;
2517 
2518 	// make sure the passed in priority is within bounds
2519 	if (priority > THREAD_MAX_SET_PRIORITY)
2520 		priority = THREAD_MAX_SET_PRIORITY;
2521 	if (priority < THREAD_MIN_SET_PRIORITY)
2522 		priority = THREAD_MIN_SET_PRIORITY;
2523 
2524 	thread = thread_get_current_thread();
2525 	if (thread->id == id) {
2526 		if (thread_is_idle_thread(thread))
2527 			return B_NOT_ALLOWED;
2528 
2529 		// We are the thread in question, so we know we aren't in the run
2530 		// queue, and we can manipulate our structure directly.
2531 		oldPriority = thread->priority;
2532 			// Note that this might not return the correct value if we are
2533 			// preempted here, and another thread changes our priority before
2534 			// the next line is executed.
2535 		thread->priority = thread->next_priority = priority;
2536 	} else {
2537 		InterruptsSpinLocker _(gThreadSpinlock);
2538 
2539 		thread = thread_get_thread_struct_locked(id);
2540 		if (thread == NULL)
2541 			return B_BAD_THREAD_ID;
2542 
2543 		if (thread_is_idle_thread(thread))
2544 			return B_NOT_ALLOWED;
2545 
2546 		oldPriority = thread->priority;
2547 		scheduler_set_thread_priority(thread, priority);
2548 	}
2549 
2550 	return oldPriority;
2551 }
2552 
2553 
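/*!	Puts the calling thread to sleep for the given timeout. Only
	B_SYSTEM_TIMEBASE is supported. Timing out (or not having to block at
	all) is reported as B_OK; any other unblock status is passed through to
	the caller.
*/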
2554 status_t
2555 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2556 {
2557 	status_t status;
2558 
2559 	if (timebase != B_SYSTEM_TIMEBASE)
2560 		return B_BAD_VALUE;
2561 
2562 	InterruptsSpinLocker _(gThreadSpinlock);
2563 	struct thread* thread = thread_get_current_thread();
2564 
2565 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, NULL);
2566 	status = thread_block_with_timeout_locked(flags, timeout);
2567 
2568 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2569 		return B_OK;
2570 
2571 	return status;
2572 }
2573 
2574 
2575 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2576 status_t
2577 snooze(bigtime_t timeout)
2578 {
2579 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2580 }
2581 
2582 
2583 /*!
2584 	snooze_until() for internal kernel use only; doesn't interrupt on
2585 	signals.
2586 */
2587 status_t
2588 snooze_until(bigtime_t timeout, int timebase)
2589 {
2590 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2591 }
2592 
2593 
2594 status_t
2595 wait_for_thread(thread_id thread, status_t *_returnCode)
2596 {
2597 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2598 }
2599 
2600 
2601 status_t
2602 suspend_thread(thread_id id)
2603 {
2604 	if (id <= 0)
2605 		return B_BAD_VALUE;
2606 
2607 	return send_signal(id, SIGSTOP);
2608 }
2609 
2610 
2611 status_t
2612 resume_thread(thread_id id)
2613 {
2614 	if (id <= 0)
2615 		return B_BAD_VALUE;
2616 
2617 	return send_signal_etc(id, SIGCONT, SIGNAL_FLAG_DONT_RESTART_SYSCALL);
2618 		// This retains compatibility with BeOS, which documents that the
2619 		// combination of suspend_thread() and resume_thread() interrupts
2620 		// threads waiting on semaphores.
2621 }
2622 
2623 
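/*!	Convenience wrapper around create_thread() for kernel-only threads: it
	fills in thread_creation_attributes for the kernel team with the given
	entry function, name, priority, and argument. The new thread is not
	started automatically; resume it with resume_thread(), as thread_init()
	does for the undertaker thread. A purely illustrative caller (my_worker
	is a placeholder) would look like this:

		thread_id worker = spawn_kernel_thread(&my_worker, "my worker",
			B_NORMAL_PRIORITY, NULL);
		if (worker >= 0)
			resume_thread(worker);
*/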
2624 thread_id
2625 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2626 	void *arg)
2627 {
2628 	thread_creation_attributes attributes;
2629 	attributes.entry = (thread_entry_func)function;
2630 	attributes.name = name;
2631 	attributes.priority = priority;
2632 	attributes.args1 = arg;
2633 	attributes.args2 = NULL;
2634 	attributes.stack_address = NULL;
2635 	attributes.stack_size = 0;
2636 	attributes.team = team_get_kernel_team()->id;
2637 	attributes.thread = -1;
2638 
2639 	return create_thread(attributes, true);
2640 }
2641 
2642 
2643 int
2644 getrlimit(int resource, struct rlimit * rlp)
2645 {
2646 	status_t error = common_getrlimit(resource, rlp);
2647 	if (error != B_OK) {
2648 		errno = error;
2649 		return -1;
2650 	}
2651 
2652 	return 0;
2653 }
2654 
2655 
2656 int
2657 setrlimit(int resource, const struct rlimit * rlp)
2658 {
2659 	status_t error = common_setrlimit(resource, rlp);
2660 	if (error != B_OK) {
2661 		errno = error;
2662 		return -1;
2663 	}
2664 
2665 	return 0;
2666 }
2667 
2668 
2669 //	#pragma mark - syscalls
2670 
2671 
2672 void
2673 _user_exit_thread(status_t returnValue)
2674 {
2675 	exit_thread(returnValue);
2676 }
2677 
2678 
2679 status_t
2680 _user_kill_thread(thread_id thread)
2681 {
2682 	return kill_thread(thread);
2683 }
2684 
2685 
2686 status_t
2687 _user_resume_thread(thread_id thread)
2688 {
2689 	return resume_thread(thread);
2690 }
2691 
2692 
2693 status_t
2694 _user_suspend_thread(thread_id thread)
2695 {
2696 	return suspend_thread(thread);
2697 }
2698 
2699 
2700 status_t
2701 _user_rename_thread(thread_id thread, const char *userName)
2702 {
2703 	char name[B_OS_NAME_LENGTH];
2704 
2705 	if (!IS_USER_ADDRESS(userName)
2706 		|| userName == NULL
2707 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2708 		return B_BAD_ADDRESS;
2709 
2710 	return rename_thread(thread, name);
2711 }
2712 
2713 
2714 int32
2715 _user_set_thread_priority(thread_id thread, int32 newPriority)
2716 {
2717 	return set_thread_priority(thread, newPriority);
2718 }
2719 
2720 
2721 thread_id
2722 _user_spawn_thread(thread_creation_attributes* userAttributes)
2723 {
2724 	thread_creation_attributes attributes;
2725 	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
2726 		|| user_memcpy(&attributes, userAttributes,
2727 				sizeof(attributes)) != B_OK) {
2728 		return B_BAD_ADDRESS;
2729 	}
2730 
2731 	if (attributes.stack_size != 0
2732 		&& (attributes.stack_size < MIN_USER_STACK_SIZE
2733 			|| attributes.stack_size > MAX_USER_STACK_SIZE)) {
2734 		return B_BAD_VALUE;
2735 	}
2736 
2737 	char name[B_OS_NAME_LENGTH];
2738 	thread_id threadID;
2739 
2740 	if (!IS_USER_ADDRESS(attributes.entry) || attributes.entry == NULL
2741 		|| (attributes.stack_address != NULL
2742 			&& !IS_USER_ADDRESS(attributes.stack_address))
2743 		|| (attributes.name != NULL && (!IS_USER_ADDRESS(attributes.name)
2744 			|| user_strlcpy(name, attributes.name, B_OS_NAME_LENGTH) < 0)))
2745 		return B_BAD_ADDRESS;
2746 
2747 	attributes.name = attributes.name != NULL ? name : "user thread";
2748 	attributes.team = thread_get_current_thread()->team->id;
2749 	attributes.thread = -1;
2750 
2751 	threadID = create_thread(attributes, false);
2752 
2753 	if (threadID >= 0)
2754 		user_debug_thread_created(threadID);
2755 
2756 	return threadID;
2757 }
2758 
2759 
2760 status_t
2761 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2762 {
2763 	// NOTE: We only know the system timebase at the moment.
2764 	syscall_restart_handle_timeout_pre(flags, timeout);
2765 
2766 	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2767 
2768 	return syscall_restart_handle_timeout_post(error, timeout);
2769 }
2770 
2771 
2772 void
2773 _user_thread_yield(void)
2774 {
2775 	thread_yield(true);
2776 }
2777 
2778 
2779 status_t
2780 _user_get_thread_info(thread_id id, thread_info *userInfo)
2781 {
2782 	thread_info info;
2783 	status_t status;
2784 
2785 	if (!IS_USER_ADDRESS(userInfo))
2786 		return B_BAD_ADDRESS;
2787 
2788 	status = _get_thread_info(id, &info, sizeof(thread_info));
2789 
2790 	if (status >= B_OK
2791 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2792 		return B_BAD_ADDRESS;
2793 
2794 	return status;
2795 }
2796 
2797 
2798 status_t
2799 _user_get_next_thread_info(team_id team, int32 *userCookie,
2800 	thread_info *userInfo)
2801 {
2802 	status_t status;
2803 	thread_info info;
2804 	int32 cookie;
2805 
2806 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2807 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2808 		return B_BAD_ADDRESS;
2809 
2810 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2811 	if (status < B_OK)
2812 		return status;
2813 
2814 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2815 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2816 		return B_BAD_ADDRESS;
2817 
2818 	return status;
2819 }
2820 
2821 
2822 thread_id
2823 _user_find_thread(const char *userName)
2824 {
2825 	char name[B_OS_NAME_LENGTH];
2826 
2827 	if (userName == NULL)
2828 		return find_thread(NULL);
2829 
2830 	if (!IS_USER_ADDRESS(userName)
2831 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2832 		return B_BAD_ADDRESS;
2833 
2834 	return find_thread(name);
2835 }
2836 
2837 
2838 status_t
2839 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2840 {
2841 	status_t returnCode;
2842 	status_t status;
2843 
2844 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2845 		return B_BAD_ADDRESS;
2846 
2847 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2848 
2849 	if (status == B_OK && userReturnCode != NULL
2850 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
2851 		return B_BAD_ADDRESS;
2852 	}
2853 
2854 	return syscall_restart_handle_post(status);
2855 }
2856 
2857 
2858 bool
2859 _user_has_data(thread_id thread)
2860 {
2861 	return has_data(thread);
2862 }
2863 
2864 
2865 status_t
2866 _user_send_data(thread_id thread, int32 code, const void *buffer,
2867 	size_t bufferSize)
2868 {
2869 	if (!IS_USER_ADDRESS(buffer))
2870 		return B_BAD_ADDRESS;
2871 
2872 	return send_data_etc(thread, code, buffer, bufferSize,
2873 		B_KILL_CAN_INTERRUPT);
2874 		// supports userland buffers
2875 }
2876 
2877 
2878 status_t
2879 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2880 {
2881 	thread_id sender;
2882 	status_t code;
2883 
2884 	if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
2885 		|| !IS_USER_ADDRESS(buffer))
2886 		return B_BAD_ADDRESS;
2887 
2888 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2889 		// supports userland buffers
2890 
2891 	if (_userSender != NULL)
2892 		if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
2893 			return B_BAD_ADDRESS;
2894 
2895 	return code;
2896 }
2897 
2898 
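/*!	Syscall backend of the userland thread blocking API: blocks the calling
	thread until its user_thread->wait_status is set, be it by
	_user_unblock_thread(), a timeout, or an interrupting signal. If
	wait_status is already <= 0, the call returns immediately with that
	value.
*/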
2899 status_t
2900 _user_block_thread(uint32 flags, bigtime_t timeout)
2901 {
2902 	syscall_restart_handle_timeout_pre(flags, timeout);
2903 	flags |= B_CAN_INTERRUPT;
2904 
2905 	struct thread* thread = thread_get_current_thread();
2906 
2907 	InterruptsSpinLocker locker(gThreadSpinlock);
2908 
2909 	// check whether we have already been unblocked
2910 	if (thread->user_thread->wait_status <= 0)
2911 		return thread->user_thread->wait_status;
2912 
2913 	// nope, so wait
2914 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");
2915 	status_t status = thread_block_with_timeout_locked(flags, timeout);
2916 	thread->user_thread->wait_status = status;
2917 
2918 	return syscall_restart_handle_timeout_post(status, timeout);
2919 }
2920 
2921 
2922 status_t
2923 _user_unblock_thread(thread_id threadID, status_t status)
2924 {
2925 	InterruptsSpinLocker locker(gThreadSpinlock);
2926 	return user_unblock_thread(threadID, status);
2927 }
2928 
2929 
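/*!	Unblocks up to MAX_USER_THREADS_TO_UNBLOCK threads blocked in
	_user_block_thread(): the ID list is copied in from userland and the
	given status is applied to each of the threads.
*/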
2930 status_t
2931 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
2932 {
2933 	enum {
2934 		MAX_USER_THREADS_TO_UNBLOCK	= 128
2935 	};
2936 
2937 	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
2938 		return B_BAD_ADDRESS;
2939 	if (count > MAX_USER_THREADS_TO_UNBLOCK)
2940 		return B_BAD_VALUE;
2941 
2942 	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
2943 	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
2944 		return B_BAD_ADDRESS;
2945 
2946 	for (uint32 i = 0; i < count; i++)
2947 		user_unblock_thread(threads[i], status);
2948 
2949 	return B_OK;
2950 }
2951 
2952 
2953 // TODO: the following two functions don't belong here
2954 
2955 
2956 int
2957 _user_getrlimit(int resource, struct rlimit *urlp)
2958 {
2959 	struct rlimit rl;
2960 	int ret;
2961 
2962 	if (urlp == NULL)
2963 		return EINVAL;
2964 
2965 	if (!IS_USER_ADDRESS(urlp))
2966 		return B_BAD_ADDRESS;
2967 
2968 	ret = common_getrlimit(resource, &rl);
2969 
2970 	if (ret == 0) {
2971 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
2972 		if (ret < 0)
2973 			return ret;
2974 
2975 		return 0;
2976 	}
2977 
2978 	return ret;
2979 }
2980 
2981 
2982 int
2983 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
2984 {
2985 	struct rlimit resourceLimit;
2986 
2987 	if (userResourceLimit == NULL)
2988 		return EINVAL;
2989 
2990 	if (!IS_USER_ADDRESS(userResourceLimit)
2991 		|| user_memcpy(&resourceLimit, userResourceLimit,
2992 			sizeof(struct rlimit)) < B_OK)
2993 		return B_BAD_ADDRESS;
2994 
2995 	return common_setrlimit(resource, &resourceLimit);
2996 }
2997