xref: /haiku/src/system/kernel/thread.cpp (revision e0ef64750f3169cd634bb2f7a001e22488b05231)
1 /*
2  * Copyright 2005-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*! Threading routines */
12 
13 
14 #include <thread.h>
15 
16 #include <errno.h>
17 #include <malloc.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/resource.h>
22 
23 #include <OS.h>
24 
25 #include <util/AutoLock.h>
26 #include <util/khash.h>
27 
28 #include <arch/debug.h>
29 #include <boot/kernel_args.h>
30 #include <condition_variable.h>
31 #include <cpu.h>
32 #include <int.h>
33 #include <kimage.h>
34 #include <kscheduler.h>
35 #include <ksignal.h>
36 #include <Notifications.h>
37 #include <real_time_clock.h>
38 #include <slab/Slab.h>
39 #include <smp.h>
40 #include <syscalls.h>
41 #include <syscall_restart.h>
42 #include <team.h>
43 #include <tls.h>
44 #include <user_runtime.h>
45 #include <user_thread.h>
46 #include <vfs.h>
47 #include <vm/vm.h>
48 #include <vm/VMAddressSpace.h>
49 #include <wait_for_objects.h>
50 
51 
52 //#define TRACE_THREAD
53 #ifdef TRACE_THREAD
54 #	define TRACE(x) dprintf x
55 #else
56 #	define TRACE(x) ;
57 #endif
58 
59 
60 #define THREAD_MAX_MESSAGE_SIZE		65536
61 
62 
63 struct thread_key {
64 	thread_id id;
65 };
66 
67 // global
68 spinlock gThreadSpinlock = B_SPINLOCK_INITIALIZER;
69 
70 // thread list
71 static struct thread sIdleThreads[B_MAX_CPU_COUNT];
72 static hash_table *sThreadHash = NULL;
73 static thread_id sNextThreadID = 1;
74 
75 // some arbitrarily chosen limits - should probably depend on the available
76 // memory (the limit is not yet enforced)
77 static int32 sMaxThreads = 4096;
78 static int32 sUsedThreads = 0;
79 
80 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
81 	struct thread*	thread;
82 	team_id			teamID;
83 
84 	UndertakerEntry(struct thread* thread, team_id teamID)
85 		:
86 		thread(thread),
87 		teamID(teamID)
88 	{
89 	}
90 };
91 
92 
93 class ThreadNotificationService : public DefaultNotificationService {
94 public:
95 	ThreadNotificationService()
96 		: DefaultNotificationService("threads")
97 	{
98 	}
99 
100 	void Notify(uint32 eventCode, struct thread* thread)
101 	{
102 		char eventBuffer[128];
103 		KMessage event;
104 		event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
105 		event.AddInt32("event", eventCode);
106 		event.AddInt32("thread", thread->id);
107 		event.AddPointer("threadStruct", thread);
108 
109 		DefaultNotificationService::Notify(event, eventCode);
110 	}
111 };
112 
113 
114 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
115 static ConditionVariable sUndertakerCondition;
116 static ThreadNotificationService sNotificationService;
117 
118 
119 // object cache to allocate thread structures from
120 static object_cache* sThreadCache;
121 
122 static void thread_kthread_entry(void);
123 static void thread_kthread_exit(void);
124 
125 
126 /*!	Inserts a thread into a team.
127 	You must hold the team lock when you call this function.
128 */
129 static void
130 insert_thread_into_team(struct team *team, struct thread *thread)
131 {
132 	thread->team_next = team->thread_list;
133 	team->thread_list = thread;
134 	team->num_threads++;
135 
136 	if (team->num_threads == 1) {
137 		// this was the first thread
138 		team->main_thread = thread;
139 	}
140 	thread->team = team;
141 }
142 
143 
144 /*!	Removes a thread from a team.
145 	You must hold the team lock when you call this function.
146 */
147 static void
148 remove_thread_from_team(struct team *team, struct thread *thread)
149 {
150 	struct thread *temp, *last = NULL;
151 
152 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
153 		if (temp == thread) {
154 			if (last == NULL)
155 				team->thread_list = temp->team_next;
156 			else
157 				last->team_next = temp->team_next;
158 
159 			team->num_threads--;
160 			break;
161 		}
162 		last = temp;
163 	}
164 }
165 
166 
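/*!	Hash table callback: compares a thread structure with a thread_key.
	Returns 0 if the thread's ID matches the key, 1 otherwise.
*/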
167 static int
168 thread_struct_compare(void *_t, const void *_key)
169 {
170 	struct thread *thread = (struct thread*)_t;
171 	const struct thread_key *key = (const struct thread_key*)_key;
172 
173 	if (thread->id == key->id)
174 		return 0;
175 
176 	return 1;
177 }
178 
179 
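/*!	Hash table callback: computes the hash value for a thread structure or,
	if \a _t is \c NULL, for the given thread_key. Both simply use the thread
	ID modulo the table range.
*/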
180 static uint32
181 thread_struct_hash(void *_t, const void *_key, uint32 range)
182 {
183 	struct thread *thread = (struct thread*)_t;
184 	const struct thread_key *key = (const struct thread_key*)_key;
185 
186 	if (thread != NULL)
187 		return thread->id % range;
188 
189 	return (uint32)key->id % range;
190 }
191 
192 
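/*!	Resets the thread's signal state: clears pending and blocked signals, all
	signal handlers, and the alternate signal stack settings.
*/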
193 static void
194 reset_signals(struct thread *thread)
195 {
196 	thread->sig_pending = 0;
197 	thread->sig_block_mask = 0;
198 	thread->sig_temp_enabled = 0;
199 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
200 	thread->signal_stack_base = 0;
201 	thread->signal_stack_size = 0;
202 	thread->signal_stack_enabled = false;
203 }
204 
205 
206 /*!	Allocates and fills in a thread structure.
207 
208 	\param threadID The ID to be assigned to the new thread. If
209 		  \code < 0 \endcode a fresh one is allocated.
210 	\param inthread initialize this thread struct if non-NULL
211 */
212 static struct thread *
213 create_thread_struct(struct thread *inthread, const char *name,
214 	thread_id threadID, struct cpu_ent *cpu)
215 {
216 	struct thread *thread;
217 	char temp[64];
218 
219 	if (inthread == NULL) {
220 		thread = (struct thread*)object_cache_alloc(sThreadCache, 0);
221 		if (thread == NULL)
222 			return NULL;
223 
224 		scheduler_on_thread_create(thread);
225 			// TODO: We could use the object cache object
226 			// constructor/destructor!
227 	} else
228 		thread = inthread;
229 
230 	if (name != NULL)
231 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
232 	else
233 		strcpy(thread->name, "unnamed thread");
234 
235 	thread->flags = 0;
236 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
237 	thread->team = NULL;
238 	thread->cpu = cpu;
239 	thread->previous_cpu = NULL;
240 	thread->pinned_to_cpu = 0;
241 	thread->fault_handler = 0;
242 	thread->page_faults_allowed = 1;
243 	thread->kernel_stack_area = -1;
244 	thread->kernel_stack_base = 0;
245 	thread->user_stack_area = -1;
246 	thread->user_stack_base = 0;
247 	thread->user_local_storage = 0;
248 	thread->kernel_errno = 0;
249 	thread->team_next = NULL;
250 	thread->queue_next = NULL;
251 	thread->priority = thread->next_priority = -1;
252 	thread->io_priority = -1;
253 	thread->args1 = NULL;  thread->args2 = NULL;
254 	thread->alarm.period = 0;
255 	reset_signals(thread);
256 	thread->in_kernel = true;
257 	thread->was_yielded = false;
258 	thread->user_time = 0;
259 	thread->kernel_time = 0;
260 	thread->last_time = 0;
261 	thread->exit.status = 0;
262 	thread->exit.reason = 0;
263 	thread->exit.signal = 0;
264 	list_init(&thread->exit.waiters);
265 	thread->select_infos = NULL;
266 	thread->post_interrupt_callback = NULL;
267 	thread->post_interrupt_data = NULL;
268 	thread->user_thread = NULL;
269 
270 	sprintf(temp, "thread_%ld_retcode_sem", thread->id);
271 	thread->exit.sem = create_sem(0, temp);
272 	if (thread->exit.sem < B_OK)
273 		goto err1;
274 
275 	sprintf(temp, "%s send", thread->name);
276 	thread->msg.write_sem = create_sem(1, temp);
277 	if (thread->msg.write_sem < B_OK)
278 		goto err2;
279 
280 	sprintf(temp, "%s receive", thread->name);
281 	thread->msg.read_sem = create_sem(0, temp);
282 	if (thread->msg.read_sem < B_OK)
283 		goto err3;
284 
285 	if (arch_thread_init_thread_struct(thread) < B_OK)
286 		goto err4;
287 
288 	return thread;
289 
290 err4:
291 	delete_sem(thread->msg.read_sem);
292 err3:
293 	delete_sem(thread->msg.write_sem);
294 err2:
295 	delete_sem(thread->exit.sem);
296 err1:
297 	// ToDo: put them in the dead queue instead?
298 	if (inthread == NULL) {
299 		scheduler_on_thread_destroy(thread);
300 		object_cache_free(sThreadCache, thread, 0);
301 	}
302 
303 	return NULL;
304 }
305 
306 
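/*!	Counterpart to create_thread_struct(): deletes the thread's semaphores and
	returns the structure to the thread object cache.
*/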
307 static void
308 delete_thread_struct(struct thread *thread)
309 {
310 	delete_sem(thread->exit.sem);
311 	delete_sem(thread->msg.write_sem);
312 	delete_sem(thread->msg.read_sem);
313 
314 	scheduler_on_thread_destroy(thread);
315 	object_cache_free(sThreadCache, thread, 0);
316 }
317 
318 
319 /*! This function gets run by a new thread before anything else */
320 static void
321 thread_kthread_entry(void)
322 {
323 	struct thread *thread = thread_get_current_thread();
324 
325 	// The thread is new and has been scheduled the first time. Notify the user
326 	// debugger code.
327 	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
328 		user_debug_thread_scheduled(thread);
329 
330 	// Simulate the thread spinlock release that would occur if the thread had
331 	// been rescheduled from (that didn't happen because the thread is new).
332 	RELEASE_THREAD_LOCK();
333 
334 	// start tracking time
335 	thread->last_time = system_time();
336 
337 	enable_interrupts(); // this essentially simulates a return-from-interrupt
338 }
339 
340 
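/*!	Called when a thread's kernel entry function returns: marks the exit as a
	normal one and calls thread_exit().
*/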
341 static void
342 thread_kthread_exit(void)
343 {
344 	struct thread *thread = thread_get_current_thread();
345 
346 	thread->exit.reason = THREAD_RETURN_EXIT;
347 	thread_exit();
348 }
349 
350 
351 /*!	Initializes the thread and jumps to its userspace entry point.
352 	This function is called at creation time of every user thread,
353 	but not for a team's main thread.
354 */
355 static int
356 _create_user_thread_kentry(void)
357 {
358 	struct thread *thread = thread_get_current_thread();
359 
360 	// jump to the entry point in user space
361 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
362 		thread->args1, thread->args2);
363 
364 	// only get here if the above call fails
365 	return 0;
366 }
367 
368 
369 /*! Initializes the thread and calls its kernel space entry point. */
370 static int
371 _create_kernel_thread_kentry(void)
372 {
373 	struct thread *thread = thread_get_current_thread();
374 	int (*func)(void *args) = (int (*)(void *))thread->entry;
375 
376 	// call the entry function with the appropriate args
377 	return func(thread->args1);
378 }
379 
380 
381 /*!	Creates a new thread in the team with the specified team ID.
382 
383 	\param threadID The ID to be assigned to the new thread. If
384 		  \code < 0 \endcode a fresh one is allocated.
385 */
386 static thread_id
387 create_thread(thread_creation_attributes& attributes, bool kernel)
388 {
389 	struct thread *thread, *currentThread;
390 	struct team *team;
391 	cpu_status state;
392 	char stack_name[B_OS_NAME_LENGTH];
393 	status_t status;
394 	bool abort = false;
395 	bool debugNewThread = false;
396 
397 	TRACE(("create_thread(%s, id = %ld, %s)\n", attributes.name,
398 		attributes.thread, kernel ? "kernel" : "user"));
399 
400 	thread = create_thread_struct(NULL, attributes.name, attributes.thread,
401 		NULL);
402 	if (thread == NULL)
403 		return B_NO_MEMORY;
404 
405 	thread->priority = attributes.priority == -1
406 		? B_NORMAL_PRIORITY : attributes.priority;
407 	thread->next_priority = thread->priority;
408 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
409 	thread->state = B_THREAD_SUSPENDED;
410 	thread->next_state = B_THREAD_SUSPENDED;
411 
412 	// init debug structure
413 	init_thread_debug_info(&thread->debug_info);
414 
415 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", attributes.name,
416 		thread->id);
417 	thread->kernel_stack_area = create_area(stack_name,
418 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
419 		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
420 		B_FULL_LOCK,
421 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
422 
423 	if (thread->kernel_stack_area < 0) {
424 		// we're not yet part of a team, so we can just bail out
425 		status = thread->kernel_stack_area;
426 
427 		dprintf("create_thread: error creating kernel stack: %s!\n",
428 			strerror(status));
429 
430 		delete_thread_struct(thread);
431 		return status;
432 	}
433 
434 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
435 		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
436 
437 	state = disable_interrupts();
438 	GRAB_THREAD_LOCK();
439 
440 	// If the new thread belongs to the same team as the current thread,
441 	// it may inherit some of the thread debug flags.
442 	currentThread = thread_get_current_thread();
443 	if (currentThread && currentThread->team->id == attributes.team) {
444 		// inherit all user flags...
445 		int32 debugFlags = currentThread->debug_info.flags
446 			& B_THREAD_DEBUG_USER_FLAG_MASK;
447 
448 		// ... except the syscall tracing flags, unless explicitly specified
449 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
450 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
451 				| B_THREAD_DEBUG_POST_SYSCALL);
452 		}
453 
454 		thread->debug_info.flags = debugFlags;
455 
456 		// stop the new thread, if desired
457 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
458 	}
459 
460 	// insert into global list
461 	hash_insert(sThreadHash, thread);
462 	sUsedThreads++;
463 	scheduler_on_thread_init(thread);
464 	RELEASE_THREAD_LOCK();
465 
466 	GRAB_TEAM_LOCK();
467 	// look at the team, make sure it's not being deleted
468 	team = team_get_team_struct_locked(attributes.team);
469 
470 	if (team == NULL || team->state == TEAM_STATE_DEATH
471 		|| team->death_entry != NULL) {
472 		abort = true;
473 	}
474 
475 	if (!abort && !kernel) {
476 		thread->user_thread = team_allocate_user_thread(team);
477 		abort = thread->user_thread == NULL;
478 	}
479 
480 	if (!abort) {
481 		// Debug the new thread, if the parent thread required that (see above),
482 		// or the respective global team debug flag is set. But only if a
483 		// debugger is installed for the team.
484 		debugNewThread |= (atomic_get(&team->debug_info.flags)
485 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
486 		if (debugNewThread
487 			&& (atomic_get(&team->debug_info.flags)
488 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
489 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
490 		}
491 
492 		insert_thread_into_team(team, thread);
493 	}
494 
495 	RELEASE_TEAM_LOCK();
496 	if (abort) {
497 		GRAB_THREAD_LOCK();
498 		hash_remove(sThreadHash, thread);
499 		RELEASE_THREAD_LOCK();
500 	}
501 	restore_interrupts(state);
502 	if (abort) {
503 		delete_area(thread->kernel_stack_area);
504 		delete_thread_struct(thread);
505 		return B_BAD_TEAM_ID;
506 	}
507 
508 	thread->args1 = attributes.args1;
509 	thread->args2 = attributes.args2;
510 	thread->entry = attributes.entry;
511 	status = thread->id;
512 
513 	// notify listeners
514 	sNotificationService.Notify(THREAD_ADDED, thread);
515 
516 	if (kernel) {
517 		// this sets up an initial kthread stack that runs the entry
518 
519 		// Note: whatever function wants to set up a user stack later for this
520 		// thread must initialize the TLS for it
521 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
522 			&thread_kthread_entry, &thread_kthread_exit);
523 	} else {
524 		// create user stack
525 
526 		// the stack will be between USER_STACK_REGION and the main thread stack
527 		// area (the user stack of the main thread is created in
528 		// team_create_team())
529 		if (attributes.stack_address == NULL) {
530 			thread->user_stack_base = USER_STACK_REGION;
531 			if (attributes.stack_size <= 0)
532 				thread->user_stack_size = USER_STACK_SIZE;
533 			else
534 				thread->user_stack_size = PAGE_ALIGN(attributes.stack_size);
535 			thread->user_stack_size += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
536 
537 			snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack",
538 				attributes.name, thread->id);
539 			virtual_address_restrictions virtualRestrictions = {};
540 			virtualRestrictions.address = (void*)thread->user_stack_base;
541 			virtualRestrictions.address_specification = B_BASE_ADDRESS;
542 			physical_address_restrictions physicalRestrictions = {};
543 			thread->user_stack_area = create_area_etc(team->id, stack_name,
544 				thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
545 				B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0,
546 				&virtualRestrictions, &physicalRestrictions,
547 				(void**)&thread->user_stack_base);
548 			if (thread->user_stack_area < B_OK
549 				|| arch_thread_init_tls(thread) < B_OK) {
550 				// great, we have a fully running thread without a (usable)
551 				// stack
552 				dprintf("create_thread: unable to create proper user stack!\n");
553 				status = thread->user_stack_area;
554 				kill_thread(thread->id);
555 			}
556 		} else {
557 			thread->user_stack_base = (addr_t)attributes.stack_address;
558 			thread->user_stack_size = attributes.stack_size;
559 		}
560 
561 		user_debug_update_new_thread_flags(thread->id);
562 
563 		// Copy the user entry over to the args field in the thread struct.
564 		// The function this will call will immediately switch the thread into
565 		// user space.
566 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
567 			&thread_kthread_entry, &thread_kthread_exit);
568 	}
569 
570 	return status;
571 }
572 
573 
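/*!	Kernel daemon that buries dead threads: waits for entries queued on
	sUndertakerEntries by thread_exit(), then deletes each thread's kernel
	stack area, removes it from the kernel team, and frees its thread
	structure.
*/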
574 static status_t
575 undertaker(void* /*args*/)
576 {
577 	while (true) {
578 		// wait for a thread to bury
579 		InterruptsSpinLocker locker(gThreadSpinlock);
580 
581 		while (sUndertakerEntries.IsEmpty()) {
582 			ConditionVariableEntry conditionEntry;
583 			sUndertakerCondition.Add(&conditionEntry);
584 			locker.Unlock();
585 
586 			conditionEntry.Wait();
587 
588 			locker.Lock();
589 		}
590 
591 		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
592 		locker.Unlock();
593 
594 		UndertakerEntry entry = *_entry;
595 			// we need a copy, since the original entry is on the thread's stack
596 
597 		// we've got an entry
598 		struct thread* thread = entry.thread;
599 
600 		// delete the old kernel stack area
601 		delete_area(thread->kernel_stack_area);
602 
603 		// remove this thread from all of the global lists
604 		disable_interrupts();
605 		GRAB_TEAM_LOCK();
606 
607 		remove_thread_from_team(team_get_kernel_team(), thread);
608 
609 		RELEASE_TEAM_LOCK();
610 		enable_interrupts();
611 			// needed for the debugger notification below
612 
613 		// free the thread structure
614 		delete_thread_struct(thread);
615 	}
616 
617 	// can never get here
618 	return B_OK;
619 }
620 
621 
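/*!	Returns the ID of the semaphore the given thread is currently waiting on,
	or -1 if it isn't blocked on a semaphore.
*/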
622 static sem_id
623 get_thread_wait_sem(struct thread* thread)
624 {
625 	if (thread->state == B_THREAD_WAITING
626 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
627 		return (sem_id)(addr_t)thread->wait.object;
628 	}
629 	return -1;
630 }
631 
632 
633 /*!	Fills the thread_info structure with information from the specified
634 	thread.
635 	The thread lock must be held when called.
636 */
637 static void
638 fill_thread_info(struct thread *thread, thread_info *info, size_t size)
639 {
640 	info->thread = thread->id;
641 	info->team = thread->team->id;
642 
643 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
644 
645 	if (thread->state == B_THREAD_WAITING) {
646 		info->state = B_THREAD_WAITING;
647 
648 		switch (thread->wait.type) {
649 			case THREAD_BLOCK_TYPE_SNOOZE:
650 				info->state = B_THREAD_ASLEEP;
651 				break;
652 
653 			case THREAD_BLOCK_TYPE_SEMAPHORE:
654 			{
655 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
656 				if (sem == thread->msg.read_sem)
657 					info->state = B_THREAD_RECEIVING;
658 				break;
659 			}
660 
661 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
662 			default:
663 				break;
664 		}
665 	} else
666 		info->state = (thread_state)thread->state;
667 
668 	info->priority = thread->priority;
669 	info->user_time = thread->user_time;
670 	info->kernel_time = thread->kernel_time;
671 	info->stack_base = (void *)thread->user_stack_base;
672 	info->stack_end = (void *)(thread->user_stack_base
673 		+ thread->user_stack_size);
674 	info->sem = get_thread_wait_sem(thread);
675 }
676 
677 
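/*!	Delivers a message to the target thread: acquires the target's
	msg.write_sem (each thread has a single message slot), copies the buffer
	into a kernel allocation, stores sender, code, size, and buffer in the
	target's message fields, and finally releases msg.read_sem to wake up the
	receiver.
*/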
678 static status_t
679 send_data_etc(thread_id id, int32 code, const void *buffer, size_t bufferSize,
680 	int32 flags)
681 {
682 	struct thread *target;
683 	sem_id cachedSem;
684 	cpu_status state;
685 	status_t status;
686 
687 	state = disable_interrupts();
688 	GRAB_THREAD_LOCK();
689 	target = thread_get_thread_struct_locked(id);
690 	if (!target) {
691 		RELEASE_THREAD_LOCK();
692 		restore_interrupts(state);
693 		return B_BAD_THREAD_ID;
694 	}
695 	cachedSem = target->msg.write_sem;
696 	RELEASE_THREAD_LOCK();
697 	restore_interrupts(state);
698 
699 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
700 		return B_NO_MEMORY;
701 
702 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
703 	if (status == B_INTERRUPTED) {
704 		// We got interrupted by a signal
705 		return status;
706 	}
707 	if (status != B_OK) {
708 		// Any other acquisition problems may be due to thread deletion
709 		return B_BAD_THREAD_ID;
710 	}
711 
712 	void* data;
713 	if (bufferSize > 0) {
714 		data = malloc(bufferSize);
715 		if (data == NULL)
716 			return B_NO_MEMORY;
717 		if (user_memcpy(data, buffer, bufferSize) != B_OK) {
718 			free(data);
719 			return B_BAD_DATA;
720 		}
721 	} else
722 		data = NULL;
723 
724 	state = disable_interrupts();
725 	GRAB_THREAD_LOCK();
726 
727 	// The target thread could have been deleted at this point
728 	target = thread_get_thread_struct_locked(id);
729 	if (target == NULL) {
730 		RELEASE_THREAD_LOCK();
731 		restore_interrupts(state);
732 		free(data);
733 		return B_BAD_THREAD_ID;
734 	}
735 
736 	// Save message information
737 	target->msg.sender = thread_get_current_thread()->id;
738 	target->msg.code = code;
739 	target->msg.size = bufferSize;
740 	target->msg.buffer = data;
741 	cachedSem = target->msg.read_sem;
742 
743 	RELEASE_THREAD_LOCK();
744 	restore_interrupts(state);
745 
746 	release_sem(cachedSem);
747 	return B_OK;
748 }
749 
750 
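/*!	Receives a message for the current thread: acquires msg.read_sem
	(honoring \a flags), copies up to \a bufferSize bytes of the stored
	message into \a buffer, frees the kernel copy, and releases msg.write_sem
	so the next sender may proceed. Returns the message code or, if the wait
	failed, the error code.
*/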
751 static int32
752 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
753 	int32 flags)
754 {
755 	struct thread *thread = thread_get_current_thread();
756 	status_t status;
757 	size_t size;
758 	int32 code;
759 
760 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
761 	if (status < B_OK) {
762 		// Actually, we're not supposed to return error codes
763 		// but since the only reason this can fail is that we
764 		// were killed, it's probably okay to do so (but also
765 		// meaningless).
766 		return status;
767 	}
768 
769 	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
770 		size = min_c(bufferSize, thread->msg.size);
771 		status = user_memcpy(buffer, thread->msg.buffer, size);
772 		if (status != B_OK) {
773 			free(thread->msg.buffer);
774 			release_sem(thread->msg.write_sem);
775 			return status;
776 		}
777 	}
778 
779 	*_sender = thread->msg.sender;
780 	code = thread->msg.code;
781 
782 	free(thread->msg.buffer);
783 	release_sem(thread->msg.write_sem);
784 
785 	return code;
786 }
787 
788 
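/*!	Backend for getrlimit(): RLIMIT_NOFILE and RLIMIT_NOVMON are delegated to
	the VFS, RLIMIT_CORE is reported as disabled (0/0), and RLIMIT_STACK
	reports the current thread's user stack size.
*/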
789 static status_t
790 common_getrlimit(int resource, struct rlimit * rlp)
791 {
792 	if (!rlp)
793 		return B_BAD_ADDRESS;
794 
795 	switch (resource) {
796 		case RLIMIT_NOFILE:
797 		case RLIMIT_NOVMON:
798 			return vfs_getrlimit(resource, rlp);
799 
800 		case RLIMIT_CORE:
801 			rlp->rlim_cur = 0;
802 			rlp->rlim_max = 0;
803 			return B_OK;
804 
805 		case RLIMIT_STACK:
806 		{
807 			struct thread *thread = thread_get_current_thread();
808 			if (!thread)
809 				return B_ERROR;
810 			rlp->rlim_cur = thread->user_stack_size;
811 			rlp->rlim_max = thread->user_stack_size;
812 			return B_OK;
813 		}
814 
815 		default:
816 			return EINVAL;
817 	}
818 
819 	return B_OK;
820 }
821 
822 
823 static status_t
824 common_setrlimit(int resource, const struct rlimit * rlp)
825 {
826 	if (!rlp)
827 		return B_BAD_ADDRESS;
828 
829 	switch (resource) {
830 		case RLIMIT_NOFILE:
831 		case RLIMIT_NOVMON:
832 			return vfs_setrlimit(resource, rlp);
833 
834 		case RLIMIT_CORE:
835 			// We don't support core files, so only allow setting it to 0/0.
836 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
837 				return EINVAL;
838 			return B_OK;
839 
840 		default:
841 			return EINVAL;
842 	}
843 
844 	return B_OK;
845 }
846 
847 
848 //	#pragma mark - debugger calls
849 
850 
851 static int
852 make_thread_unreal(int argc, char **argv)
853 {
854 	struct thread *thread;
855 	struct hash_iterator i;
856 	int32 id = -1;
857 
858 	if (argc > 2) {
859 		print_debugger_command_usage(argv[0]);
860 		return 0;
861 	}
862 
863 	if (argc > 1)
864 		id = strtoul(argv[1], NULL, 0);
865 
866 	hash_open(sThreadHash, &i);
867 
868 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
869 		if (id != -1 && thread->id != id)
870 			continue;
871 
872 		if (thread->priority > B_DISPLAY_PRIORITY) {
873 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
874 			kprintf("thread %ld made unreal\n", thread->id);
875 		}
876 	}
877 
878 	hash_close(sThreadHash, &i, false);
879 	return 0;
880 }
881 
882 
883 static int
884 set_thread_prio(int argc, char **argv)
885 {
886 	struct thread *thread;
887 	struct hash_iterator i;
888 	int32 id;
889 	int32 prio;
890 
891 	if (argc > 3 || argc < 2) {
892 		print_debugger_command_usage(argv[0]);
893 		return 0;
894 	}
895 
896 	prio = strtoul(argv[1], NULL, 0);
897 	if (prio > THREAD_MAX_SET_PRIORITY)
898 		prio = THREAD_MAX_SET_PRIORITY;
899 	if (prio < THREAD_MIN_SET_PRIORITY)
900 		prio = THREAD_MIN_SET_PRIORITY;
901 
902 	if (argc > 2)
903 		id = strtoul(argv[2], NULL, 0);
904 	else
905 		id = thread_get_current_thread()->id;
906 
907 	hash_open(sThreadHash, &i);
908 
909 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
910 		if (thread->id != id)
911 			continue;
912 		thread->priority = thread->next_priority = prio;
913 		kprintf("thread %ld set to priority %ld\n", id, prio);
914 		break;
915 	}
916 	if (!thread)
917 		kprintf("thread %ld (%#lx) not found\n", id, id);
918 
919 	hash_close(sThreadHash, &i, false);
920 	return 0;
921 }
922 
923 
924 static int
925 make_thread_suspended(int argc, char **argv)
926 {
927 	struct thread *thread;
928 	struct hash_iterator i;
929 	int32 id;
930 
931 	if (argc > 2) {
932 		print_debugger_command_usage(argv[0]);
933 		return 0;
934 	}
935 
936 	if (argc == 1)
937 		id = thread_get_current_thread()->id;
938 	else
939 		id = strtoul(argv[1], NULL, 0);
940 
941 	hash_open(sThreadHash, &i);
942 
943 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
944 		if (thread->id != id)
945 			continue;
946 
947 		thread->next_state = B_THREAD_SUSPENDED;
948 		kprintf("thread %ld suspended\n", id);
949 		break;
950 	}
951 	if (!thread)
952 		kprintf("thread %ld (%#lx) not found\n", id, id);
953 
954 	hash_close(sThreadHash, &i, false);
955 	return 0;
956 }
957 
958 
959 static int
960 make_thread_resumed(int argc, char **argv)
961 {
962 	struct thread *thread;
963 	struct hash_iterator i;
964 	int32 id;
965 
966 	if (argc != 2) {
967 		print_debugger_command_usage(argv[0]);
968 		return 0;
969 	}
970 
971 	// force user to enter a thread id, as using
972 	// the current thread is usually not intended
973 	id = strtoul(argv[1], NULL, 0);
974 
975 	hash_open(sThreadHash, &i);
976 
977 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
978 		if (thread->id != id)
979 			continue;
980 
981 		if (thread->state == B_THREAD_SUSPENDED) {
982 			scheduler_enqueue_in_run_queue(thread);
983 			kprintf("thread %ld resumed\n", thread->id);
984 		}
985 		break;
986 	}
987 	if (!thread)
988 		kprintf("thread %ld (%#lx) not found\n", id, id);
989 
990 	hash_close(sThreadHash, &i, false);
991 	return 0;
992 }
993 
994 
995 static int
996 drop_into_debugger(int argc, char **argv)
997 {
998 	status_t err;
999 	int32 id;
1000 
1001 	if (argc > 2) {
1002 		print_debugger_command_usage(argv[0]);
1003 		return 0;
1004 	}
1005 
1006 	if (argc == 1)
1007 		id = thread_get_current_thread()->id;
1008 	else
1009 		id = strtoul(argv[1], NULL, 0);
1010 
1011 	err = _user_debug_thread(id);
1012 	if (err)
1013 		kprintf("drop failed\n");
1014 	else
1015 		kprintf("thread %ld dropped into user debugger\n", id);
1016 
1017 	return 0;
1018 }
1019 
1020 
1021 static const char *
1022 state_to_text(struct thread *thread, int32 state)
1023 {
1024 	switch (state) {
1025 		case B_THREAD_READY:
1026 			return "ready";
1027 
1028 		case B_THREAD_RUNNING:
1029 			return "running";
1030 
1031 		case B_THREAD_WAITING:
1032 		{
1033 			if (thread != NULL) {
1034 				switch (thread->wait.type) {
1035 					case THREAD_BLOCK_TYPE_SNOOZE:
1036 						return "zzz";
1037 
1038 					case THREAD_BLOCK_TYPE_SEMAPHORE:
1039 					{
1040 						sem_id sem = (sem_id)(addr_t)thread->wait.object;
1041 						if (sem == thread->msg.read_sem)
1042 							return "receive";
1043 						break;
1044 					}
1045 				}
1046 			}
1047 
1048 			return "waiting";
1049 		}
1050 
1051 		case B_THREAD_SUSPENDED:
1052 			return "suspended";
1053 
1054 		case THREAD_STATE_FREE_ON_RESCHED:
1055 			return "death";
1056 
1057 		default:
1058 			return "UNKNOWN";
1059 	}
1060 }
1061 
1062 
1063 static void
1064 print_thread_list_table_head()
1065 {
1066 	kprintf("thread         id  state     wait for   object  cpu pri  stack    "
1067 		"  team  name\n");
1068 }
1069 
1070 
1071 static void
1072 _dump_thread_info(struct thread *thread, bool shortInfo)
1073 {
1074 	if (shortInfo) {
1075 		kprintf("%p %6ld  %-10s", thread, thread->id, state_to_text(thread,
1076 			thread->state));
1077 
1078 		// does it block on a semaphore or a condition variable?
1079 		if (thread->state == B_THREAD_WAITING) {
1080 			switch (thread->wait.type) {
1081 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1082 				{
1083 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1084 					if (sem == thread->msg.read_sem)
1085 						kprintf("                    ");
1086 					else
1087 						kprintf("sem  %12ld   ", sem);
1088 					break;
1089 				}
1090 
1091 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1092 					kprintf("cvar   %p   ", thread->wait.object);
1093 					break;
1094 
1095 				case THREAD_BLOCK_TYPE_SNOOZE:
1096 					kprintf("                    ");
1097 					break;
1098 
1099 				case THREAD_BLOCK_TYPE_SIGNAL:
1100 					kprintf("signal              ");
1101 					break;
1102 
1103 				case THREAD_BLOCK_TYPE_MUTEX:
1104 					kprintf("mutex  %p   ", thread->wait.object);
1105 					break;
1106 
1107 				case THREAD_BLOCK_TYPE_RW_LOCK:
1108 					kprintf("rwlock %p   ", thread->wait.object);
1109 					break;
1110 
1111 				case THREAD_BLOCK_TYPE_OTHER:
1112 					kprintf("other               ");
1113 					break;
1114 
1115 				default:
1116 					kprintf("???    %p   ", thread->wait.object);
1117 					break;
1118 			}
1119 		} else
1120 			kprintf("        -           ");
1121 
1122 		// on which CPU does it run?
1123 		if (thread->cpu)
1124 			kprintf("%2d", thread->cpu->cpu_num);
1125 		else
1126 			kprintf(" -");
1127 
1128 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1129 			(void *)thread->kernel_stack_base, thread->team->id,
1130 			thread->name != NULL ? thread->name : "<NULL>");
1131 
1132 		return;
1133 	}
1134 
1135 	// print the long info
1136 
1137 	struct death_entry *death = NULL;
1138 
1139 	kprintf("THREAD: %p\n", thread);
1140 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1141 	kprintf("name:               \"%s\"\n", thread->name);
1142 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1143 		thread->all_next, thread->team_next, thread->queue_next);
1144 	kprintf("priority:           %ld (next %ld, I/O: %ld)\n", thread->priority,
1145 		thread->next_priority, thread->io_priority);
1146 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1147 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1148 	kprintf("cpu:                %p ", thread->cpu);
1149 	if (thread->cpu)
1150 		kprintf("(%d)\n", thread->cpu->cpu_num);
1151 	else
1152 		kprintf("\n");
1153 	kprintf("sig_pending:        %#" B_PRIx32 " (blocked: %#" B_PRIx32
1154 		", temp enabled: %#" B_PRIx32 ")\n", thread->sig_pending,
1155 		thread->sig_block_mask, thread->sig_temp_enabled);
1156 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1157 
1158 	if (thread->state == B_THREAD_WAITING) {
1159 		kprintf("waiting for:        ");
1160 
1161 		switch (thread->wait.type) {
1162 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1163 			{
1164 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1165 				if (sem == thread->msg.read_sem)
1166 					kprintf("data\n");
1167 				else
1168 					kprintf("semaphore %ld\n", sem);
1169 				break;
1170 			}
1171 
1172 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1173 				kprintf("condition variable %p\n", thread->wait.object);
1174 				break;
1175 
1176 			case THREAD_BLOCK_TYPE_SNOOZE:
1177 				kprintf("snooze()\n");
1178 				break;
1179 
1180 			case THREAD_BLOCK_TYPE_SIGNAL:
1181 				kprintf("signal\n");
1182 				break;
1183 
1184 			case THREAD_BLOCK_TYPE_MUTEX:
1185 				kprintf("mutex %p\n", thread->wait.object);
1186 				break;
1187 
1188 			case THREAD_BLOCK_TYPE_RW_LOCK:
1189 				kprintf("rwlock %p\n", thread->wait.object);
1190 				break;
1191 
1192 			case THREAD_BLOCK_TYPE_OTHER:
1193 				kprintf("other (%s)\n", (char*)thread->wait.object);
1194 				break;
1195 
1196 			default:
1197 				kprintf("unknown (%p)\n", thread->wait.object);
1198 				break;
1199 		}
1200 	}
1201 
1202 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1203 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1204 	kprintf("entry:              %p\n", (void *)thread->entry);
1205 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1206 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1207 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1208 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1209 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1210 	kprintf("  exit.waiters:\n");
1211 	while ((death = (struct death_entry*)list_get_next_item(
1212 			&thread->exit.waiters, death)) != NULL) {
1213 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1214 	}
1215 
1216 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1217 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1218 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1219 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1220 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1221 	kprintf("user_thread:        %p\n", (void *)thread->user_thread);
1222 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1223 		strerror(thread->kernel_errno));
1224 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1225 	kprintf("user_time:          %Ld\n", thread->user_time);
1226 	kprintf("flags:              0x%lx\n", thread->flags);
1227 	kprintf("architecture dependent section:\n");
1228 	arch_thread_dump_info(&thread->arch_info);
1229 }
1230 
1231 
1232 static int
1233 dump_thread_info(int argc, char **argv)
1234 {
1235 	bool shortInfo = false;
1236 	int argi = 1;
1237 	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
1238 		shortInfo = true;
1239 		print_thread_list_table_head();
1240 		argi++;
1241 	}
1242 
1243 	if (argi == argc) {
1244 		_dump_thread_info(thread_get_current_thread(), shortInfo);
1245 		return 0;
1246 	}
1247 
1248 	for (; argi < argc; argi++) {
1249 		const char *name = argv[argi];
1250 		int32 id = strtoul(name, NULL, 0);
1251 
1252 		if (IS_KERNEL_ADDRESS(id)) {
1253 			// semi-hack
1254 			_dump_thread_info((struct thread *)id, shortInfo);
1255 			continue;
1256 		}
1257 
1258 		// walk through the thread list, trying to match name or id
1259 		bool found = false;
1260 		struct hash_iterator i;
1261 		hash_open(sThreadHash, &i);
1262 		struct thread *thread;
1263 		while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1264 			if (!strcmp(name, thread->name) || thread->id == id) {
1265 				_dump_thread_info(thread, shortInfo);
1266 				found = true;
1267 				break;
1268 			}
1269 		}
1270 		hash_close(sThreadHash, &i, false);
1271 
1272 		if (!found)
1273 			kprintf("thread \"%s\" (%ld) doesn't exist!\n", name, id);
1274 	}
1275 
1276 	return 0;
1277 }
1278 
1279 
1280 static int
1281 dump_thread_list(int argc, char **argv)
1282 {
1283 	struct thread *thread;
1284 	struct hash_iterator i;
1285 	bool realTimeOnly = false;
1286 	bool calling = false;
1287 	const char *callSymbol = NULL;
1288 	addr_t callStart = 0;
1289 	addr_t callEnd = 0;
1290 	int32 requiredState = 0;
1291 	team_id team = -1;
1292 	sem_id sem = -1;
1293 
1294 	if (!strcmp(argv[0], "realtime"))
1295 		realTimeOnly = true;
1296 	else if (!strcmp(argv[0], "ready"))
1297 		requiredState = B_THREAD_READY;
1298 	else if (!strcmp(argv[0], "running"))
1299 		requiredState = B_THREAD_RUNNING;
1300 	else if (!strcmp(argv[0], "waiting")) {
1301 		requiredState = B_THREAD_WAITING;
1302 
1303 		if (argc > 1) {
1304 			sem = strtoul(argv[1], NULL, 0);
1305 			if (sem == 0)
1306 				kprintf("ignoring invalid semaphore argument.\n");
1307 		}
1308 	} else if (!strcmp(argv[0], "calling")) {
1309 		if (argc < 2) {
1310 			kprintf("Need to give a symbol name or start and end arguments.\n");
1311 			return 0;
1312 		} else if (argc == 3) {
1313 			callStart = parse_expression(argv[1]);
1314 			callEnd = parse_expression(argv[2]);
1315 		} else
1316 			callSymbol = argv[1];
1317 
1318 		calling = true;
1319 	} else if (argc > 1) {
1320 		team = strtoul(argv[1], NULL, 0);
1321 		if (team == 0)
1322 			kprintf("ignoring invalid team argument.\n");
1323 	}
1324 
1325 	print_thread_list_table_head();
1326 
1327 	hash_open(sThreadHash, &i);
1328 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1329 		// filter out threads not matching the search criteria
1330 		if ((requiredState && thread->state != requiredState)
1331 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1332 					callStart, callEnd))
1333 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1334 			|| (team > 0 && thread->team->id != team)
1335 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1336 			continue;
1337 
1338 		_dump_thread_info(thread, true);
1339 	}
1340 	hash_close(sThreadHash, &i, false);
1341 	return 0;
1342 }
1343 
1344 
1345 //	#pragma mark - private kernel API
1346 
1347 
1348 void
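/*!	Terminates the current thread: moves it into the kernel team (shutting its
	own team down first, if it is the main thread), notifies waiters, select
	infos, listeners, and the debugger, deletes its message and exit
	semaphores and its user stack, queues an UndertakerEntry, and reschedules
	for the last time. Never returns.
*/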
1349 thread_exit(void)
1350 {
1351 	cpu_status state;
1352 	struct thread *thread = thread_get_current_thread();
1353 	struct team *team = thread->team;
1354 	thread_id parentID = -1;
1355 	status_t status;
1356 	struct thread_debug_info debugInfo;
1357 	team_id teamID = team->id;
1358 
1359 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1360 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1361 			? "due to signal" : "normally", thread->exit.status));
1362 
1363 	if (!are_interrupts_enabled())
1364 		panic("thread_exit() called with interrupts disabled!\n");
1365 
1366 	// boost our priority to get this over with
1367 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1368 
1369 	// Cancel previously installed alarm timer, if any
1370 	cancel_timer(&thread->alarm);
1371 
1372 	// remember the user stack area -- we will delete it below
1373 	area_id userStackArea = -1;
1374 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1375 		userStackArea = thread->user_stack_area;
1376 		thread->user_stack_area = -1;
1377 	}
1378 
1379 	struct job_control_entry *death = NULL;
1380 	struct death_entry* threadDeathEntry = NULL;
1381 	bool deleteTeam = false;
1382 	port_id debuggerPort = -1;
1383 
1384 	if (team != team_get_kernel_team()) {
1385 		user_debug_thread_exiting(thread);
1386 
1387 		if (team->main_thread == thread) {
1388 			// The main thread is exiting. Shut down the whole team.
1389 			deleteTeam = true;
1390 		} else {
1391 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1392 			team_free_user_thread(thread);
1393 		}
1394 
1395 		// remove this thread from the current team and add it to the kernel
1396 		// put the thread into the kernel team until it dies
1397 		state = disable_interrupts();
1398 		GRAB_TEAM_LOCK();
1399 
1400 		if (deleteTeam)
1401 			debuggerPort = team_shutdown_team(team, state);
1402 
1403 		GRAB_THREAD_LOCK();
1404 			// removing the thread and putting its death entry into the parent
1405 			// team needs to be an atomic operation
1406 
1407 		// remember how long this thread lasted
1408 		team->dead_threads_kernel_time += thread->kernel_time;
1409 		team->dead_threads_user_time += thread->user_time;
1410 
1411 		remove_thread_from_team(team, thread);
1412 		insert_thread_into_team(team_get_kernel_team(), thread);
1413 
1414 		if (team->death_entry != NULL) {
1415 			if (--team->death_entry->remaining_threads == 0)
1416 				team->death_entry->condition.NotifyOne(true, B_OK);
1417 		}
1418 
1419 		if (deleteTeam) {
1420 			struct team *parent = team->parent;
1421 
1422 			// remember who our parent was so we can send a signal
1423 			parentID = parent->id;
1424 
1425 			// Set the team job control state to "dead" and detach the job
1426 			// control entry from our team struct.
1427 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1428 			death = team->job_control_entry;
1429 			team->job_control_entry = NULL;
1430 
1431 			if (death != NULL) {
1432 				death->InitDeadState();
1433 
1434 				// team_set_job_control_state() already moved our entry
1435 				// into the parent's list. We just check the soft limit of
1436 				// death entries.
1437 				if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
1438 					death = parent->dead_children->entries.RemoveHead();
1439 					parent->dead_children->count--;
1440 				} else
1441 					death = NULL;
1442 
1443 				RELEASE_THREAD_LOCK();
1444 			} else
1445 				RELEASE_THREAD_LOCK();
1446 
1447 			team_remove_team(team);
1448 
1449 			send_signal_etc(parentID, SIGCHLD,
1450 				SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
1451 		} else {
1452 			// The thread is not the main thread. We store a thread death
1453 			// entry for it, unless someone is already waiting for it.
1454 			if (threadDeathEntry != NULL
1455 				&& list_is_empty(&thread->exit.waiters)) {
1456 				threadDeathEntry->thread = thread->id;
1457 				threadDeathEntry->status = thread->exit.status;
1458 				threadDeathEntry->reason = thread->exit.reason;
1459 				threadDeathEntry->signal = thread->exit.signal;
1460 
1461 				// add the entry -- remove an old one, if we hit the limit
1462 				list_add_item(&team->dead_threads, threadDeathEntry);
1463 				team->dead_threads_count++;
1464 				threadDeathEntry = NULL;
1465 
1466 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1467 					threadDeathEntry = (death_entry*)list_remove_head_item(
1468 						&team->dead_threads);
1469 					team->dead_threads_count--;
1470 				}
1471 			}
1472 
1473 			RELEASE_THREAD_LOCK();
1474 		}
1475 
1476 		RELEASE_TEAM_LOCK();
1477 
1478 		// swap address spaces, to make sure we're running on the kernel's pgdir
1479 		vm_swap_address_space(team->address_space, VMAddressSpace::Kernel());
1480 		restore_interrupts(state);
1481 
1482 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1483 	}
1484 
1485 	free(threadDeathEntry);
1486 
1487 	// delete the team if we're its main thread
1488 	if (deleteTeam) {
1489 		team_delete_team(team, debuggerPort);
1490 
1491 		// we need to delete any death entry that made it to here
1492 		delete death;
1493 	}
1494 
1495 	state = disable_interrupts();
1496 	GRAB_THREAD_LOCK();
1497 
1498 	// remove thread from hash, so it's no longer accessible
1499 	hash_remove(sThreadHash, thread);
1500 	sUsedThreads--;
1501 
1502 	// Stop debugging for this thread
1503 	debugInfo = thread->debug_info;
1504 	clear_thread_debug_info(&thread->debug_info, true);
1505 
1506 	// Remove the select infos. We notify them a little later.
1507 	select_info* selectInfos = thread->select_infos;
1508 	thread->select_infos = NULL;
1509 
1510 	RELEASE_THREAD_LOCK();
1511 	restore_interrupts(state);
1512 
1513 	destroy_thread_debug_info(&debugInfo);
1514 
1515 	// notify select infos
1516 	select_info* info = selectInfos;
1517 	while (info != NULL) {
1518 		select_sync* sync = info->sync;
1519 
1520 		notify_select_events(info, B_EVENT_INVALID);
1521 		info = info->next;
1522 		put_select_sync(sync);
1523 	}
1524 
1525 	// notify listeners
1526 	sNotificationService.Notify(THREAD_REMOVED, thread);
1527 
1528 	// shutdown the thread messaging
1529 
1530 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1531 	if (status == B_WOULD_BLOCK) {
1532 		// there is data waiting for us, so let us eat it
1533 		thread_id sender;
1534 
1535 		delete_sem(thread->msg.write_sem);
1536 			// first, let's remove all possibly waiting writers
1537 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1538 	} else {
1539 		// we probably own the semaphore here, and we're the last to do so
1540 		delete_sem(thread->msg.write_sem);
1541 	}
1542 	// now we can safely remove the msg.read_sem
1543 	delete_sem(thread->msg.read_sem);
1544 
1545 	// fill all death entries and delete the sem that others will use to wait on us
1546 	{
1547 		sem_id cachedExitSem = thread->exit.sem;
1548 		cpu_status state;
1549 
1550 		state = disable_interrupts();
1551 		GRAB_THREAD_LOCK();
1552 
1553 		// make sure no one will grab this semaphore again
1554 		thread->exit.sem = -1;
1555 
1556 		// fill all death entries
1557 		death_entry* entry = NULL;
1558 		while ((entry = (struct death_entry*)list_get_next_item(
1559 				&thread->exit.waiters, entry)) != NULL) {
1560 			entry->status = thread->exit.status;
1561 			entry->reason = thread->exit.reason;
1562 			entry->signal = thread->exit.signal;
1563 		}
1564 
1565 		RELEASE_THREAD_LOCK();
1566 		restore_interrupts(state);
1567 
1568 		delete_sem(cachedExitSem);
1569 	}
1570 
1571 	// delete the user stack, if this was a user thread
1572 	if (!deleteTeam && userStackArea >= 0) {
1573 		// We postponed deleting the user stack until now, since this way all
1574 		// notifications for the thread's death are out already and all other
1575 		// threads waiting for this thread's death and some object on its stack
1576 		// will wake up before we (try to) delete the stack area. Of most
1577 		// relevance is probably the case where this is the main thread and
1578 		// other threads use objects on its stack -- so we want them terminated
1579 		// first.
1580 		// When the team is deleted, all areas are deleted anyway, so we don't
1581 		// need to do that explicitly in that case.
1582 		vm_delete_area(teamID, userStackArea, true);
1583 	}
1584 
1585 	// notify the debugger
1586 	if (teamID != team_get_kernel_team_id())
1587 		user_debug_thread_deleted(teamID, thread->id);
1588 
1589 	// enqueue in the undertaker list and reschedule for the last time
1590 	UndertakerEntry undertakerEntry(thread, teamID);
1591 
1592 	disable_interrupts();
1593 	GRAB_THREAD_LOCK();
1594 
1595 	sUndertakerEntries.Add(&undertakerEntry);
1596 	sUndertakerCondition.NotifyOne(true);
1597 
1598 	thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
1599 	scheduler_reschedule();
1600 
1601 	panic("never can get here\n");
1602 }
1603 
1604 
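/*!	Returns the thread structure for the given ID, or \c NULL if no such
	thread exists. Acquires and releases the thread lock internally.
*/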
1605 struct thread *
1606 thread_get_thread_struct(thread_id id)
1607 {
1608 	struct thread *thread;
1609 	cpu_status state;
1610 
1611 	state = disable_interrupts();
1612 	GRAB_THREAD_LOCK();
1613 
1614 	thread = thread_get_thread_struct_locked(id);
1615 
1616 	RELEASE_THREAD_LOCK();
1617 	restore_interrupts(state);
1618 
1619 	return thread;
1620 }
1621 
1622 
1623 struct thread *
1624 thread_get_thread_struct_locked(thread_id id)
1625 {
1626 	struct thread_key key;
1627 
1628 	key.id = id;
1629 
1630 	return (struct thread*)hash_lookup(sThreadHash, &key);
1631 }
1632 
1633 
1634 /*!	Called in the interrupt handler code when a thread enters
1635 	the kernel for any reason.
1636 	Only tracks time for now.
1637 	Interrupts are disabled.
1638 */
1639 void
1640 thread_at_kernel_entry(bigtime_t now)
1641 {
1642 	struct thread *thread = thread_get_current_thread();
1643 
1644 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1645 
1646 	// track user time
1647 	thread->user_time += now - thread->last_time;
1648 	thread->last_time = now;
1649 
1650 	thread->in_kernel = true;
1651 }
1652 
1653 
1654 /*!	Called whenever a thread exits kernel space to user space.
1655 	Tracks time, handles signals, ...
1656 	Interrupts must be enabled. When the function returns, interrupts will be
1657 	disabled.
1658 */
1659 void
1660 thread_at_kernel_exit(void)
1661 {
1662 	struct thread *thread = thread_get_current_thread();
1663 
1664 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1665 
1666 	while (handle_signals(thread)) {
1667 		InterruptsSpinLocker _(gThreadSpinlock);
1668 		scheduler_reschedule();
1669 	}
1670 
1671 	disable_interrupts();
1672 
1673 	thread->in_kernel = false;
1674 
1675 	// track kernel time
1676 	bigtime_t now = system_time();
1677 	thread->kernel_time += now - thread->last_time;
1678 	thread->last_time = now;
1679 }
1680 
1681 
1682 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1683 	and no debugging shall be done.
1684 	Interrupts must be disabled.
1685 */
1686 void
1687 thread_at_kernel_exit_no_signals(void)
1688 {
1689 	struct thread *thread = thread_get_current_thread();
1690 
1691 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1692 
1693 	thread->in_kernel = false;
1694 
1695 	// track kernel time
1696 	bigtime_t now = system_time();
1697 	thread->kernel_time += now - thread->last_time;
1698 	thread->last_time = now;
1699 }
1700 
1701 
1702 void
1703 thread_reset_for_exec(void)
1704 {
1705 	struct thread *thread = thread_get_current_thread();
1706 
1707 	reset_signals(thread);
1708 
1709 	// Note: We don't cancel an alarm. It is supposed to survive exec*().
1710 }
1711 
1712 
1713 /*! Inserts a thread at the tail of a queue */
1714 void
1715 thread_enqueue(struct thread *thread, struct thread_queue *queue)
1716 {
1717 	thread->queue_next = NULL;
1718 	if (queue->head == NULL) {
1719 		queue->head = thread;
1720 		queue->tail = thread;
1721 	} else {
1722 		queue->tail->queue_next = thread;
1723 		queue->tail = thread;
1724 	}
1725 }
1726 
1727 
1728 struct thread *
1729 thread_lookat_queue(struct thread_queue *queue)
1730 {
1731 	return queue->head;
1732 }
1733 
1734 
1735 struct thread *
1736 thread_dequeue(struct thread_queue *queue)
1737 {
1738 	struct thread *thread = queue->head;
1739 
1740 	if (thread != NULL) {
1741 		queue->head = thread->queue_next;
1742 		if (queue->tail == thread)
1743 			queue->tail = NULL;
1744 	}
1745 	return thread;
1746 }
1747 
1748 
1749 struct thread *
1750 thread_dequeue_id(struct thread_queue *q, thread_id id)
1751 {
1752 	struct thread *thread;
1753 	struct thread *last = NULL;
1754 
1755 	thread = q->head;
1756 	while (thread != NULL) {
1757 		if (thread->id == id) {
1758 			if (last == NULL)
1759 				q->head = thread->queue_next;
1760 			else
1761 				last->queue_next = thread->queue_next;
1762 
1763 			if (q->tail == thread)
1764 				q->tail = last;
1765 			break;
1766 		}
1767 		last = thread;
1768 		thread = thread->queue_next;
1769 	}
1770 	return thread;
1771 }
1772 
1773 
1774 struct thread*
1775 thread_iterate_through_threads(thread_iterator_callback callback, void* cookie)
1776 {
1777 	struct hash_iterator iterator;
1778 	hash_open(sThreadHash, &iterator);
1779 
1780 	struct thread* thread;
1781 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
1782 			!= NULL) {
1783 		if (callback(thread, cookie))
1784 			break;
1785 	}
1786 
1787 	hash_close(sThreadHash, &iterator, false);
1788 
1789 	return thread;
1790 }
1791 
1792 
1793 thread_id
1794 allocate_thread_id(void)
1795 {
1796 	return atomic_add(&sNextThreadID, 1);
1797 }
1798 
1799 
1800 thread_id
1801 peek_next_thread_id(void)
1802 {
1803 	return atomic_get(&sNextThreadID);
1804 }
1805 
1806 
1807 /*!	Yield the CPU to other threads.
1808 	If \a force is \c true, the thread will almost certainly be unscheduled.
1809 	If \c false, it will continue to run if no other thread is ready, and even
1810 	if one is, a higher priority than the other ready threads still gives it
1811 	a good chance to continue.
1812 */
1813 void
1814 thread_yield(bool force)
1815 {
1816 	if (force) {
1817 		// snooze for roughly 3 thread quantums
1818 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1819 #if 0
1820 		cpu_status state;
1821 
1822 		struct thread *thread = thread_get_current_thread();
1823 		if (thread == NULL)
1824 			return;
1825 
1826 		state = disable_interrupts();
1827 		GRAB_THREAD_LOCK();
1828 
1829 		// mark the thread as yielded, so it will not be scheduled next
1830 		//thread->was_yielded = true;
1831 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1832 		scheduler_reschedule();
1833 
1834 		RELEASE_THREAD_LOCK();
1835 		restore_interrupts(state);
1836 #endif
1837 	} else {
1838 		struct thread *thread = thread_get_current_thread();
1839 		if (thread == NULL)
1840 			return;
1841 
1842 		// Don't force the thread off the CPU, just reschedule.
1843 		InterruptsSpinLocker _(gThreadSpinlock);
1844 		scheduler_reschedule();
1845 	}
1846 }
1847 
1848 
1849 /*!	Kernel private thread creation function.
1850 
1851 	\param threadID The ID to be assigned to the new thread. If
1852 		  \code < 0 \endcode a fresh one is allocated.
1853 */
1854 thread_id
1855 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1856 	void *arg, team_id team, thread_id threadID)
1857 {
1858 	thread_creation_attributes attributes;
1859 	attributes.entry = (thread_entry_func)function;
1860 	attributes.name = name;
1861 	attributes.priority = priority;
1862 	attributes.args1 = arg;
1863 	attributes.args2 = NULL;
1864 	attributes.stack_address = NULL;
1865 	attributes.stack_size = 0;
1866 	attributes.team = team;
1867 	attributes.thread = threadID;
1868 
1869 	return create_thread(attributes, true);
1870 }
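
// Usage sketch (illustrative only; my_daemon is a hypothetical callback with
// the thread_func signature, comparable to undertaker() above): a caller
// would typically spawn a kernel daemon in the kernel team and resume it:
//
//	thread_id id = spawn_kernel_thread_etc(&my_daemon, "my daemon",
//		B_LOW_PRIORITY, NULL, team_get_kernel_team_id(), -1);
//	if (id >= 0)
//		resume_thread(id);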
1871 
1872 
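/*!	Waits for the thread with the given ID to die and optionally returns its
	exit status in \a _returnCode. If the thread is already gone, its death
	entry is looked up in the calling team instead. The target thread is
	resumed first, so that waiting on a suspended thread cannot block forever.
*/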
1873 status_t
1874 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1875 	status_t *_returnCode)
1876 {
1877 	sem_id exitSem = B_BAD_THREAD_ID;
1878 	struct death_entry death;
1879 	job_control_entry* freeDeath = NULL;
1880 	struct thread *thread;
1881 	cpu_status state;
1882 	status_t status = B_OK;
1883 
1884 	if (id < B_OK)
1885 		return B_BAD_THREAD_ID;
1886 
1887 	// we need to resume the thread we're waiting for first
1888 
1889 	state = disable_interrupts();
1890 	GRAB_THREAD_LOCK();
1891 
1892 	thread = thread_get_thread_struct_locked(id);
1893 	if (thread != NULL) {
1894 		// remember the semaphore we have to wait on and place our death entry
1895 		exitSem = thread->exit.sem;
1896 		list_add_link_to_head(&thread->exit.waiters, &death);
1897 	}
1898 
1899 	death_entry* threadDeathEntry = NULL;
1900 
1901 	RELEASE_THREAD_LOCK();
1902 
1903 	if (thread == NULL) {
1904 		// we couldn't find this thread - maybe it's already gone, and we'll
1905 		// find its death entry in our team
1906 		GRAB_TEAM_LOCK();
1907 
1908 		struct team* team = thread_get_current_thread()->team;
1909 
1910 		// check the child death entries first (i.e. main threads of child
1911 		// teams)
1912 		bool deleteEntry;
1913 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1914 		if (freeDeath != NULL) {
1915 			death.status = freeDeath->status;
1916 			if (!deleteEntry)
1917 				freeDeath = NULL;
1918 		} else {
1919 			// check the thread death entries of the team (non-main threads)
1920 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1921 					&team->dead_threads, threadDeathEntry)) != NULL) {
1922 				if (threadDeathEntry->thread == id) {
1923 					list_remove_item(&team->dead_threads, threadDeathEntry);
1924 					team->dead_threads_count--;
1925 					death.status = threadDeathEntry->status;
1926 					break;
1927 				}
1928 			}
1929 
1930 			if (threadDeathEntry == NULL)
1931 				status = B_BAD_THREAD_ID;
1932 		}
1933 
1934 		RELEASE_TEAM_LOCK();
1935 	}
1936 
1937 	restore_interrupts(state);
1938 
1939 	if (thread == NULL && status == B_OK) {
1940 		// we found the thread's death entry in our team
1941 		if (_returnCode)
1942 			*_returnCode = death.status;
1943 
1944 		delete freeDeath;
1945 		free(threadDeathEntry);
1946 		return B_OK;
1947 	}
1948 
1949 	// we need to wait for the death of the thread
1950 
1951 	if (exitSem < B_OK)
1952 		return B_BAD_THREAD_ID;
1953 
1954 	resume_thread(id);
1955 		// make sure we don't wait forever on a suspended thread
1956 
1957 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1958 
1959 	if (status == B_OK) {
1960 		// this should never happen as the thread deletes the semaphore on exit
1961 		panic("could acquire exit_sem for thread %ld\n", id);
1962 	} else if (status == B_BAD_SEM_ID) {
1963 		// this is the way the thread normally exits
1964 		status = B_OK;
1965 
1966 		if (_returnCode)
1967 			*_returnCode = death.status;
1968 	} else {
1969 		// We were probably interrupted; we need to remove our death entry now.
1970 		state = disable_interrupts();
1971 		GRAB_THREAD_LOCK();
1972 
1973 		thread = thread_get_thread_struct_locked(id);
1974 		if (thread != NULL)
1975 			list_remove_link(&death);
1976 
1977 		RELEASE_THREAD_LOCK();
1978 		restore_interrupts(state);
1979 
1980 		// If the thread is already gone, we need to wait for its exit semaphore
1981 		// to make sure our death entry stays valid - it won't take long
1982 		if (thread == NULL)
1983 			acquire_sem(exitSem);
1984 	}
1985 
1986 	return status;
1987 }
1988 
1989 
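/*!	Adds the given select info to the thread with the ID \a id, so that the
	caller gets notified about the selected events. Only \c B_EVENT_INVALID
	is supported at the moment (i.e. notification when the thread goes away);
	all other event bits are masked out below. A reference to the select sync
	object is acquired while the info is registered.
*/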
1990 status_t
1991 select_thread(int32 id, struct select_info* info, bool kernel)
1992 {
1993 	InterruptsSpinLocker locker(gThreadSpinlock);
1994 
1995 	// get thread
1996 	struct thread* thread = thread_get_thread_struct_locked(id);
1997 	if (thread == NULL)
1998 		return B_BAD_THREAD_ID;
1999 
2000 	// We support only B_EVENT_INVALID at the moment.
2001 	info->selected_events &= B_EVENT_INVALID;
2002 
2003 	// add info to list
2004 	if (info->selected_events != 0) {
2005 		info->next = thread->select_infos;
2006 		thread->select_infos = info;
2007 
2008 		// we need a sync reference
2009 		atomic_add(&info->sync->ref_count, 1);
2010 	}
2011 
2012 	return B_OK;
2013 }
2014 
2015 
2016 status_t
2017 deselect_thread(int32 id, struct select_info* info, bool kernel)
2018 {
2019 	InterruptsSpinLocker locker(gThreadSpinlock);
2020 
2021 	// get thread
2022 	struct thread* thread = thread_get_thread_struct_locked(id);
2023 	if (thread == NULL)
2024 		return B_BAD_THREAD_ID;
2025 
2026 	// remove info from list
2027 	select_info** infoLocation = &thread->select_infos;
2028 	while (*infoLocation != NULL && *infoLocation != info)
2029 		infoLocation = &(*infoLocation)->next;
2030 
2031 	if (*infoLocation != info)
2032 		return B_OK;
2033 
2034 	*infoLocation = info->next;
2035 
2036 	locker.Unlock();
2037 
2038 	// surrender sync reference
2039 	put_select_sync(info->sync);
2040 
2041 	return B_OK;
2042 }
2043 
2044 
2045 int32
2046 thread_max_threads(void)
2047 {
2048 	return sMaxThreads;
2049 }
2050 
2051 
2052 int32
2053 thread_used_threads(void)
2054 {
2055 	return sUsedThreads;
2056 }
2057 
2058 
2059 const char*
2060 thread_state_to_text(struct thread* thread, int32 state)
2061 {
2062 	return state_to_text(thread, state);
2063 }
2064 
2065 
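/*!	Returns the I/O priority of the thread with the given ID. If the thread
	hasn't been assigned an I/O priority (i.e. its \c io_priority is
	negative), its scheduling priority is returned instead. Returns
	\c B_BAD_THREAD_ID if no thread with the given ID exists.
*/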
2066 int32
2067 thread_get_io_priority(thread_id id)
2068 {
2069 	// take a shortcut if it is the current thread
2070 	struct thread* thread = thread_get_current_thread();
2071 	int32 priority;
2072 	if (id == thread->id) {
2073 		priority = thread->io_priority;
2074 		return priority < 0 ? thread->priority : priority;
2075 	}
2076 
2077 	// not the current thread -- get it
2078 	InterruptsSpinLocker locker(gThreadSpinlock);
2079 
2080 	thread = thread_get_thread_struct_locked(id);
2081 	if (thread == NULL)
2082 		return B_BAD_THREAD_ID;
2083 
2084 	priority = thread->io_priority;
2085 	return priority < 0 ? thread->priority : priority;
2086 }
2087 
2088 
2089 void
2090 thread_set_io_priority(int32 priority)
2091 {
2092 	struct thread* thread = thread_get_current_thread();
2093 	thread->io_priority = priority;
2094 }
2095 
2096 
2097 status_t
2098 thread_init(kernel_args *args)
2099 {
2100 	uint32 i;
2101 
2102 	TRACE(("thread_init: entry\n"));
2103 
2104 	// create the thread hash table
2105 	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
2106 		&thread_struct_compare, &thread_struct_hash);
2107 
2108 	// create the thread structure object cache
2109 	sThreadCache = create_object_cache("threads", sizeof(thread), 16, NULL,
2110 		NULL, NULL);
2111 		// Note: The x86 port requires 16 byte alignment of thread structures.
2112 	if (sThreadCache == NULL)
2113 		panic("thread_init(): failed to allocate thread object cache!");
2114 
2115 	if (arch_thread_init(args) < B_OK)
2116 		panic("arch_thread_init() failed!\n");
2117 
2118 	// skip all thread IDs including B_SYSTEM_TEAM, which is reserved
2119 	sNextThreadID = B_SYSTEM_TEAM + 1;
2120 
2121 	// create an idle thread for each cpu
2122 
2123 	for (i = 0; i < args->num_cpus; i++) {
2124 		struct thread *thread;
2125 		area_info info;
2126 		char name[64];
2127 
2128 		sprintf(name, "idle thread %lu", i + 1);
2129 		thread = create_thread_struct(&sIdleThreads[i], name,
2130 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
2131 		if (thread == NULL) {
2132 			panic("error creating idle thread struct\n");
2133 			return B_NO_MEMORY;
2134 		}
2135 
2136 		gCPU[i].running_thread = thread;
2137 
2138 		thread->team = team_get_kernel_team();
2139 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
2140 		thread->state = B_THREAD_RUNNING;
2141 		thread->next_state = B_THREAD_READY;
2142 		sprintf(name, "idle thread %lu kstack", i + 1);
2143 		thread->kernel_stack_area = find_area(name);
2144 		thread->entry = NULL;
2145 
2146 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2147 			panic("error finding idle kstack area\n");
2148 
2149 		thread->kernel_stack_base = (addr_t)info.address;
2150 		thread->kernel_stack_top = thread->kernel_stack_base + info.size;
2151 
2152 		hash_insert(sThreadHash, thread);
2153 		insert_thread_into_team(thread->team, thread);
2154 	}
2155 	sUsedThreads = args->num_cpus;
2156 
2157 	// init the notification service
2158 	new(&sNotificationService) ThreadNotificationService();
2159 
2160 	// start the undertaker thread
2161 	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2162 	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2163 
2164 	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2165 		B_DISPLAY_PRIORITY, NULL);
2166 	if (undertakerThread < 0)
2167 		panic("Failed to create undertaker thread!");
2168 	resume_thread(undertakerThread);
2169 
2170 	// set up some debugger commands
2171 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2172 		"[ <team> ]\n"
2173 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2174 		"all threads of the specified team.\n"
2175 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2176 	add_debugger_command_etc("ready", &dump_thread_list,
2177 		"List all ready threads",
2178 		"\n"
2179 		"Prints a list of all threads in ready state.\n", 0);
2180 	add_debugger_command_etc("running", &dump_thread_list,
2181 		"List all running threads",
2182 		"\n"
2183 		"Prints a list of all threads in running state.\n", 0);
2184 	add_debugger_command_etc("waiting", &dump_thread_list,
2185 		"List all waiting threads (optionally for a specific semaphore)",
2186 		"[ <sem> ]\n"
2187 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2188 		"specified, only the threads waiting on that semaphore are listed.\n"
2189 		"  <sem>  - ID of the semaphore.\n", 0);
2190 	add_debugger_command_etc("realtime", &dump_thread_list,
2191 		"List all realtime threads",
2192 		"\n"
2193 		"Prints a list of all threads with realtime priority.\n", 0);
2194 	add_debugger_command_etc("thread", &dump_thread_info,
2195 		"Dump info about a particular thread",
2196 		"[ -s ] ( <id> | <address> | <name> )*\n"
2197 		"Prints information about the specified thread. If no argument is\n"
2198 		"given the current thread is selected.\n"
2199 		"  -s         - Print info in compact table form (like \"threads\").\n"
2200 		"  <id>       - The ID of the thread.\n"
2201 		"  <address>  - The address of the thread structure.\n"
2202 		"  <name>     - The thread's name.\n", 0);
2203 	add_debugger_command_etc("calling", &dump_thread_list,
2204 		"Show all threads that have a specific address in their call chain",
2205 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2206 	add_debugger_command_etc("unreal", &make_thread_unreal,
2207 		"Set realtime priority threads to normal priority",
2208 		"[ <id> ]\n"
2209 		"Sets the priority of all realtime threads or, if given, the one\n"
2210 		"with the specified ID to \"normal\" priority.\n"
2211 		"  <id>  - The ID of the thread.\n", 0);
2212 	add_debugger_command_etc("suspend", &make_thread_suspended,
2213 		"Suspend a thread",
2214 		"[ <id> ]\n"
2215 		"Suspends the thread with the given ID. If no ID argument is given\n"
2216 		"the current thread is selected.\n"
2217 		"  <id>  - The ID of the thread.\n", 0);
2218 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2219 		"<id>\n"
2220 		"Resumes the specified thread, if it is currently suspended.\n"
2221 		"  <id>  - The ID of the thread.\n", 0);
2222 	add_debugger_command_etc("drop", &drop_into_debugger,
2223 		"Drop a thread into the userland debugger",
2224 		"<id>\n"
2225 		"Drops the specified (userland) thread into the userland debugger\n"
2226 		"after leaving the kernel debugger.\n"
2227 		"  <id>  - The ID of the thread.\n", 0);
2228 	add_debugger_command_etc("priority", &set_thread_prio,
2229 		"Set a thread's priority",
2230 		"<priority> [ <id> ]\n"
2231 		"Sets the priority of the thread with the specified ID to the given\n"
2232 		"priority. If no thread ID is given, the current thread is selected.\n"
2233 		"  <priority>  - The thread's new priority (0 - 120)\n"
2234 		"  <id>        - The ID of the thread.\n", 0);
2235 
2236 	return B_OK;
2237 }
2238 
2239 
2240 status_t
2241 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2242 {
2243 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2244 	// so that get_current_cpu and friends will work, which is crucial for
2245 	// a lot of low level routines
2246 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2247 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2248 	return B_OK;
2249 }
2250 
2251 
2252 //	#pragma mark - thread blocking API
2253 
2254 
2255 static status_t
2256 thread_block_timeout(timer* timer)
2257 {
2258 	// The timer has been installed with B_TIMER_ACQUIRE_THREAD_LOCK, so
2259 	// we're already holding the thread lock here, which keeps this handler
2260 	// simple.
2261 
2262 	struct thread* thread = (struct thread*)timer->user_data;
2263 	thread_unblock_locked(thread, B_TIMED_OUT);
2264 
2265 	return B_HANDLED_INTERRUPT;
2266 }
2267 
2268 
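/*!	Blocks the current thread.

	The thread has to be prepared via thread_prepare_to_block() first; it is
	woken up again when another thread calls thread_unblock() (or
	thread_unblock_locked()) with a status code, which this function then
	returns.

	A minimal usage sketch, modeled after snooze_etc() below (the block type
	and wait string are just illustrative):

	\code
	InterruptsSpinLocker _(gThreadSpinlock);
	struct thread* thread = thread_get_current_thread();

	thread_prepare_to_block(thread, B_CAN_INTERRUPT,
		THREAD_BLOCK_TYPE_OTHER, "example wait");
	status_t status = thread_block_locked(thread);
		// some other thread is expected to call
		// thread_unblock(thread->id, B_OK) eventually
	\endcode
*/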
2269 status_t
2270 thread_block()
2271 {
2272 	InterruptsSpinLocker _(gThreadSpinlock);
2273 	return thread_block_locked(thread_get_current_thread());
2274 }
2275 
2276 
2277 void
2278 thread_unblock(status_t threadID, status_t status)
2279 {
2280 	InterruptsSpinLocker _(gThreadSpinlock);
2281 
2282 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2283 	if (thread != NULL)
2284 		thread_unblock_locked(thread, status);
2285 }
2286 
2287 
2288 status_t
2289 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2290 {
2291 	InterruptsSpinLocker _(gThreadSpinlock);
2292 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
2293 }
2294 
2295 
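/*!	Blocks the current thread with a timeout, like thread_block_with_timeout()
	above, but expects the caller to hold the thread lock already. The thread
	must have been prepared via thread_prepare_to_block() before.
*/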
2296 status_t
2297 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
2298 {
2299 	struct thread* thread = thread_get_current_thread();
2300 
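	// A wait status of 1 means we haven't been unblocked yet; any other value
	// is the status somebody already woke us up with, so return it right away
	// instead of blocking.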
2301 	if (thread->wait.status != 1)
2302 		return thread->wait.status;
2303 
2304 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2305 		&& timeout != B_INFINITE_TIMEOUT;
2306 
2307 	if (useTimer) {
2308 		// Timer flags: absolute/relative + "acquire thread lock". The latter
2309 		// avoids nasty race conditions and deadlock problems that could
2310 		// otherwise occur between our cancel_timer() and a concurrently
2311 		// executing thread_block_timeout().
2312 		uint32 timerFlags;
2313 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2314 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2315 		} else {
2316 			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2317 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2318 				timeout -= rtc_boot_time();
2319 		}
2320 		timerFlags |= B_TIMER_ACQUIRE_THREAD_LOCK;
2321 
2322 		// install the timer
2323 		thread->wait.unblock_timer.user_data = thread;
2324 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2325 			timerFlags);
2326 	}
2327 
2328 	// block
2329 	status_t error = thread_block_locked(thread);
2330 
2331 	// cancel timer, if it didn't fire
2332 	if (error != B_TIMED_OUT && useTimer)
2333 		cancel_timer(&thread->wait.unblock_timer);
2334 
2335 	return error;
2336 }
2337 
2338 
2339 /*!	Thread spinlock must be held.
2340 */
2341 static status_t
2342 user_unblock_thread(thread_id threadID, status_t status)
2343 {
2344 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2345 	if (thread == NULL)
2346 		return B_BAD_THREAD_ID;
2347 	if (thread->user_thread == NULL)
2348 		return B_NOT_ALLOWED;
2349 
2350 	if (thread->user_thread->wait_status > 0) {
2351 		thread->user_thread->wait_status = status;
2352 		thread_unblock_locked(thread, status);
2353 	}
2354 
2355 	return B_OK;
2356 }
2357 
2358 
2359 //	#pragma mark - public kernel API
2360 
2361 
2362 void
2363 exit_thread(status_t returnValue)
2364 {
2365 	struct thread *thread = thread_get_current_thread();
2366 
2367 	thread->exit.status = returnValue;
2368 	thread->exit.reason = THREAD_RETURN_EXIT;
2369 
2370 	// if called from a kernel thread, we don't deliver the signal,
2371 	// we just exit directly to keep the user space behaviour of
2372 	// this function
2373 	if (thread->team != team_get_kernel_team())
2374 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2375 	else
2376 		thread_exit();
2377 }
2378 
2379 
2380 status_t
2381 kill_thread(thread_id id)
2382 {
2383 	if (id <= 0)
2384 		return B_BAD_VALUE;
2385 
2386 	return send_signal(id, SIGKILLTHR);
2387 }
2388 
2389 
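/*!	Sends a message consisting of the given \a code and \a buffer to the
	specified thread; the target thread retrieves it with has_data() and
	receive_data(). This is the kernel counterpart of the BeOS data passing
	API.

	A minimal usage sketch (the thread ID, code, and buffer contents are just
	illustrative):

	\code
	// sending side
	char message[] = "ping";
	send_data(targetThread, 'ping', message, sizeof(message));

	// receiving side (in the target thread)
	char buffer[64];
	thread_id sender;
	int32 code = receive_data(&sender, buffer, sizeof(buffer));
	\endcode
*/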
2390 status_t
2391 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2392 {
2393 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2394 }
2395 
2396 
2397 int32
2398 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2399 {
2400 	return receive_data_etc(sender, buffer, bufferSize, 0);
2401 }
2402 
2403 
2404 bool
2405 has_data(thread_id thread)
2406 {
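	// TODO: The thread argument is ignored; this checks the calling thread's
	// message queue rather than the given thread's.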
2407 	int32 count;
2408 
2409 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
2410 			&count) != B_OK)
2411 		return false;
2412 
2413 	return count != 0;
2414 }
2415 
2416 
2417 status_t
2418 _get_thread_info(thread_id id, thread_info *info, size_t size)
2419 {
2420 	status_t status = B_OK;
2421 	struct thread *thread;
2422 	cpu_status state;
2423 
2424 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2425 		return B_BAD_VALUE;
2426 
2427 	state = disable_interrupts();
2428 	GRAB_THREAD_LOCK();
2429 
2430 	thread = thread_get_thread_struct_locked(id);
2431 	if (thread == NULL) {
2432 		status = B_BAD_VALUE;
2433 		goto err;
2434 	}
2435 
2436 	fill_thread_info(thread, info, size);
2437 
2438 err:
2439 	RELEASE_THREAD_LOCK();
2440 	restore_interrupts(state);
2441 
2442 	return status;
2443 }
2444 
2445 
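/*!	Returns info about the next thread of the given team, using \a _cookie to
	keep track of the iteration. Start with a cookie of 0; the main thread is
	reported first. Returns \c B_BAD_VALUE when the iteration is done (or the
	arguments are invalid).

	A typical iteration looks like this (userland code would use
	get_next_thread_info() instead):

	\code
	int32 cookie = 0;
	thread_info info;
	while (_get_next_thread_info(team, &cookie, &info,
			sizeof(thread_info)) == B_OK) {
		// ... use info ...
	}
	\endcode
*/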
2446 status_t
2447 _get_next_thread_info(team_id teamID, int32 *_cookie, thread_info *info,
2448 	size_t size)
2449 {
2450 	if (info == NULL || size != sizeof(thread_info) || teamID < 0)
2451 		return B_BAD_VALUE;
2452 
2453 	int32 lastID = *_cookie;
2454 
2455 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
2456 
2457 	struct team* team;
2458 	if (teamID == B_CURRENT_TEAM)
2459 		team = thread_get_current_thread()->team;
2460 	else
2461 		team = team_get_team_struct_locked(teamID);
2462 
2463 	if (team == NULL)
2464 		return B_BAD_VALUE;
2465 
2466 	struct thread* thread = NULL;
2467 
2468 	if (lastID == 0) {
2469 		// We start with the main thread
2470 		thread = team->main_thread;
2471 	} else {
2472 		// Find the thread with the lowest ID that is still higher than the
2473 		// last one we reported (as long as the IDs don't wrap around, the
2474 		// list is always sorted from highest to lowest ID).
2475 		for (struct thread* next = team->thread_list; next != NULL;
2476 				next = next->team_next) {
2477 			if (next->id <= lastID)
2478 				break;
2479 
2480 			thread = next;
2481 		}
2482 	}
2483 
2484 	if (thread == NULL)
2485 		return B_BAD_VALUE;
2486 
2487 	lastID = thread->id;
2488 	*_cookie = lastID;
2489 
2490 	SpinLocker threadLocker(gThreadSpinlock);
2491 	fill_thread_info(thread, info, size);
2492 
2493 	return B_OK;
2494 }
2495 
2496 
2497 thread_id
2498 find_thread(const char *name)
2499 {
2500 	struct hash_iterator iterator;
2501 	struct thread *thread;
2502 	cpu_status state;
2503 
2504 	if (name == NULL)
2505 		return thread_get_current_thread_id();
2506 
2507 	state = disable_interrupts();
2508 	GRAB_THREAD_LOCK();
2509 
2510 	// ToDo: this might not be in the same order as find_thread() in BeOS
2511 	//		which could be theoretically problematic.
2512 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2513 	//		cheap either - although this function is probably used very rarely.
2514 
2515 	hash_open(sThreadHash, &iterator);
2516 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
2517 			!= NULL) {
2518 		// Search through hash
2519 		if (thread->name != NULL && !strcmp(thread->name, name)) {
2520 			thread_id id = thread->id;
2521 
2522 			RELEASE_THREAD_LOCK();
2523 			restore_interrupts(state);
2524 			return id;
2525 		}
2526 	}
2527 
2528 	RELEASE_THREAD_LOCK();
2529 	restore_interrupts(state);
2530 
2531 	return B_NAME_NOT_FOUND;
2532 }
2533 
2534 
2535 status_t
2536 rename_thread(thread_id id, const char *name)
2537 {
2538 	struct thread *thread = thread_get_current_thread();
2539 	status_t status = B_BAD_THREAD_ID;
2540 	cpu_status state;
2541 
2542 	if (name == NULL)
2543 		return B_BAD_VALUE;
2544 
2545 	state = disable_interrupts();
2546 	GRAB_THREAD_LOCK();
2547 
2548 	if (thread->id != id)
2549 		thread = thread_get_thread_struct_locked(id);
2550 
2551 	if (thread != NULL) {
2552 		if (thread->team == thread_get_current_thread()->team) {
2553 			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2554 			status = B_OK;
2555 		} else
2556 			status = B_NOT_ALLOWED;
2557 	}
2558 
2559 	RELEASE_THREAD_LOCK();
2560 	restore_interrupts(state);
2561 
2562 	return status;
2563 }
2564 
2565 
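/*!	Sets the priority of the thread with the given ID, clamping the value to
	the allowed range. Idle threads cannot be changed. On success the thread's
	previous priority is returned (BeOS semantics); otherwise an error code
	such as \c B_BAD_THREAD_ID or \c B_NOT_ALLOWED is returned.
*/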
2566 status_t
2567 set_thread_priority(thread_id id, int32 priority)
2568 {
2569 	struct thread *thread;
2570 	int32 oldPriority;
2571 
2572 	// make sure the passed in priority is within bounds
2573 	if (priority > THREAD_MAX_SET_PRIORITY)
2574 		priority = THREAD_MAX_SET_PRIORITY;
2575 	if (priority < THREAD_MIN_SET_PRIORITY)
2576 		priority = THREAD_MIN_SET_PRIORITY;
2577 
2578 	thread = thread_get_current_thread();
2579 	if (thread->id == id) {
2580 		if (thread_is_idle_thread(thread))
2581 			return B_NOT_ALLOWED;
2582 
2583 		// We're operating on the current thread, so we know it isn't in the
2584 		// run queue, and we can manipulate its structure directly.
2585 		oldPriority = thread->priority;
2586 			// Note that this might not return the correct value if we are
2587 			// preempted here, and another thread changes our priority before
2588 			// the next line is executed.
2589 		thread->priority = thread->next_priority = priority;
2590 	} else {
2591 		InterruptsSpinLocker _(gThreadSpinlock);
2592 
2593 		thread = thread_get_thread_struct_locked(id);
2594 		if (thread == NULL)
2595 			return B_BAD_THREAD_ID;
2596 
2597 		if (thread_is_idle_thread(thread))
2598 			return B_NOT_ALLOWED;
2599 
2600 		oldPriority = thread->priority;
2601 		scheduler_set_thread_priority(thread, priority);
2602 	}
2603 
2604 	return oldPriority;
2605 }
2606 
2607 
2608 status_t
2609 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2610 {
2611 	status_t status;
2612 
2613 	if (timebase != B_SYSTEM_TIMEBASE)
2614 		return B_BAD_VALUE;
2615 
2616 	InterruptsSpinLocker _(gThreadSpinlock);
2617 	struct thread* thread = thread_get_current_thread();
2618 
2619 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, NULL);
2620 	status = thread_block_with_timeout_locked(flags, timeout);
2621 
2622 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2623 		return B_OK;
2624 
2625 	return status;
2626 }
2627 
2628 
2629 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2630 status_t
2631 snooze(bigtime_t timeout)
2632 {
2633 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2634 }
2635 
2636 
2637 /*!	snooze_until() for internal kernel use only; doesn't interrupt on
2638 	signals.
2639 */
2640 status_t
2641 snooze_until(bigtime_t timeout, int timebase)
2642 {
2643 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2644 }
2645 
2646 
2647 status_t
2648 wait_for_thread(thread_id thread, status_t *_returnCode)
2649 {
2650 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2651 }
2652 
2653 
2654 status_t
2655 suspend_thread(thread_id id)
2656 {
2657 	if (id <= 0)
2658 		return B_BAD_VALUE;
2659 
2660 	return send_signal(id, SIGSTOP);
2661 }
2662 
2663 
2664 status_t
2665 resume_thread(thread_id id)
2666 {
2667 	if (id <= 0)
2668 		return B_BAD_VALUE;
2669 
2670 	return send_signal_etc(id, SIGCONT, SIGNAL_FLAG_DONT_RESTART_SYSCALL);
2671 		// This retains compatibility with BeOS, which documents the
2672 		// combination of suspend_thread() and resume_thread() as a way to
2673 		// interrupt threads waiting on semaphores.
2674 }
2675 
2676 
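/*!	Creates a kernel thread in the kernel team, running the given \a function
	with \a arg as its sole argument. The thread is created in the suspended
	state and has to be started with resume_thread().

	A minimal usage sketch (the entry function and its name are just
	illustrative; the entry has the usual \c thread_func signature,
	\c int32 (*)(void*)):

	\code
	thread_id worker = spawn_kernel_thread(&worker_entry, "worker",
		B_NORMAL_PRIORITY, NULL);
	if (worker >= B_OK)
		resume_thread(worker);
	\endcode
*/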
2677 thread_id
2678 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2679 	void *arg)
2680 {
2681 	thread_creation_attributes attributes;
2682 	attributes.entry = (thread_entry_func)function;
2683 	attributes.name = name;
2684 	attributes.priority = priority;
2685 	attributes.args1 = arg;
2686 	attributes.args2 = NULL;
2687 	attributes.stack_address = NULL;
2688 	attributes.stack_size = 0;
2689 	attributes.team = team_get_kernel_team()->id;
2690 	attributes.thread = -1;
2691 
2692 	return create_thread(attributes, true);
2693 }
2694 
2695 
2696 int
2697 getrlimit(int resource, struct rlimit * rlp)
2698 {
2699 	status_t error = common_getrlimit(resource, rlp);
2700 	if (error != B_OK) {
2701 		errno = error;
2702 		return -1;
2703 	}
2704 
2705 	return 0;
2706 }
2707 
2708 
2709 int
2710 setrlimit(int resource, const struct rlimit * rlp)
2711 {
2712 	status_t error = common_setrlimit(resource, rlp);
2713 	if (error != B_OK) {
2714 		errno = error;
2715 		return -1;
2716 	}
2717 
2718 	return 0;
2719 }
2720 
2721 
2722 //	#pragma mark - syscalls
2723 
2724 
2725 void
2726 _user_exit_thread(status_t returnValue)
2727 {
2728 	exit_thread(returnValue);
2729 }
2730 
2731 
2732 status_t
2733 _user_kill_thread(thread_id thread)
2734 {
2735 	return kill_thread(thread);
2736 }
2737 
2738 
2739 status_t
2740 _user_resume_thread(thread_id thread)
2741 {
2742 	return resume_thread(thread);
2743 }
2744 
2745 
2746 status_t
2747 _user_suspend_thread(thread_id thread)
2748 {
2749 	return suspend_thread(thread);
2750 }
2751 
2752 
2753 status_t
2754 _user_rename_thread(thread_id thread, const char *userName)
2755 {
2756 	char name[B_OS_NAME_LENGTH];
2757 
2758 	if (!IS_USER_ADDRESS(userName)
2759 		|| userName == NULL
2760 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2761 		return B_BAD_ADDRESS;
2762 
2763 	return rename_thread(thread, name);
2764 }
2765 
2766 
2767 int32
2768 _user_set_thread_priority(thread_id thread, int32 newPriority)
2769 {
2770 	return set_thread_priority(thread, newPriority);
2771 }
2772 
2773 
2774 thread_id
2775 _user_spawn_thread(thread_creation_attributes* userAttributes)
2776 {
2777 	thread_creation_attributes attributes;
2778 	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
2779 		|| user_memcpy(&attributes, userAttributes,
2780 				sizeof(attributes)) != B_OK) {
2781 		return B_BAD_ADDRESS;
2782 	}
2783 
2784 	if (attributes.stack_size != 0
2785 		&& (attributes.stack_size < MIN_USER_STACK_SIZE
2786 			|| attributes.stack_size > MAX_USER_STACK_SIZE)) {
2787 		return B_BAD_VALUE;
2788 	}
2789 
2790 	char name[B_OS_NAME_LENGTH];
2791 	thread_id threadID;
2792 
2793 	if (!IS_USER_ADDRESS(attributes.entry) || attributes.entry == NULL
2794 		|| (attributes.stack_address != NULL
2795 			&& !IS_USER_ADDRESS(attributes.stack_address))
2796 		|| (attributes.name != NULL && (!IS_USER_ADDRESS(attributes.name)
2797 			|| user_strlcpy(name, attributes.name, B_OS_NAME_LENGTH) < 0)))
2798 		return B_BAD_ADDRESS;
2799 
2800 	attributes.name = attributes.name != NULL ? name : "user thread";
2801 	attributes.team = thread_get_current_thread()->team->id;
2802 	attributes.thread = -1;
2803 
2804 	threadID = create_thread(attributes, false);
2805 
2806 	if (threadID >= 0)
2807 		user_debug_thread_created(threadID);
2808 
2809 	return threadID;
2810 }
2811 
2812 
2813 status_t
2814 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2815 {
2816 	// NOTE: We only know the system timebase at the moment.
2817 	syscall_restart_handle_timeout_pre(flags, timeout);
2818 
2819 	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2820 
2821 	return syscall_restart_handle_timeout_post(error, timeout);
2822 }
2823 
2824 
2825 void
2826 _user_thread_yield(void)
2827 {
2828 	thread_yield(true);
2829 }
2830 
2831 
2832 status_t
2833 _user_get_thread_info(thread_id id, thread_info *userInfo)
2834 {
2835 	thread_info info;
2836 	status_t status;
2837 
2838 	if (!IS_USER_ADDRESS(userInfo))
2839 		return B_BAD_ADDRESS;
2840 
2841 	status = _get_thread_info(id, &info, sizeof(thread_info));
2842 
2843 	if (status >= B_OK
2844 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2845 		return B_BAD_ADDRESS;
2846 
2847 	return status;
2848 }
2849 
2850 
2851 status_t
2852 _user_get_next_thread_info(team_id team, int32 *userCookie,
2853 	thread_info *userInfo)
2854 {
2855 	status_t status;
2856 	thread_info info;
2857 	int32 cookie;
2858 
2859 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2860 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2861 		return B_BAD_ADDRESS;
2862 
2863 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2864 	if (status < B_OK)
2865 		return status;
2866 
2867 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2868 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2869 		return B_BAD_ADDRESS;
2870 
2871 	return status;
2872 }
2873 
2874 
2875 thread_id
2876 _user_find_thread(const char *userName)
2877 {
2878 	char name[B_OS_NAME_LENGTH];
2879 
2880 	if (userName == NULL)
2881 		return find_thread(NULL);
2882 
2883 	if (!IS_USER_ADDRESS(userName)
2884 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2885 		return B_BAD_ADDRESS;
2886 
2887 	return find_thread(name);
2888 }
2889 
2890 
2891 status_t
2892 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2893 {
2894 	status_t returnCode;
2895 	status_t status;
2896 
2897 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2898 		return B_BAD_ADDRESS;
2899 
2900 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2901 
2902 	if (status == B_OK && userReturnCode != NULL
2903 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
2904 		return B_BAD_ADDRESS;
2905 	}
2906 
2907 	return syscall_restart_handle_post(status);
2908 }
2909 
2910 
2911 bool
2912 _user_has_data(thread_id thread)
2913 {
2914 	return has_data(thread);
2915 }
2916 
2917 
2918 status_t
2919 _user_send_data(thread_id thread, int32 code, const void *buffer,
2920 	size_t bufferSize)
2921 {
2922 	if (!IS_USER_ADDRESS(buffer))
2923 		return B_BAD_ADDRESS;
2924 
2925 	return send_data_etc(thread, code, buffer, bufferSize,
2926 		B_KILL_CAN_INTERRUPT);
2927 		// supports userland buffers
2928 }
2929 
2930 
2931 status_t
2932 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2933 {
2934 	thread_id sender;
2935 	status_t code;
2936 
2937 	if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
2938 		|| !IS_USER_ADDRESS(buffer))
2939 		return B_BAD_ADDRESS;
2940 
2941 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2942 		// supports userland buffers
2943 
2944 	if (_userSender != NULL)
2945 		if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
2946 			return B_BAD_ADDRESS;
2947 
2948 	return code;
2949 }
2950 
2951 
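/*!	Blocks the calling thread until another thread unblocks it via
	_user_unblock_thread()/_user_unblock_threads(), a signal arrives, or the
	given timeout expires. The user_thread::wait_status field carries the
	protocol: a positive value means "still waiting", zero or a negative
	value is the wake-up status that is returned to userland.
*/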
2952 status_t
2953 _user_block_thread(uint32 flags, bigtime_t timeout)
2954 {
2955 	syscall_restart_handle_timeout_pre(flags, timeout);
2956 	flags |= B_CAN_INTERRUPT;
2957 
2958 	struct thread* thread = thread_get_current_thread();
2959 
2960 	InterruptsSpinLocker locker(gThreadSpinlock);
2961 
2962 	// check if somebody has already unblocked us
2963 	if (thread->user_thread->wait_status <= 0)
2964 		return thread->user_thread->wait_status;
2965 
2966 	// nope, so wait
2967 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");
2968 	status_t status = thread_block_with_timeout_locked(flags, timeout);
2969 	thread->user_thread->wait_status = status;
2970 
2971 	return syscall_restart_handle_timeout_post(status, timeout);
2972 }
2973 
2974 
2975 status_t
2976 _user_unblock_thread(thread_id threadID, status_t status)
2977 {
2978 	InterruptsSpinLocker locker(gThreadSpinlock);
2979 	status_t error = user_unblock_thread(threadID, status);
2980 	scheduler_reschedule_if_necessary_locked();
2981 	return error;
2982 }
2983 
2984 
2985 status_t
2986 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
2987 {
2988 	enum {
2989 		MAX_USER_THREADS_TO_UNBLOCK	= 128
2990 	};
2991 
2992 	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
2993 		return B_BAD_ADDRESS;
2994 	if (count > MAX_USER_THREADS_TO_UNBLOCK)
2995 		return B_BAD_VALUE;
2996 
2997 	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
2998 	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
2999 		return B_BAD_ADDRESS;
3000 
3001 	InterruptsSpinLocker locker(gThreadSpinlock);
3002 	for (uint32 i = 0; i < count; i++)
3003 		user_unblock_thread(threads[i], status);
3004 
3005 	scheduler_reschedule_if_necessary_locked();
3006 
3007 	return B_OK;
3008 }
3009 
3010 
3011 // TODO: the following two functions don't belong here
3012 
3013 
3014 int
3015 _user_getrlimit(int resource, struct rlimit *urlp)
3016 {
3017 	struct rlimit rl;
3018 	int ret;
3019 
3020 	if (urlp == NULL)
3021 		return EINVAL;
3022 
3023 	if (!IS_USER_ADDRESS(urlp))
3024 		return B_BAD_ADDRESS;
3025 
3026 	ret = common_getrlimit(resource, &rl);
3027 
3028 	if (ret == 0) {
3029 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
3030 		if (ret < 0)
3031 			return ret;
3032 
3033 		return 0;
3034 	}
3035 
3036 	return ret;
3037 }
3038 
3039 
3040 int
3041 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
3042 {
3043 	struct rlimit resourceLimit;
3044 
3045 	if (userResourceLimit == NULL)
3046 		return EINVAL;
3047 
3048 	if (!IS_USER_ADDRESS(userResourceLimit)
3049 		|| user_memcpy(&resourceLimit, userResourceLimit,
3050 			sizeof(struct rlimit)) < B_OK)
3051 		return B_BAD_ADDRESS;
3052 
3053 	return common_setrlimit(resource, &resourceLimit);
3054 }
3055