xref: /haiku/src/system/kernel/thread.cpp (revision 62f5ba006a08b0df30631375878effaf67ae5dbc)
1 /*
2  * Copyright 2005-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*! Threading routines */
12 
13 
14 #include <thread.h>
15 
16 #include <errno.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include <string.h>
20 #include <sys/resource.h>
21 
22 #include <OS.h>
23 
24 #include <util/AutoLock.h>
25 #include <util/khash.h>
26 
27 #include <arch/debug.h>
28 #include <boot/kernel_args.h>
29 #include <condition_variable.h>
30 #include <cpu.h>
31 #include <int.h>
32 #include <kimage.h>
33 #include <kscheduler.h>
34 #include <ksignal.h>
35 #include <Notifications.h>
36 #include <real_time_clock.h>
37 #include <smp.h>
38 #include <syscalls.h>
39 #include <syscall_restart.h>
40 #include <team.h>
41 #include <tls.h>
42 #include <user_runtime.h>
43 #include <user_thread.h>
44 #include <vfs.h>
45 #include <vm/vm.h>
46 #include <vm/VMAddressSpace.h>
47 #include <wait_for_objects.h>
48 
49 
50 //#define TRACE_THREAD
51 #ifdef TRACE_THREAD
52 #	define TRACE(x) dprintf x
53 #else
54 #	define TRACE(x) ;
55 #endif
56 
57 
58 #define THREAD_MAX_MESSAGE_SIZE		65536
59 
60 
61 struct thread_key {
62 	thread_id id;
63 };
64 
65 // global
66 spinlock gThreadSpinlock = B_SPINLOCK_INITIALIZER;
67 
68 // thread list
69 static struct thread sIdleThreads[B_MAX_CPU_COUNT];
70 static hash_table *sThreadHash = NULL;
71 static thread_id sNextThreadID = 1;
72 
73 // some arbitrarily chosen limits - should probably depend on the available
74 // memory (the limit is not yet enforced)
75 static int32 sMaxThreads = 4096;
76 static int32 sUsedThreads = 0;
77 
78 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
79 	struct thread*	thread;
80 	team_id			teamID;
81 
82 	UndertakerEntry(struct thread* thread, team_id teamID)
83 		:
84 		thread(thread),
85 		teamID(teamID)
86 	{
87 	}
88 };
89 
90 
91 class ThreadNotificationService : public DefaultNotificationService {
92 public:
93 	ThreadNotificationService()
94 		: DefaultNotificationService("threads")
95 	{
96 	}
97 
98 	void Notify(uint32 eventCode, struct thread* thread)
99 	{
100 		char eventBuffer[128];
101 		KMessage event;
102 		event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
103 		event.AddInt32("event", eventCode);
104 		event.AddInt32("thread", thread->id);
105 		event.AddPointer("threadStruct", thread);
106 
107 		DefaultNotificationService::Notify(event, eventCode);
108 	}
109 };
110 
111 
112 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
113 static ConditionVariable sUndertakerCondition;
114 static ThreadNotificationService sNotificationService;
115 
116 
117 // The dead queue is used as a pool from which to retrieve and reuse previously
118 // allocated thread structs when creating a new thread. It should be gone once
119 // the slab allocator is in.
120 static struct thread_queue dead_q;
121 
122 static void thread_kthread_entry(void);
123 static void thread_kthread_exit(void);
124 
125 
126 /*!	Inserts a thread into a team.
127 	You must hold the team lock when you call this function.
128 */
129 static void
130 insert_thread_into_team(struct team *team, struct thread *thread)
131 {
132 	thread->team_next = team->thread_list;
133 	team->thread_list = thread;
134 	team->num_threads++;
135 
136 	if (team->num_threads == 1) {
137 		// this was the first thread
138 		team->main_thread = thread;
139 	}
140 	thread->team = team;
141 }
142 
143 
144 /*!	Removes a thread from a team.
145 	You must hold the team lock when you call this function.
146 */
147 static void
148 remove_thread_from_team(struct team *team, struct thread *thread)
149 {
150 	struct thread *temp, *last = NULL;
151 
152 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
153 		if (temp == thread) {
154 			if (last == NULL)
155 				team->thread_list = temp->team_next;
156 			else
157 				last->team_next = temp->team_next;
158 
159 			team->num_threads--;
160 			break;
161 		}
162 		last = temp;
163 	}
164 }
165 
166 
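/*!	Hash table callback: compares a thread struct against a thread_key.
	Returns 0 if the thread's ID matches the key, 1 otherwise.
*/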
167 static int
168 thread_struct_compare(void *_t, const void *_key)
169 {
170 	struct thread *thread = (struct thread*)_t;
171 	const struct thread_key *key = (const struct thread_key*)_key;
172 
173 	if (thread->id == key->id)
174 		return 0;
175 
176 	return 1;
177 }
178 
179 
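/*!	Hash table callback: hashes the given thread by its ID, or, if \a _t is
	NULL, hashes the thread_key instead.
*/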
180 static uint32
181 thread_struct_hash(void *_t, const void *_key, uint32 range)
182 {
183 	struct thread *thread = (struct thread*)_t;
184 	const struct thread_key *key = (const struct thread_key*)_key;
185 
186 	if (thread != NULL)
187 		return thread->id % range;
188 
189 	return (uint32)key->id % range;
190 }
191 
192 
193 static void
194 reset_signals(struct thread *thread)
195 {
196 	thread->sig_pending = 0;
197 	thread->sig_block_mask = 0;
198 	thread->sig_temp_enabled = 0;
199 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
200 	thread->signal_stack_base = 0;
201 	thread->signal_stack_size = 0;
202 	thread->signal_stack_enabled = false;
203 }
204 
205 
206 /*!	Allocates and fills in a thread structure (or reuses one from the
207 	dead queue).
208 
209 	\param threadID The ID to be assigned to the new thread. If
210 		  \code < 0 \endcode a fresh one is allocated.
211 	\param inthread If non-NULL, this existing thread struct is initialized.
212 */
213 
214 static struct thread *
215 create_thread_struct(struct thread *inthread, const char *name,
216 	thread_id threadID, struct cpu_ent *cpu)
217 {
218 	struct thread *thread;
219 	cpu_status state;
220 	char temp[64];
221 	bool recycled = false;
222 
223 	if (inthread == NULL) {
224 		// try to recycle one from the dead queue first
225 		state = disable_interrupts();
226 		GRAB_THREAD_LOCK();
227 		thread = thread_dequeue(&dead_q);
228 		RELEASE_THREAD_LOCK();
229 		restore_interrupts(state);
230 
231 		// if not, create a new one
232 		if (thread == NULL) {
233 			thread = (struct thread *)malloc(sizeof(struct thread));
234 			if (thread == NULL)
235 				return NULL;
236 		} else {
237 			recycled = true;
238 		}
239 	} else {
240 		thread = inthread;
241 	}
242 
243 	if (!recycled)
244 		scheduler_on_thread_create(thread);
245 
246 	if (name != NULL)
247 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
248 	else
249 		strcpy(thread->name, "unnamed thread");
250 
251 	thread->flags = 0;
252 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
253 	thread->team = NULL;
254 	thread->cpu = cpu;
255 	thread->previous_cpu = NULL;
256 	thread->pinned_to_cpu = 0;
257 	thread->fault_handler = 0;
258 	thread->page_faults_allowed = 1;
259 	thread->kernel_stack_area = -1;
260 	thread->kernel_stack_base = 0;
261 	thread->user_stack_area = -1;
262 	thread->user_stack_base = 0;
263 	thread->user_local_storage = 0;
264 	thread->kernel_errno = 0;
265 	thread->team_next = NULL;
266 	thread->queue_next = NULL;
267 	thread->priority = thread->next_priority = -1;
268 	thread->io_priority = -1;
269 	thread->args1 = NULL;  thread->args2 = NULL;
270 	thread->alarm.period = 0;
271 	reset_signals(thread);
272 	thread->in_kernel = true;
273 	thread->was_yielded = false;
274 	thread->user_time = 0;
275 	thread->kernel_time = 0;
276 	thread->last_time = 0;
277 	thread->exit.status = 0;
278 	thread->exit.reason = 0;
279 	thread->exit.signal = 0;
280 	list_init(&thread->exit.waiters);
281 	thread->select_infos = NULL;
282 	thread->post_interrupt_callback = NULL;
283 	thread->post_interrupt_data = NULL;
284 	thread->user_thread = NULL;
285 
286 	sprintf(temp, "thread_%ld_retcode_sem", thread->id);
287 	thread->exit.sem = create_sem(0, temp);
288 	if (thread->exit.sem < B_OK)
289 		goto err1;
290 
291 	sprintf(temp, "%s send", thread->name);
292 	thread->msg.write_sem = create_sem(1, temp);
293 	if (thread->msg.write_sem < B_OK)
294 		goto err2;
295 
296 	sprintf(temp, "%s receive", thread->name);
297 	thread->msg.read_sem = create_sem(0, temp);
298 	if (thread->msg.read_sem < B_OK)
299 		goto err3;
300 
301 	if (arch_thread_init_thread_struct(thread) < B_OK)
302 		goto err4;
303 
304 	return thread;
305 
306 err4:
307 	delete_sem(thread->msg.read_sem);
308 err3:
309 	delete_sem(thread->msg.write_sem);
310 err2:
311 	delete_sem(thread->exit.sem);
312 err1:
313 	// ToDo: put them in the dead queue instead?
314 	if (inthread == NULL) {
315 		scheduler_on_thread_destroy(thread);
316 		free(thread);
317 	}
318 
319 	return NULL;
320 }
321 
322 
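/*!	Frees a thread struct previously set up by create_thread_struct(): deletes
	its semaphores, notifies the scheduler, and releases the memory.
*/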
323 static void
324 delete_thread_struct(struct thread *thread)
325 {
326 	delete_sem(thread->exit.sem);
327 	delete_sem(thread->msg.write_sem);
328 	delete_sem(thread->msg.read_sem);
329 
330 	scheduler_on_thread_destroy(thread);
331 
332 	// ToDo: put them in the dead queue instead?
333 	free(thread);
334 }
335 
336 
337 /*! This function gets run by a new thread before anything else */
338 static void
339 thread_kthread_entry(void)
340 {
341 	struct thread *thread = thread_get_current_thread();
342 
343 	// The thread is new and has been scheduled the first time. Notify the user
344 	// debugger code.
345 	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
346 		user_debug_thread_scheduled(thread);
347 
348 	// Simulate the thread spinlock release that would occur if the thread had
349 	// been rescheduled from. The reschedule didn't happen because the thread is new.
350 	RELEASE_THREAD_LOCK();
351 
352 	// start tracking time
353 	thread->last_time = system_time();
354 
355 	enable_interrupts(); // this essentially simulates a return-from-interrupt
356 }
357 
358 
359 static void
360 thread_kthread_exit(void)
361 {
362 	struct thread *thread = thread_get_current_thread();
363 
364 	thread->exit.reason = THREAD_RETURN_EXIT;
365 	thread_exit();
366 }
367 
368 
369 /*!	Initializes the thread and jumps to its userspace entry point.
370 	This function is called at creation time of every user thread,
371 	but not for a team's main thread.
372 */
373 static int
374 _create_user_thread_kentry(void)
375 {
376 	struct thread *thread = thread_get_current_thread();
377 
378 	// jump to the entry point in user space
379 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
380 		thread->args1, thread->args2);
381 
382 	// only get here if the above call fails
383 	return 0;
384 }
385 
386 
387 /*! Initializes the thread and calls its kernel space entry point. */
388 static int
389 _create_kernel_thread_kentry(void)
390 {
391 	struct thread *thread = thread_get_current_thread();
392 	int (*func)(void *args) = (int (*)(void *))thread->entry;
393 
394 	// call the entry function with the appropriate args
395 	return func(thread->args1);
396 }
397 
398 
399 /*!	Creates a new thread in the team with the specified team ID.
400 
401 	\param attributes The creation attributes; if attributes.thread is
402 		  \code < 0 \endcode a fresh thread ID is allocated.
403 */
404 static thread_id
405 create_thread(thread_creation_attributes& attributes, bool kernel)
406 {
407 	struct thread *thread, *currentThread;
408 	struct team *team;
409 	cpu_status state;
410 	char stack_name[B_OS_NAME_LENGTH];
411 	status_t status;
412 	bool abort = false;
413 	bool debugNewThread = false;
414 
415 	TRACE(("create_thread(%s, id = %ld, %s)\n", attributes.name,
416 		attributes.thread, kernel ? "kernel" : "user"));
417 
418 	thread = create_thread_struct(NULL, attributes.name, attributes.thread,
419 		NULL);
420 	if (thread == NULL)
421 		return B_NO_MEMORY;
422 
423 	thread->priority = attributes.priority == -1
424 		? B_NORMAL_PRIORITY : attributes.priority;
425 	thread->next_priority = thread->priority;
426 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
427 	thread->state = B_THREAD_SUSPENDED;
428 	thread->next_state = B_THREAD_SUSPENDED;
429 
430 	// init debug structure
431 	init_thread_debug_info(&thread->debug_info);
432 
433 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", attributes.name,
434 		thread->id);
435 	thread->kernel_stack_area = create_area(stack_name,
436 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
437 		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES  * B_PAGE_SIZE,
438 		B_FULL_LOCK,
439 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
440 
441 	if (thread->kernel_stack_area < 0) {
442 		// we're not yet part of a team, so we can just bail out
443 		status = thread->kernel_stack_area;
444 
445 		dprintf("create_thread: error creating kernel stack: %s!\n",
446 			strerror(status));
447 
448 		delete_thread_struct(thread);
449 		return status;
450 	}
451 
452 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
453 		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
454 
455 	state = disable_interrupts();
456 	GRAB_THREAD_LOCK();
457 
458 	// If the new thread belongs to the same team as the current thread,
459 	// it may inherit some of the thread debug flags.
460 	currentThread = thread_get_current_thread();
461 	if (currentThread && currentThread->team->id == attributes.team) {
462 		// inherit all user flags...
463 		int32 debugFlags = currentThread->debug_info.flags
464 			& B_THREAD_DEBUG_USER_FLAG_MASK;
465 
466 		// ... except the syscall tracing flags, unless explicitly specified
467 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
468 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
469 				| B_THREAD_DEBUG_POST_SYSCALL);
470 		}
471 
472 		thread->debug_info.flags = debugFlags;
473 
474 		// stop the new thread, if desired
475 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
476 	}
477 
478 	// insert into global list
479 	hash_insert(sThreadHash, thread);
480 	sUsedThreads++;
481 	scheduler_on_thread_init(thread);
482 	RELEASE_THREAD_LOCK();
483 
484 	GRAB_TEAM_LOCK();
485 	// look at the team, make sure it's not being deleted
486 	team = team_get_team_struct_locked(attributes.team);
487 
488 	if (team == NULL || team->state == TEAM_STATE_DEATH
489 		|| team->death_entry != NULL) {
490 		abort = true;
491 	}
492 
493 	if (!abort && !kernel) {
494 		thread->user_thread = team_allocate_user_thread(team);
495 		abort = thread->user_thread == NULL;
496 	}
497 
498 	if (!abort) {
499 		// Debug the new thread, if the parent thread required that (see above),
500 		// or the respective global team debug flag is set. But only if a
501 		// debugger is installed for the team.
502 		debugNewThread |= (atomic_get(&team->debug_info.flags)
503 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
504 		if (debugNewThread
505 			&& (atomic_get(&team->debug_info.flags)
506 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
507 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
508 		}
509 
510 		insert_thread_into_team(team, thread);
511 	}
512 
513 	RELEASE_TEAM_LOCK();
514 	if (abort) {
515 		GRAB_THREAD_LOCK();
516 		hash_remove(sThreadHash, thread);
517 		RELEASE_THREAD_LOCK();
518 	}
519 	restore_interrupts(state);
520 	if (abort) {
521 		delete_area(thread->kernel_stack_area);
522 		delete_thread_struct(thread);
523 		return B_BAD_TEAM_ID;
524 	}
525 
526 	thread->args1 = attributes.args1;
527 	thread->args2 = attributes.args2;
528 	thread->entry = attributes.entry;
529 	status = thread->id;
530 
531 	// notify listeners
532 	sNotificationService.Notify(THREAD_ADDED, thread);
533 
534 	if (kernel) {
535 		// this sets up an initial kthread stack that runs the entry
536 
537 		// Note: whatever function wants to set up a user stack later for this
538 		// thread must initialize the TLS for it
539 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
540 			&thread_kthread_entry, &thread_kthread_exit);
541 	} else {
542 		// create user stack
543 
544 		// the stack will be between USER_STACK_REGION and the main thread stack
545 		// area (the user stack of the main thread is created in
546 		// team_create_team())
547 		if (attributes.stack_address == NULL) {
548 			thread->user_stack_base = USER_STACK_REGION;
549 			if (attributes.stack_size <= 0)
550 				thread->user_stack_size = USER_STACK_SIZE;
551 			else
552 				thread->user_stack_size = PAGE_ALIGN(attributes.stack_size);
553 			thread->user_stack_size += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
554 
555 			snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack",
556 				attributes.name, thread->id);
557 			thread->user_stack_area = create_area_etc(team->id, stack_name,
558 					(void **)&thread->user_stack_base, B_BASE_ADDRESS,
559 					thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
560 					B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0, 0);
561 			if (thread->user_stack_area < B_OK
562 				|| arch_thread_init_tls(thread) < B_OK) {
563 				// great, we have a fully running thread without a (usable)
564 				// stack
565 				dprintf("create_thread: unable to create proper user stack!\n");
566 				status = thread->user_stack_area;
567 				kill_thread(thread->id);
568 			}
569 		} else {
570 			thread->user_stack_base = (addr_t)attributes.stack_address;
571 			thread->user_stack_size = attributes.stack_size;
572 		}
573 
574 		user_debug_update_new_thread_flags(thread->id);
575 
576 		// copy the user entry over to the args field in the thread struct
577 		// the function this will call will immediately switch the thread into
578 		// user space.
579 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
580 			&thread_kthread_entry, &thread_kthread_exit);
581 	}
582 
583 	return status;
584 }
585 
586 
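/*!	Kernel daemon that buries dead threads: it waits for entries on the
	undertaker list, deletes each dead thread's kernel stack area, removes the
	thread from the kernel team, and recycles the thread struct into the dead
	queue.
*/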
587 static status_t
588 undertaker(void* /*args*/)
589 {
590 	while (true) {
591 		// wait for a thread to bury
592 		InterruptsSpinLocker locker(gThreadSpinlock);
593 
594 		while (sUndertakerEntries.IsEmpty()) {
595 			ConditionVariableEntry conditionEntry;
596 			sUndertakerCondition.Add(&conditionEntry);
597 			locker.Unlock();
598 
599 			conditionEntry.Wait();
600 
601 			locker.Lock();
602 		}
603 
604 		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
605 		locker.Unlock();
606 
607 		UndertakerEntry entry = *_entry;
608 			// we need a copy, since the original entry is on the thread's stack
609 
610 		// we've got an entry
611 		struct thread* thread = entry.thread;
612 
613 		// delete the old kernel stack area
614 		delete_area(thread->kernel_stack_area);
615 
616 		// remove this thread from all of the global lists
617 		disable_interrupts();
618 		GRAB_TEAM_LOCK();
619 
620 		remove_thread_from_team(team_get_kernel_team(), thread);
621 
622 		RELEASE_TEAM_LOCK();
623 		enable_interrupts();
624 			// needed for the debugger notification below
625 
626 		// free the thread structure
627 		locker.Lock();
628 		thread_enqueue(thread, &dead_q);
629 			// TODO: Use the slab allocator!
630 	}
631 
632 	// we can never get here
633 	return B_OK;
634 }
635 
636 
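/*!	Returns the ID of the semaphore the given thread is currently blocked on,
	or -1 if it is not waiting on a semaphore.
*/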
637 static sem_id
638 get_thread_wait_sem(struct thread* thread)
639 {
640 	if (thread->state == B_THREAD_WAITING
641 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
642 		return (sem_id)(addr_t)thread->wait.object;
643 	}
644 	return -1;
645 }
646 
647 
648 /*!	Fills the thread_info structure with information from the specified
649 	thread.
650 	The thread lock must be held when called.
651 */
652 static void
653 fill_thread_info(struct thread *thread, thread_info *info, size_t size)
654 {
655 	info->thread = thread->id;
656 	info->team = thread->team->id;
657 
658 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
659 
660 	if (thread->state == B_THREAD_WAITING) {
661 		info->state = B_THREAD_WAITING;
662 
663 		switch (thread->wait.type) {
664 			case THREAD_BLOCK_TYPE_SNOOZE:
665 				info->state = B_THREAD_ASLEEP;
666 				break;
667 
668 			case THREAD_BLOCK_TYPE_SEMAPHORE:
669 			{
670 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
671 				if (sem == thread->msg.read_sem)
672 					info->state = B_THREAD_RECEIVING;
673 				break;
674 			}
675 
676 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
677 			default:
678 				break;
679 		}
680 	} else
681 		info->state = (thread_state)thread->state;
682 
683 	info->priority = thread->priority;
684 	info->user_time = thread->user_time;
685 	info->kernel_time = thread->kernel_time;
686 	info->stack_base = (void *)thread->user_stack_base;
687 	info->stack_end = (void *)(thread->user_stack_base
688 		+ thread->user_stack_size);
689 	info->sem = get_thread_wait_sem(thread);
690 }
691 
692 
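/*!	Delivers a message (code plus an optional buffer, copied via user_memcpy)
	into the target thread's single message slot. Blocks on the target's write
	semaphore until the slot is free, and returns B_BAD_THREAD_ID if the
	target disappears in the meantime.
*/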
693 static status_t
694 send_data_etc(thread_id id, int32 code, const void *buffer, size_t bufferSize,
695 	int32 flags)
696 {
697 	struct thread *target;
698 	sem_id cachedSem;
699 	cpu_status state;
700 	status_t status;
701 
702 	state = disable_interrupts();
703 	GRAB_THREAD_LOCK();
704 	target = thread_get_thread_struct_locked(id);
705 	if (!target) {
706 		RELEASE_THREAD_LOCK();
707 		restore_interrupts(state);
708 		return B_BAD_THREAD_ID;
709 	}
710 	cachedSem = target->msg.write_sem;
711 	RELEASE_THREAD_LOCK();
712 	restore_interrupts(state);
713 
714 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
715 		return B_NO_MEMORY;
716 
717 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
718 	if (status == B_INTERRUPTED) {
719 		// We got interrupted by a signal
720 		return status;
721 	}
722 	if (status != B_OK) {
723 		// Any other acquisition problems may be due to thread deletion
724 		return B_BAD_THREAD_ID;
725 	}
726 
727 	void* data;
728 	if (bufferSize > 0) {
729 		data = malloc(bufferSize);
730 		if (data == NULL)
731 			return B_NO_MEMORY;
732 		if (user_memcpy(data, buffer, bufferSize) != B_OK) {
733 			free(data);
734 			return B_BAD_DATA;
735 		}
736 	} else
737 		data = NULL;
738 
739 	state = disable_interrupts();
740 	GRAB_THREAD_LOCK();
741 
742 	// The target thread could have been deleted at this point
743 	target = thread_get_thread_struct_locked(id);
744 	if (target == NULL) {
745 		RELEASE_THREAD_LOCK();
746 		restore_interrupts(state);
747 		free(data);
748 		return B_BAD_THREAD_ID;
749 	}
750 
751 	// Save the message information
752 	target->msg.sender = thread_get_current_thread()->id;
753 	target->msg.code = code;
754 	target->msg.size = bufferSize;
755 	target->msg.buffer = data;
756 	cachedSem = target->msg.read_sem;
757 
758 	RELEASE_THREAD_LOCK();
759 	restore_interrupts(state);
760 
761 	release_sem(cachedSem);
762 	return B_OK;
763 }
764 
765 
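/*!	Waits on the calling thread's read semaphore for a message, copies the
	stored data to \a buffer (if given), releases the write semaphore for the
	next sender, and returns the message code.
*/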
766 static int32
767 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
768 	int32 flags)
769 {
770 	struct thread *thread = thread_get_current_thread();
771 	status_t status;
772 	size_t size;
773 	int32 code;
774 
775 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
776 	if (status < B_OK) {
777 		// Actually, we're not supposed to return error codes
778 		// but since the only reason this can fail is that we
779 		// were killed, it's probably okay to do so (but also
780 		// meaningless).
781 		return status;
782 	}
783 
784 	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
785 		size = min_c(bufferSize, thread->msg.size);
786 		status = user_memcpy(buffer, thread->msg.buffer, size);
787 		if (status != B_OK) {
788 			free(thread->msg.buffer);
789 			release_sem(thread->msg.write_sem);
790 			return status;
791 		}
792 	}
793 
794 	*_sender = thread->msg.sender;
795 	code = thread->msg.code;
796 
797 	free(thread->msg.buffer);
798 	release_sem(thread->msg.write_sem);
799 
800 	return code;
801 }
802 
803 
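/*!	Backend for getrlimit(): RLIMIT_NOFILE and RLIMIT_NOVMON are forwarded to
	the VFS, RLIMIT_CORE is always 0/0, and RLIMIT_STACK reports the current
	thread's user stack size.
*/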
804 static status_t
805 common_getrlimit(int resource, struct rlimit * rlp)
806 {
807 	if (!rlp)
808 		return B_BAD_ADDRESS;
809 
810 	switch (resource) {
811 		case RLIMIT_NOFILE:
812 		case RLIMIT_NOVMON:
813 			return vfs_getrlimit(resource, rlp);
814 
815 		case RLIMIT_CORE:
816 			rlp->rlim_cur = 0;
817 			rlp->rlim_max = 0;
818 			return B_OK;
819 
820 		case RLIMIT_STACK:
821 		{
822 			struct thread *thread = thread_get_current_thread();
823 			if (!thread)
824 				return B_ERROR;
825 			rlp->rlim_cur = thread->user_stack_size;
826 			rlp->rlim_max = thread->user_stack_size;
827 			return B_OK;
828 		}
829 
830 		default:
831 			return EINVAL;
832 	}
833 
834 	return B_OK;
835 }
836 
837 
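/*!	Backend for setrlimit(): only the VFS limits and a no-op RLIMIT_CORE of
	0/0 are accepted; everything else yields EINVAL.
*/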
838 static status_t
839 common_setrlimit(int resource, const struct rlimit * rlp)
840 {
841 	if (!rlp)
842 		return B_BAD_ADDRESS;
843 
844 	switch (resource) {
845 		case RLIMIT_NOFILE:
846 		case RLIMIT_NOVMON:
847 			return vfs_setrlimit(resource, rlp);
848 
849 		case RLIMIT_CORE:
850 			// We don't support core files, so only allow setting this to 0/0.
851 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
852 				return EINVAL;
853 			return B_OK;
854 
855 		default:
856 			return EINVAL;
857 	}
858 
859 	return B_OK;
860 }
861 
862 
863 //	#pragma mark - debugger calls
864 
865 
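/*!	Debugger command: resets the priority of every thread (or only the one
	with the given ID) whose priority is above B_DISPLAY_PRIORITY back to
	B_NORMAL_PRIORITY.
*/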
866 static int
867 make_thread_unreal(int argc, char **argv)
868 {
869 	struct thread *thread;
870 	struct hash_iterator i;
871 	int32 id = -1;
872 
873 	if (argc > 2) {
874 		print_debugger_command_usage(argv[0]);
875 		return 0;
876 	}
877 
878 	if (argc > 1)
879 		id = strtoul(argv[1], NULL, 0);
880 
881 	hash_open(sThreadHash, &i);
882 
883 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
884 		if (id != -1 && thread->id != id)
885 			continue;
886 
887 		if (thread->priority > B_DISPLAY_PRIORITY) {
888 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
889 			kprintf("thread %ld made unreal\n", thread->id);
890 		}
891 	}
892 
893 	hash_close(sThreadHash, &i, false);
894 	return 0;
895 }
896 
897 
898 static int
899 set_thread_prio(int argc, char **argv)
900 {
901 	struct thread *thread;
902 	struct hash_iterator i;
903 	int32 id;
904 	int32 prio;
905 
906 	if (argc > 3 || argc < 2) {
907 		print_debugger_command_usage(argv[0]);
908 		return 0;
909 	}
910 
911 	prio = strtoul(argv[1], NULL, 0);
912 	if (prio > THREAD_MAX_SET_PRIORITY)
913 		prio = THREAD_MAX_SET_PRIORITY;
914 	if (prio < THREAD_MIN_SET_PRIORITY)
915 		prio = THREAD_MIN_SET_PRIORITY;
916 
917 	if (argc > 2)
918 		id = strtoul(argv[2], NULL, 0);
919 	else
920 		id = thread_get_current_thread()->id;
921 
922 	hash_open(sThreadHash, &i);
923 
924 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
925 		if (thread->id != id)
926 			continue;
927 		thread->priority = thread->next_priority = prio;
928 		kprintf("thread %ld set to priority %ld\n", id, prio);
929 		break;
930 	}
931 	if (!thread)
932 		kprintf("thread %ld (%#lx) not found\n", id, id);
933 
934 	hash_close(sThreadHash, &i, false);
935 	return 0;
936 }
937 
938 
939 static int
940 make_thread_suspended(int argc, char **argv)
941 {
942 	struct thread *thread;
943 	struct hash_iterator i;
944 	int32 id;
945 
946 	if (argc > 2) {
947 		print_debugger_command_usage(argv[0]);
948 		return 0;
949 	}
950 
951 	if (argc == 1)
952 		id = thread_get_current_thread()->id;
953 	else
954 		id = strtoul(argv[1], NULL, 0);
955 
956 	hash_open(sThreadHash, &i);
957 
958 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
959 		if (thread->id != id)
960 			continue;
961 
962 		thread->next_state = B_THREAD_SUSPENDED;
963 		kprintf("thread %ld suspended\n", id);
964 		break;
965 	}
966 	if (!thread)
967 		kprintf("thread %ld (%#lx) not found\n", id, id);
968 
969 	hash_close(sThreadHash, &i, false);
970 	return 0;
971 }
972 
973 
974 static int
975 make_thread_resumed(int argc, char **argv)
976 {
977 	struct thread *thread;
978 	struct hash_iterator i;
979 	int32 id;
980 
981 	if (argc != 2) {
982 		print_debugger_command_usage(argv[0]);
983 		return 0;
984 	}
985 
986 	// force user to enter a thread id, as using
987 	// the current thread is usually not intended
988 	id = strtoul(argv[1], NULL, 0);
989 
990 	hash_open(sThreadHash, &i);
991 
992 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
993 		if (thread->id != id)
994 			continue;
995 
996 		if (thread->state == B_THREAD_SUSPENDED) {
997 			scheduler_enqueue_in_run_queue(thread);
998 			kprintf("thread %ld resumed\n", thread->id);
999 		}
1000 		break;
1001 	}
1002 	if (!thread)
1003 		kprintf("thread %ld (%#lx) not found\n", id, id);
1004 
1005 	hash_close(sThreadHash, &i, false);
1006 	return 0;
1007 }
1008 
1009 
1010 static int
1011 drop_into_debugger(int argc, char **argv)
1012 {
1013 	status_t err;
1014 	int32 id;
1015 
1016 	if (argc > 2) {
1017 		print_debugger_command_usage(argv[0]);
1018 		return 0;
1019 	}
1020 
1021 	if (argc == 1)
1022 		id = thread_get_current_thread()->id;
1023 	else
1024 		id = strtoul(argv[1], NULL, 0);
1025 
1026 	err = _user_debug_thread(id);
1027 	if (err)
1028 		kprintf("drop failed\n");
1029 	else
1030 		kprintf("thread %ld dropped into user debugger\n", id);
1031 
1032 	return 0;
1033 }
1034 
1035 
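/*!	Returns a short human-readable name for the given thread state; for
	waiting threads the wait type refines the result (e.g. "zzz" for a
	snoozing thread).
*/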
1036 static const char *
1037 state_to_text(struct thread *thread, int32 state)
1038 {
1039 	switch (state) {
1040 		case B_THREAD_READY:
1041 			return "ready";
1042 
1043 		case B_THREAD_RUNNING:
1044 			return "running";
1045 
1046 		case B_THREAD_WAITING:
1047 		{
1048 			if (thread != NULL) {
1049 				switch (thread->wait.type) {
1050 					case THREAD_BLOCK_TYPE_SNOOZE:
1051 						return "zzz";
1052 
1053 					case THREAD_BLOCK_TYPE_SEMAPHORE:
1054 					{
1055 						sem_id sem = (sem_id)(addr_t)thread->wait.object;
1056 						if (sem == thread->msg.read_sem)
1057 							return "receive";
1058 						break;
1059 					}
1060 				}
1061 			}
1062 
1063 			return "waiting";
1064 		}
1065 
1066 		case B_THREAD_SUSPENDED:
1067 			return "suspended";
1068 
1069 		case THREAD_STATE_FREE_ON_RESCHED:
1070 			return "death";
1071 
1072 		default:
1073 			return "UNKNOWN";
1074 	}
1075 }
1076 
1077 
1078 static void
1079 print_thread_list_table_head()
1080 {
1081 	kprintf("thread         id  state     wait for   object  cpu pri  stack    "
1082 		"  team  name\n");
1083 }
1084 
1085 
1086 static void
1087 _dump_thread_info(struct thread *thread, bool shortInfo)
1088 {
1089 	if (shortInfo) {
1090 		kprintf("%p %6ld  %-10s", thread, thread->id, state_to_text(thread,
1091 			thread->state));
1092 
1093 		// does it block on a semaphore or a condition variable?
1094 		if (thread->state == B_THREAD_WAITING) {
1095 			switch (thread->wait.type) {
1096 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1097 				{
1098 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1099 					if (sem == thread->msg.read_sem)
1100 						kprintf("                    ");
1101 					else
1102 						kprintf("sem  %12ld   ", sem);
1103 					break;
1104 				}
1105 
1106 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1107 					kprintf("cvar   %p   ", thread->wait.object);
1108 					break;
1109 
1110 				case THREAD_BLOCK_TYPE_SNOOZE:
1111 					kprintf("                    ");
1112 					break;
1113 
1114 				case THREAD_BLOCK_TYPE_SIGNAL:
1115 					kprintf("signal              ");
1116 					break;
1117 
1118 				case THREAD_BLOCK_TYPE_MUTEX:
1119 					kprintf("mutex  %p   ", thread->wait.object);
1120 					break;
1121 
1122 				case THREAD_BLOCK_TYPE_RW_LOCK:
1123 					kprintf("rwlock %p   ", thread->wait.object);
1124 					break;
1125 
1126 				case THREAD_BLOCK_TYPE_OTHER:
1127 					kprintf("other               ");
1128 					break;
1129 
1130 				default:
1131 					kprintf("???    %p   ", thread->wait.object);
1132 					break;
1133 			}
1134 		} else
1135 			kprintf("        -           ");
1136 
1137 		// on which CPU does it run?
1138 		if (thread->cpu)
1139 			kprintf("%2d", thread->cpu->cpu_num);
1140 		else
1141 			kprintf(" -");
1142 
1143 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1144 			(void *)thread->kernel_stack_base, thread->team->id,
1145 			thread->name != NULL ? thread->name : "<NULL>");
1146 
1147 		return;
1148 	}
1149 
1150 	// print the long info
1151 
1152 	struct death_entry *death = NULL;
1153 
1154 	kprintf("THREAD: %p\n", thread);
1155 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1156 	kprintf("name:               \"%s\"\n", thread->name);
1157 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1158 		thread->all_next, thread->team_next, thread->queue_next);
1159 	kprintf("priority:           %ld (next %ld, I/O: %ld)\n", thread->priority,
1160 		thread->next_priority, thread->io_priority);
1161 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1162 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1163 	kprintf("cpu:                %p ", thread->cpu);
1164 	if (thread->cpu)
1165 		kprintf("(%d)\n", thread->cpu->cpu_num);
1166 	else
1167 		kprintf("\n");
1168 	kprintf("sig_pending:        %#" B_PRIx32 " (blocked: %#" B_PRIx32
1169 		", temp enabled: %#" B_PRIx32 ")\n", thread->sig_pending,
1170 		thread->sig_block_mask, thread->sig_temp_enabled);
1171 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1172 
1173 	if (thread->state == B_THREAD_WAITING) {
1174 		kprintf("waiting for:        ");
1175 
1176 		switch (thread->wait.type) {
1177 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1178 			{
1179 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1180 				if (sem == thread->msg.read_sem)
1181 					kprintf("data\n");
1182 				else
1183 					kprintf("semaphore %ld\n", sem);
1184 				break;
1185 			}
1186 
1187 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1188 				kprintf("condition variable %p\n", thread->wait.object);
1189 				break;
1190 
1191 			case THREAD_BLOCK_TYPE_SNOOZE:
1192 				kprintf("snooze()\n");
1193 				break;
1194 
1195 			case THREAD_BLOCK_TYPE_SIGNAL:
1196 				kprintf("signal\n");
1197 				break;
1198 
1199 			case THREAD_BLOCK_TYPE_MUTEX:
1200 				kprintf("mutex %p\n", thread->wait.object);
1201 				break;
1202 
1203 			case THREAD_BLOCK_TYPE_RW_LOCK:
1204 				kprintf("rwlock %p\n", thread->wait.object);
1205 				break;
1206 
1207 			case THREAD_BLOCK_TYPE_OTHER:
1208 				kprintf("other (%s)\n", (char*)thread->wait.object);
1209 				break;
1210 
1211 			default:
1212 				kprintf("unknown (%p)\n", thread->wait.object);
1213 				break;
1214 		}
1215 	}
1216 
1217 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1218 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1219 	kprintf("entry:              %p\n", (void *)thread->entry);
1220 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1221 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1222 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1223 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1224 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1225 	kprintf("  exit.waiters:\n");
1226 	while ((death = (struct death_entry*)list_get_next_item(
1227 			&thread->exit.waiters, death)) != NULL) {
1228 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1229 	}
1230 
1231 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1232 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1233 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1234 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1235 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1236 	kprintf("user_thread:        %p\n", (void *)thread->user_thread);
1237 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1238 		strerror(thread->kernel_errno));
1239 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1240 	kprintf("user_time:          %Ld\n", thread->user_time);
1241 	kprintf("architecture dependent section:\n");
1242 	kprintf("architecture dependant section:\n");
1243 	arch_thread_dump_info(&thread->arch_info);
1244 }
1245 
1246 
1247 static int
1248 dump_thread_info(int argc, char **argv)
1249 {
1250 	bool shortInfo = false;
1251 	int argi = 1;
1252 	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
1253 		shortInfo = true;
1254 		print_thread_list_table_head();
1255 		argi++;
1256 	}
1257 
1258 	if (argi == argc) {
1259 		_dump_thread_info(thread_get_current_thread(), shortInfo);
1260 		return 0;
1261 	}
1262 
1263 	for (; argi < argc; argi++) {
1264 		const char *name = argv[argi];
1265 		int32 id = strtoul(name, NULL, 0);
1266 
1267 		if (IS_KERNEL_ADDRESS(id)) {
1268 			// semi-hack
1269 			_dump_thread_info((struct thread *)id, shortInfo);
1270 			continue;
1271 		}
1272 
1273 		// walk through the thread list, trying to match name or id
1274 		bool found = false;
1275 		struct hash_iterator i;
1276 		hash_open(sThreadHash, &i);
1277 		struct thread *thread;
1278 		while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1279 			if (!strcmp(name, thread->name) || thread->id == id) {
1280 				_dump_thread_info(thread, shortInfo);
1281 				found = true;
1282 				break;
1283 			}
1284 		}
1285 		hash_close(sThreadHash, &i, false);
1286 
1287 		if (!found)
1288 			kprintf("thread \"%s\" (%ld) doesn't exist!\n", name, id);
1289 	}
1290 
1291 	return 0;
1292 }
1293 
1294 
1295 static int
1296 dump_thread_list(int argc, char **argv)
1297 {
1298 	struct thread *thread;
1299 	struct hash_iterator i;
1300 	bool realTimeOnly = false;
1301 	bool calling = false;
1302 	const char *callSymbol = NULL;
1303 	addr_t callStart = 0;
1304 	addr_t callEnd = 0;
1305 	int32 requiredState = 0;
1306 	team_id team = -1;
1307 	sem_id sem = -1;
1308 
1309 	if (!strcmp(argv[0], "realtime"))
1310 		realTimeOnly = true;
1311 	else if (!strcmp(argv[0], "ready"))
1312 		requiredState = B_THREAD_READY;
1313 	else if (!strcmp(argv[0], "running"))
1314 		requiredState = B_THREAD_RUNNING;
1315 	else if (!strcmp(argv[0], "waiting")) {
1316 		requiredState = B_THREAD_WAITING;
1317 
1318 		if (argc > 1) {
1319 			sem = strtoul(argv[1], NULL, 0);
1320 			if (sem == 0)
1321 				kprintf("ignoring invalid semaphore argument.\n");
1322 		}
1323 	} else if (!strcmp(argv[0], "calling")) {
1324 		if (argc < 2) {
1325 			kprintf("Need to give a symbol name or start and end arguments.\n");
1326 			return 0;
1327 		} else if (argc == 3) {
1328 			callStart = parse_expression(argv[1]);
1329 			callEnd = parse_expression(argv[2]);
1330 		} else
1331 			callSymbol = argv[1];
1332 
1333 		calling = true;
1334 	} else if (argc > 1) {
1335 		team = strtoul(argv[1], NULL, 0);
1336 		if (team == 0)
1337 			kprintf("ignoring invalid team argument.\n");
1338 	}
1339 
1340 	print_thread_list_table_head();
1341 
1342 	hash_open(sThreadHash, &i);
1343 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1344 		// filter out threads not matching the search criteria
1345 		if ((requiredState && thread->state != requiredState)
1346 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1347 					callStart, callEnd))
1348 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1349 			|| (team > 0 && thread->team->id != team)
1350 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1351 			continue;
1352 
1353 		_dump_thread_info(thread, true);
1354 	}
1355 	hash_close(sThreadHash, &i, false);
1356 	return 0;
1357 }
1358 
1359 
1360 //	#pragma mark - private kernel API
1361 
1362 
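/*!	Terminates the calling thread: tears down its user stack, moves it into
	the kernel team (deleting the whole team if it is the main thread), fills
	and publishes its death entries, hands the struct to the undertaker, and
	reschedules for the last time. This function does not return.
*/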
1363 void
1364 thread_exit(void)
1365 {
1366 	cpu_status state;
1367 	struct thread *thread = thread_get_current_thread();
1368 	struct team *team = thread->team;
1369 	thread_id parentID = -1;
1370 	status_t status;
1371 	struct thread_debug_info debugInfo;
1372 	team_id teamID = team->id;
1373 
1374 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1375 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1376 			? "due to signal" : "normally", thread->exit.status));
1377 
1378 	if (!are_interrupts_enabled())
1379 		panic("thread_exit() called with interrupts disabled!\n");
1380 
1381 	// boost our priority to get this over with
1382 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1383 
1384 	// Cancel previously installed alarm timer, if any
1385 	cancel_timer(&thread->alarm);
1386 
1387 	// delete the user stack area first, we won't need it anymore
1388 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1389 		area_id area = thread->user_stack_area;
1390 		thread->user_stack_area = -1;
1391 		vm_delete_area(team->id, area, true);
1392 	}
1393 
1394 	struct job_control_entry *death = NULL;
1395 	struct death_entry* threadDeathEntry = NULL;
1396 	bool deleteTeam = false;
1397 	port_id debuggerPort = -1;
1398 
1399 	if (team != team_get_kernel_team()) {
1400 		user_debug_thread_exiting(thread);
1401 
1402 		if (team->main_thread == thread) {
1403 			// The main thread is exiting. Shut down the whole team.
1404 			deleteTeam = true;
1405 		} else {
1406 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1407 			team_free_user_thread(thread);
1408 		}
1409 
1410 		// remove this thread from the current team and add it to the kernel
1411 		// put the thread into the kernel team until it dies
1412 		state = disable_interrupts();
1413 		GRAB_TEAM_LOCK();
1414 
1415 		if (deleteTeam)
1416 			debuggerPort = team_shutdown_team(team, state);
1417 
1418 		GRAB_THREAD_LOCK();
1419 			// removing the thread and putting its death entry into the parent
1420 			// team needs to be an atomic operation
1421 
1422 		// remember how long this thread lasted
1423 		team->dead_threads_kernel_time += thread->kernel_time;
1424 		team->dead_threads_user_time += thread->user_time;
1425 
1426 		remove_thread_from_team(team, thread);
1427 		insert_thread_into_team(team_get_kernel_team(), thread);
1428 
1429 		if (team->death_entry != NULL) {
1430 			if (--team->death_entry->remaining_threads == 0)
1431 				team->death_entry->condition.NotifyOne(true, B_OK);
1432 		}
1433 
1434 		if (deleteTeam) {
1435 			struct team *parent = team->parent;
1436 
1437 			// remember who our parent was so we can send a signal
1438 			parentID = parent->id;
1439 
1440 			// Set the team job control state to "dead" and detach the job
1441 			// control entry from our team struct.
1442 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1443 			death = team->job_control_entry;
1444 			team->job_control_entry = NULL;
1445 
1446 			if (death != NULL) {
1447 				death->InitDeadState();
1448 
1449 				// team_set_job_control_state() already moved our entry
1450 				// into the parent's list. We just check the soft limit of
1451 				// death entries.
1452 				if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
1453 					death = parent->dead_children->entries.RemoveHead();
1454 					parent->dead_children->count--;
1455 				} else
1456 					death = NULL;
1457 
1458 				RELEASE_THREAD_LOCK();
1459 			} else
1460 				RELEASE_THREAD_LOCK();
1461 
1462 			team_remove_team(team);
1463 
1464 			send_signal_etc(parentID, SIGCHLD,
1465 				SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
1466 		} else {
1467 			// The thread is not the main thread. We store a thread death
1468 			// entry for it, unless someone is already waiting for it.
1469 			if (threadDeathEntry != NULL
1470 				&& list_is_empty(&thread->exit.waiters)) {
1471 				threadDeathEntry->thread = thread->id;
1472 				threadDeathEntry->status = thread->exit.status;
1473 				threadDeathEntry->reason = thread->exit.reason;
1474 				threadDeathEntry->signal = thread->exit.signal;
1475 
1476 				// add the entry -- remove an old one, if we hit the limit
1477 				list_add_item(&team->dead_threads, threadDeathEntry);
1478 				team->dead_threads_count++;
1479 				threadDeathEntry = NULL;
1480 
1481 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1482 					threadDeathEntry = (death_entry*)list_remove_head_item(
1483 						&team->dead_threads);
1484 					team->dead_threads_count--;
1485 				}
1486 			}
1487 
1488 			RELEASE_THREAD_LOCK();
1489 		}
1490 
1491 		RELEASE_TEAM_LOCK();
1492 
1493 		// swap address spaces, to make sure we're running on the kernel's pgdir
1494 		vm_swap_address_space(team->address_space, VMAddressSpace::Kernel());
1495 		restore_interrupts(state);
1496 
1497 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1498 	}
1499 
1500 	free(threadDeathEntry);
1501 
1502 	// delete the team if we're its main thread
1503 	if (deleteTeam) {
1504 		team_delete_team(team, debuggerPort);
1505 
1506 		// we need to delete any death entry that made it to here
1507 		delete death;
1508 	}
1509 
1510 	state = disable_interrupts();
1511 	GRAB_THREAD_LOCK();
1512 
1513 	// remove thread from hash, so it's no longer accessible
1514 	hash_remove(sThreadHash, thread);
1515 	sUsedThreads--;
1516 
1517 	// Stop debugging for this thread
1518 	debugInfo = thread->debug_info;
1519 	clear_thread_debug_info(&thread->debug_info, true);
1520 
1521 	// Remove the select infos. We notify them a little later.
1522 	select_info* selectInfos = thread->select_infos;
1523 	thread->select_infos = NULL;
1524 
1525 	RELEASE_THREAD_LOCK();
1526 	restore_interrupts(state);
1527 
1528 	destroy_thread_debug_info(&debugInfo);
1529 
1530 	// notify select infos
1531 	select_info* info = selectInfos;
1532 	while (info != NULL) {
1533 		select_sync* sync = info->sync;
1534 
1535 		notify_select_events(info, B_EVENT_INVALID);
1536 		info = info->next;
1537 		put_select_sync(sync);
1538 	}
1539 
1540 	// notify listeners
1541 	sNotificationService.Notify(THREAD_REMOVED, thread);
1542 
1543 	// shutdown the thread messaging
1544 
1545 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1546 	if (status == B_WOULD_BLOCK) {
1547 		// there is data waiting for us, so let us eat it
1548 		thread_id sender;
1549 
1550 		delete_sem(thread->msg.write_sem);
1551 			// first, let's remove all possibly waiting writers
1552 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1553 	} else {
1554 		// we probably own the semaphore here, and we're the last to do so
1555 		delete_sem(thread->msg.write_sem);
1556 	}
1557 	// now we can safely remove the msg.read_sem
1558 	delete_sem(thread->msg.read_sem);
1559 
1560 	// fill all death entries and delete the sem that others will use to wait on us
1561 	{
1562 		sem_id cachedExitSem = thread->exit.sem;
1563 		cpu_status state;
1564 
1565 		state = disable_interrupts();
1566 		GRAB_THREAD_LOCK();
1567 
1568 		// make sure no one will grab this semaphore again
1569 		thread->exit.sem = -1;
1570 
1571 		// fill all death entries
1572 		death_entry* entry = NULL;
1573 		while ((entry = (struct death_entry*)list_get_next_item(
1574 				&thread->exit.waiters, entry)) != NULL) {
1575 			entry->status = thread->exit.status;
1576 			entry->reason = thread->exit.reason;
1577 			entry->signal = thread->exit.signal;
1578 		}
1579 
1580 		RELEASE_THREAD_LOCK();
1581 		restore_interrupts(state);
1582 
1583 		delete_sem(cachedExitSem);
1584 	}
1585 
1586 	// notify the debugger
1587 	if (teamID != team_get_kernel_team_id())
1588 		user_debug_thread_deleted(teamID, thread->id);
1589 
1590 	// enqueue in the undertaker list and reschedule for the last time
1591 	UndertakerEntry undertakerEntry(thread, teamID);
1592 
1593 	disable_interrupts();
1594 	GRAB_THREAD_LOCK();
1595 
1596 	sUndertakerEntries.Add(&undertakerEntry);
1597 	sUndertakerCondition.NotifyOne(true);
1598 
1599 	thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
1600 	scheduler_reschedule();
1601 
1602 	panic("never can get here\n");
1603 }
1604 
1605 
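/*!	Looks up a thread by ID, acquiring the thread lock itself; the convenience
	wrapper around thread_get_thread_struct_locked().
*/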
1606 struct thread *
1607 thread_get_thread_struct(thread_id id)
1608 {
1609 	struct thread *thread;
1610 	cpu_status state;
1611 
1612 	state = disable_interrupts();
1613 	GRAB_THREAD_LOCK();
1614 
1615 	thread = thread_get_thread_struct_locked(id);
1616 
1617 	RELEASE_THREAD_LOCK();
1618 	restore_interrupts(state);
1619 
1620 	return thread;
1621 }
1622 
1623 
1624 struct thread *
1625 thread_get_thread_struct_locked(thread_id id)
1626 {
1627 	struct thread_key key;
1628 
1629 	key.id = id;
1630 
1631 	return (struct thread*)hash_lookup(sThreadHash, &key);
1632 }
1633 
1634 
1635 /*!	Called in the interrupt handler code when a thread enters
1636 	the kernel for any reason.
1637 	Only tracks time for now.
1638 	Interrupts are disabled.
1639 */
1640 void
1641 thread_at_kernel_entry(bigtime_t now)
1642 {
1643 	struct thread *thread = thread_get_current_thread();
1644 
1645 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1646 
1647 	// track user time
1648 	thread->user_time += now - thread->last_time;
1649 	thread->last_time = now;
1650 
1651 	thread->in_kernel = true;
1652 }
1653 
1654 
1655 /*!	Called whenever a thread exits kernel space to user space.
1656 	Tracks time, handles signals, ...
1657 	Interrupts must be enabled. When the function returns, interrupts will be
1658 	disabled.
1659 */
1660 void
1661 thread_at_kernel_exit(void)
1662 {
1663 	struct thread *thread = thread_get_current_thread();
1664 
1665 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1666 
1667 	while (handle_signals(thread)) {
1668 		InterruptsSpinLocker _(gThreadSpinlock);
1669 		scheduler_reschedule();
1670 	}
1671 
1672 	disable_interrupts();
1673 
1674 	thread->in_kernel = false;
1675 
1676 	// track kernel time
1677 	bigtime_t now = system_time();
1678 	thread->kernel_time += now - thread->last_time;
1679 	thread->last_time = now;
1680 }
1681 
1682 
1683 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1684 	and no debugging shall be done.
1685 	Interrupts must be disabled.
1686 */
1687 void
1688 thread_at_kernel_exit_no_signals(void)
1689 {
1690 	struct thread *thread = thread_get_current_thread();
1691 
1692 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1693 
1694 	thread->in_kernel = false;
1695 
1696 	// track kernel time
1697 	bigtime_t now = system_time();
1698 	thread->kernel_time += now - thread->last_time;
1699 	thread->last_time = now;
1700 }
1701 
1702 
1703 void
1704 thread_reset_for_exec(void)
1705 {
1706 	struct thread *thread = thread_get_current_thread();
1707 
1708 	cancel_timer(&thread->alarm);
1709 	reset_signals(thread);
1710 }
1711 
1712 
1713 /*! Inserts a thread at the tail of a queue */
1714 void
1715 thread_enqueue(struct thread *thread, struct thread_queue *queue)
1716 {
1717 	thread->queue_next = NULL;
1718 	if (queue->head == NULL) {
1719 		queue->head = thread;
1720 		queue->tail = thread;
1721 	} else {
1722 		queue->tail->queue_next = thread;
1723 		queue->tail = thread;
1724 	}
1725 }
1726 
1727 
1728 struct thread *
1729 thread_lookat_queue(struct thread_queue *queue)
1730 {
1731 	return queue->head;
1732 }
1733 
1734 
1735 struct thread *
1736 thread_dequeue(struct thread_queue *queue)
1737 {
1738 	struct thread *thread = queue->head;
1739 
1740 	if (thread != NULL) {
1741 		queue->head = thread->queue_next;
1742 		if (queue->tail == thread)
1743 			queue->tail = NULL;
1744 	}
1745 	return thread;
1746 }
1747 
1748 
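/*!	Removes and returns the thread with the given ID from the queue, or NULL
	if no such thread is queued.
*/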
1749 struct thread *
1750 thread_dequeue_id(struct thread_queue *q, thread_id id)
1751 {
1752 	struct thread *thread;
1753 	struct thread *last = NULL;
1754 
1755 	thread = q->head;
1756 	while (thread != NULL) {
1757 		if (thread->id == id) {
1758 			if (last == NULL)
1759 				q->head = thread->queue_next;
1760 			else
1761 				last->queue_next = thread->queue_next;
1762 
1763 			if (q->tail == thread)
1764 				q->tail = last;
1765 			break;
1766 		}
1767 		last = thread;
1768 		thread = thread->queue_next;
1769 	}
1770 	return thread;
1771 }
1772 
1773 
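/*!	Calls \a callback for every thread in the global hash table until the
	callback returns \c true; returns that thread, or NULL if the iteration
	completed without a match.
*/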
1774 struct thread*
1775 thread_iterate_through_threads(thread_iterator_callback callback, void* cookie)
1776 {
1777 	struct hash_iterator iterator;
1778 	hash_open(sThreadHash, &iterator);
1779 
1780 	struct thread* thread;
1781 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
1782 			!= NULL) {
1783 		if (callback(thread, cookie))
1784 			break;
1785 	}
1786 
1787 	hash_close(sThreadHash, &iterator, false);
1788 
1789 	return thread;
1790 }
1791 
1792 
1793 thread_id
1794 allocate_thread_id(void)
1795 {
1796 	return atomic_add(&sNextThreadID, 1);
1797 }
1798 
1799 
1800 thread_id
1801 peek_next_thread_id(void)
1802 {
1803 	return atomic_get(&sNextThreadID);
1804 }
1805 
1806 
1807 /*!	Yield the CPU to other threads.
1808 	If \a force is \c true, the thread will almost certainly be unscheduled.
1809 	If \c false, and there is no other thread in the ready state, or this
1810 	thread has a higher priority than the other ready threads, it still has
1811 	a good chance to continue running.
1812 */
1813 void
1814 thread_yield(bool force)
1815 {
1816 	if (force) {
1817 		// snooze for roughly 3 thread quantums
1818 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1819 #if 0
1820 		cpu_status state;
1821 
1822 		struct thread *thread = thread_get_current_thread();
1823 		if (thread == NULL)
1824 			return;
1825 
1826 		state = disable_interrupts();
1827 		GRAB_THREAD_LOCK();
1828 
1829 		// mark the thread as yielded, so it will not be scheduled next
1830 		//thread->was_yielded = true;
1831 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1832 		scheduler_reschedule();
1833 
1834 		RELEASE_THREAD_LOCK();
1835 		restore_interrupts(state);
1836 #endif
1837 	} else {
1838 		struct thread *thread = thread_get_current_thread();
1839 		if (thread == NULL)
1840 			return;
1841 
1842 		// Don't force the thread off the CPU, just reschedule.
1843 		InterruptsSpinLocker _(gThreadSpinlock);
1844 		scheduler_reschedule();
1845 	}
1846 }
1847 
1848 
1849 /*!	Kernel private thread creation function.
1850 
1851 	\param threadID The ID to be assigned to the new thread. If
1852 		  \code < 0 \endcode a fresh one is allocated.
1853 */
1854 thread_id
1855 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1856 	void *arg, team_id team, thread_id threadID)
1857 {
1858 	thread_creation_attributes attributes;
1859 	attributes.entry = (thread_entry_func)function;
1860 	attributes.name = name;
1861 	attributes.priority = priority;
1862 	attributes.args1 = arg;
1863 	attributes.args2 = NULL;
1864 	attributes.stack_address = NULL;
1865 	attributes.stack_size = 0;
1866 	attributes.team = team;
1867 	attributes.thread = threadID;
1868 
1869 	return create_thread(attributes, true);
1870 }
1871 
1872 
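/*!	Waits for the given thread to terminate, placing a death entry on its
	waiter list (or consulting the team's death entries if the thread is
	already gone), and optionally returns its exit status in \a _returnCode.
*/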
1873 status_t
1874 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1875 	status_t *_returnCode)
1876 {
1877 	sem_id exitSem = B_BAD_THREAD_ID;
1878 	struct death_entry death;
1879 	job_control_entry* freeDeath = NULL;
1880 	struct thread *thread;
1881 	cpu_status state;
1882 	status_t status = B_OK;
1883 
1884 	if (id < B_OK)
1885 		return B_BAD_THREAD_ID;
1886 
1887 	// we need to resume the thread we're waiting for first
1888 
1889 	state = disable_interrupts();
1890 	GRAB_THREAD_LOCK();
1891 
1892 	thread = thread_get_thread_struct_locked(id);
1893 	if (thread != NULL) {
1894 		// remember the semaphore we have to wait on and place our death entry
1895 		exitSem = thread->exit.sem;
1896 		list_add_link_to_head(&thread->exit.waiters, &death);
1897 	}
1898 
1899 	death_entry* threadDeathEntry = NULL;
1900 
1901 	RELEASE_THREAD_LOCK();
1902 
1903 	if (thread == NULL) {
1904 		// we couldn't find this thread - maybe it's already gone, and we'll
1905 		// find its death entry in our team
1906 		GRAB_TEAM_LOCK();
1907 
1908 		struct team* team = thread_get_current_thread()->team;
1909 
1910 		// check the child death entries first (i.e. main threads of child
1911 		// teams)
1912 		bool deleteEntry;
1913 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1914 		if (freeDeath != NULL) {
1915 			death.status = freeDeath->status;
1916 			if (!deleteEntry)
1917 				freeDeath = NULL;
1918 		} else {
1919 			// check the thread death entries of the team (non-main threads)
1920 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1921 					&team->dead_threads, threadDeathEntry)) != NULL) {
1922 				if (threadDeathEntry->thread == id) {
1923 					list_remove_item(&team->dead_threads, threadDeathEntry);
1924 					team->dead_threads_count--;
1925 					death.status = threadDeathEntry->status;
1926 					break;
1927 				}
1928 			}
1929 
1930 			if (threadDeathEntry == NULL)
1931 				status = B_BAD_THREAD_ID;
1932 		}
1933 
1934 		RELEASE_TEAM_LOCK();
1935 	}
1936 
1937 	restore_interrupts(state);
1938 
1939 	if (thread == NULL && status == B_OK) {
1940 		// we found the thread's death entry in our team
1941 		if (_returnCode)
1942 			*_returnCode = death.status;
1943 
1944 		delete freeDeath;
1945 		free(threadDeathEntry);
1946 		return B_OK;
1947 	}
1948 
1949 	// we need to wait for the death of the thread
1950 
1951 	if (exitSem < B_OK)
1952 		return B_BAD_THREAD_ID;
1953 
1954 	resume_thread(id);
1955 		// make sure we don't wait forever on a suspended thread
1956 
1957 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1958 
1959 	if (status == B_OK) {
1960 		// this should never happen as the thread deletes the semaphore on exit
1961 		panic("could acquire exit_sem for thread %ld\n", id);
1962 	} else if (status == B_BAD_SEM_ID) {
1963 		// this is the way the thread normally exits
1964 		status = B_OK;
1965 
1966 		if (_returnCode)
1967 			*_returnCode = death.status;
1968 	} else {
1969 		// We were probably interrupted; we need to remove our death entry now.
1970 		state = disable_interrupts();
1971 		GRAB_THREAD_LOCK();
1972 
1973 		thread = thread_get_thread_struct_locked(id);
1974 		if (thread != NULL)
1975 			list_remove_link(&death);
1976 
1977 		RELEASE_THREAD_LOCK();
1978 		restore_interrupts(state);
1979 
1980 		// If the thread is already gone, we need to wait for its exit semaphore
1981 		// to make sure our death entry stays valid - it won't take long
1982 		if (thread == NULL)
1983 			acquire_sem(exitSem);
1984 	}
1985 
1986 	return status;
1987 }
1988 
1989 
1990 status_t
1991 select_thread(int32 id, struct select_info* info, bool kernel)
1992 {
1993 	InterruptsSpinLocker locker(gThreadSpinlock);
1994 
1995 	// get thread
1996 	struct thread* thread = thread_get_thread_struct_locked(id);
1997 	if (thread == NULL)
1998 		return B_BAD_THREAD_ID;
1999 
2000 	// We support only B_EVENT_INVALID at the moment.
2001 	info->selected_events &= B_EVENT_INVALID;
2002 
2003 	// add info to list
2004 	if (info->selected_events != 0) {
2005 		info->next = thread->select_infos;
2006 		thread->select_infos = info;
2007 
2008 		// we need a sync reference
2009 		atomic_add(&info->sync->ref_count, 1);
2010 	}
2011 
2012 	return B_OK;
2013 }
2014 
2015 
2016 status_t
2017 deselect_thread(int32 id, struct select_info* info, bool kernel)
2018 {
2019 	InterruptsSpinLocker locker(gThreadSpinlock);
2020 
2021 	// get thread
2022 	struct thread* thread = thread_get_thread_struct_locked(id);
2023 	if (thread == NULL)
2024 		return B_BAD_THREAD_ID;
2025 
2026 	// remove info from list
2027 	select_info** infoLocation = &thread->select_infos;
2028 	while (*infoLocation != NULL && *infoLocation != info)
2029 		infoLocation = &(*infoLocation)->next;
2030 
2031 	if (*infoLocation != info)
2032 		return B_OK;
2033 
2034 	*infoLocation = info->next;
2035 
2036 	locker.Unlock();
2037 
2038 	// surrender sync reference
2039 	put_select_sync(info->sync);
2040 
2041 	return B_OK;
2042 }
2043 
2044 
2045 int32
2046 thread_max_threads(void)
2047 {
2048 	return sMaxThreads;
2049 }
2050 
2051 
2052 int32
2053 thread_used_threads(void)
2054 {
2055 	return sUsedThreads;
2056 }
2057 
2058 
2059 const char*
2060 thread_state_to_text(struct thread* thread, int32 state)
2061 {
2062 	return state_to_text(thread, state);
2063 }
2064 
2065 
2066 int32
2067 thread_get_io_priority(thread_id id)
2068 {
2069 	// take a shortcut if it is the current thread
2070 	struct thread* thread = thread_get_current_thread();
2071 	int32 priority;
2072 	if (id == thread->id) {
2073 		priority = thread->io_priority;
2074 		return priority < 0 ? thread->priority : priority;
2075 	}
2076 
2077 	// not the current thread -- get it
2078 	InterruptsSpinLocker locker(gThreadSpinlock);
2079 
2080 	thread = thread_get_thread_struct_locked(id);
2081 	if (thread == NULL)
2082 		return B_BAD_THREAD_ID;
2083 
2084 	priority = thread->io_priority;
2085 	return priority < 0 ? thread->priority : priority;
2086 }
2087 
2088 
2089 void
2090 thread_set_io_priority(int32 priority)
2091 {
2092 	struct thread* thread = thread_get_current_thread();
2093 	thread->io_priority = priority;
2094 }
2095 
2096 
2097 status_t
2098 thread_init(kernel_args *args)
2099 {
2100 	uint32 i;
2101 
2102 	TRACE(("thread_init: entry\n"));
2103 
2104 	// create the thread hash table
2105 	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
2106 		&thread_struct_compare, &thread_struct_hash);
2107 
2108 	// zero out the dead thread structure queue
2109 	memset(&dead_q, 0, sizeof(dead_q));
2110 
2111 	if (arch_thread_init(args) < B_OK)
2112 		panic("arch_thread_init() failed!\n");
2113 
2114 	// skip all thread IDs up to and including B_SYSTEM_TEAM, which is reserved
2115 	sNextThreadID = B_SYSTEM_TEAM + 1;
2116 
2117 	// create an idle thread for each cpu
2118 
2119 	for (i = 0; i < args->num_cpus; i++) {
2120 		struct thread *thread;
2121 		area_info info;
2122 		char name[64];
2123 
2124 		sprintf(name, "idle thread %lu", i + 1);
2125 		thread = create_thread_struct(&sIdleThreads[i], name,
2126 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
2127 		if (thread == NULL) {
2128 			panic("error creating idle thread struct\n");
2129 			return B_NO_MEMORY;
2130 		}
2131 
2132 		gCPU[i].running_thread = thread;
2133 
2134 		thread->team = team_get_kernel_team();
2135 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
2136 		thread->state = B_THREAD_RUNNING;
2137 		thread->next_state = B_THREAD_READY;
2138 		sprintf(name, "idle thread %lu kstack", i + 1);
2139 		thread->kernel_stack_area = find_area(name);
2140 		thread->entry = NULL;
2141 
2142 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2143 			panic("error finding idle kstack area\n");
2144 
2145 		thread->kernel_stack_base = (addr_t)info.address;
2146 		thread->kernel_stack_top = thread->kernel_stack_base + info.size;
2147 
2148 		hash_insert(sThreadHash, thread);
2149 		insert_thread_into_team(thread->team, thread);
2150 	}
2151 	sUsedThreads = args->num_cpus;
2152 
2153 	// init the notification service
2154 	new(&sNotificationService) ThreadNotificationService();
2155 
2156 	// start the undertaker thread
2157 	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2158 	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2159 
2160 	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2161 		B_DISPLAY_PRIORITY, NULL);
2162 	if (undertakerThread < 0)
2163 		panic("Failed to create undertaker thread!");
2164 	send_signal_etc(undertakerThread, SIGCONT, B_DO_NOT_RESCHEDULE);
2165 
2166 	// set up some debugger commands
2167 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2168 		"[ <team> ]\n"
2169 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2170 		"all threads of the specified team.\n"
2171 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2172 	add_debugger_command_etc("ready", &dump_thread_list,
2173 		"List all ready threads",
2174 		"\n"
2175 		"Prints a list of all threads in ready state.\n", 0);
2176 	add_debugger_command_etc("running", &dump_thread_list,
2177 		"List all running threads",
2178 		"\n"
2179 		"Prints a list of all threads in running state.\n", 0);
2180 	add_debugger_command_etc("waiting", &dump_thread_list,
2181 		"List all waiting threads (optionally for a specific semaphore)",
2182 		"[ <sem> ]\n"
2183 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2184 		"specified, only the threads waiting on that semaphore are listed.\n"
2185 		"  <sem>  - ID of the semaphore.\n", 0);
2186 	add_debugger_command_etc("realtime", &dump_thread_list,
2187 		"List all realtime threads",
2188 		"\n"
2189 		"Prints a list of all threads with realtime priority.\n", 0);
2190 	add_debugger_command_etc("thread", &dump_thread_info,
2191 		"Dump info about a particular thread",
2192 		"[ -s ] ( <id> | <address> | <name> )*\n"
2193 		"Prints information about the specified thread. If no argument is\n"
2194 		"given the current thread is selected.\n"
2195 		"  -s         - Print info in compact table form (like \"threads\").\n"
2196 		"  <id>       - The ID of the thread.\n"
2197 		"  <address>  - The address of the thread structure.\n"
2198 		"  <name>     - The thread's name.\n", 0);
2199 	add_debugger_command_etc("calling", &dump_thread_list,
2200 		"Show all threads that have a specific address in their call chain",
2201 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2202 	add_debugger_command_etc("unreal", &make_thread_unreal,
2203 		"Set realtime priority threads to normal priority",
2204 		"[ <id> ]\n"
2205 		"Sets the priority of all realtime threads or, if given, the one\n"
2206 		"with the specified ID to \"normal\" priority.\n"
2207 		"  <id>  - The ID of the thread.\n", 0);
2208 	add_debugger_command_etc("suspend", &make_thread_suspended,
2209 		"Suspend a thread",
2210 		"[ <id> ]\n"
2211 		"Suspends the thread with the given ID. If no ID argument is given\n"
2212 		"the current thread is selected.\n"
2213 		"  <id>  - The ID of the thread.\n", 0);
2214 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2215 		"<id>\n"
2216 		"Resumes the specified thread, if it is currently suspended.\n"
2217 		"  <id>  - The ID of the thread.\n", 0);
2218 	add_debugger_command_etc("drop", &drop_into_debugger,
2219 		"Drop a thread into the userland debugger",
2220 		"<id>\n"
2221 		"Drops the specified (userland) thread into the userland debugger\n"
2222 		"after leaving the kernel debugger.\n"
2223 		"  <id>  - The ID of the thread.\n", 0);
2224 	add_debugger_command_etc("priority", &set_thread_prio,
2225 		"Set a thread's priority",
2226 		"<priority> [ <id> ]\n"
2227 		"Sets the priority of the thread with the specified ID to the given\n"
2228 		"priority. If no thread ID is given, the current thread is selected.\n"
2229 		"  <priority>  - The thread's new priority (0 - 120)\n"
2230 		"  <id>        - The ID of the thread.\n", 0);
2231 
2232 	return B_OK;
2233 }
2234 
2235 
2236 status_t
2237 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2238 {
2239 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2240 	// so that get_current_cpu and friends will work, which is crucial for
2241 	// a lot of low level routines
2242 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2243 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2244 	return B_OK;
2245 }
2246 
2247 
2248 //	#pragma mark - thread blocking API
2249 
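/*	The functions in this section implement the generic thread blocking
	mechanism used by higher level waiting code (for example the condition
	variables). A sketch of the usual pattern, assuming a hypothetical
	waiter/notifier pair and that interrupts are disabled and the thread lock
	is held on both sides:

	\code
	// waiting side
	thread_prepare_to_block(thread_get_current_thread(), B_CAN_INTERRUPT,
		THREAD_BLOCK_TYPE_OTHER, "example wait");
	status_t result = thread_block_with_timeout_locked(B_RELATIVE_TIMEOUT,
		1000000LL);

	// notifying side ("waitingThread" stands for the blocked thread)
	thread_unblock_locked(waitingThread, B_OK);
	\endcode
*/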
2250 
2251 static status_t
2252 thread_block_timeout(timer* timer)
2253 {
2254 	// The timer has been installed with B_TIMER_ACQUIRE_THREAD_LOCK, so
2255 	// we're holding the thread lock already. This makes things comfortably
2256 	// easy.
2257 
2258 	struct thread* thread = (struct thread*)timer->user_data;
2259 	thread_unblock_locked(thread, B_TIMED_OUT);
2260 
2261 	return B_HANDLED_INTERRUPT;
2262 }
2263 
2264 
2265 status_t
2266 thread_block()
2267 {
2268 	InterruptsSpinLocker _(gThreadSpinlock);
2269 	return thread_block_locked(thread_get_current_thread());
2270 }
2271 
2272 
2273 void
2274 thread_unblock(thread_id threadID, status_t status)
2275 {
2276 	InterruptsSpinLocker _(gThreadSpinlock);
2277 
2278 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2279 	if (thread != NULL)
2280 		thread_unblock_locked(thread, status);
2281 }
2282 
2283 
2284 status_t
2285 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2286 {
2287 	InterruptsSpinLocker _(gThreadSpinlock);
2288 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
2289 }
2290 
2291 
2292 status_t
2293 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
2294 {
2295 	struct thread* thread = thread_get_current_thread();
2296 
2297 	if (thread->wait.status != 1)
2298 		return thread->wait.status;
2299 
2300 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2301 		&& timeout != B_INFINITE_TIMEOUT;
2302 
2303 	if (useTimer) {
2304 		// Timer flags: absolute/relative + "acquire thread lock". The latter
2305 		// avoids nasty race conditions and deadlock problems that could
2306 		// otherwise occur between our cancel_timer() and a concurrently
2307 		// executing thread_block_timeout().
2308 		uint32 timerFlags;
2309 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2310 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2311 		} else {
2312 			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2313 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2314 				timeout -= rtc_boot_time();
2315 		}
2316 		timerFlags |= B_TIMER_ACQUIRE_THREAD_LOCK;
2317 
2318 		// install the timer
2319 		thread->wait.unblock_timer.user_data = thread;
2320 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2321 			timerFlags);
2322 	}
2323 
2324 	// block
2325 	status_t error = thread_block_locked(thread);
2326 
2327 	// cancel timer, if it didn't fire
2328 	if (error != B_TIMED_OUT && useTimer)
2329 		cancel_timer(&thread->wait.unblock_timer);
2330 
2331 	return error;
2332 }
2333 
2334 
2335 /*!	Thread spinlock must be held.
2336 */
2337 static status_t
2338 user_unblock_thread(thread_id threadID, status_t status)
2339 {
2340 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2341 	if (thread == NULL)
2342 		return B_BAD_THREAD_ID;
2343 	if (thread->user_thread == NULL)
2344 		return B_NOT_ALLOWED;
2345 
2346 	if (thread->user_thread->wait_status > 0) {
2347 		thread->user_thread->wait_status = status;
2348 		thread_unblock_locked(thread, status);
2349 	}
2350 
2351 	return B_OK;
2352 }
2353 
2354 
2355 //	#pragma mark - public kernel API
2356 
2357 
2358 void
2359 exit_thread(status_t returnValue)
2360 {
2361 	struct thread *thread = thread_get_current_thread();
2362 
2363 	thread->exit.status = returnValue;
2364 	thread->exit.reason = THREAD_RETURN_EXIT;
2365 
2366 	// if called from a kernel thread, we don't deliver the signal,
2367 	// we just exit directly to keep the user space behaviour of
2368 	// this function
2369 	if (thread->team != team_get_kernel_team())
2370 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2371 	else
2372 		thread_exit();
2373 }
2374 
2375 
2376 status_t
2377 kill_thread(thread_id id)
2378 {
2379 	if (id <= 0)
2380 		return B_BAD_VALUE;
2381 
2382 	return send_signal(id, SIGKILLTHR);
2383 }
2384 
2385 
2386 status_t
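/*	send_data(), receive_data(), and has_data() implement the simple
	BeOS-style thread message passing: a sender stores a single message for
	the target thread, which the target later fetches. A round-trip sketch
	(hypothetical sender and receiver threads):

	\code
	// sender
	int32 value = 42;
	send_data(receiver, 'mesg', &value, sizeof(value));

	// receiver
	thread_id sender;
	int32 incoming;
	int32 code = receive_data(&sender, &incoming, sizeof(incoming));
	// code is now 'mesg' and incoming is 42
	\endcode

	Note that has_data(), as implemented below, inspects the calling thread's
	own message buffer regardless of the thread ID it is given.
*/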
2387 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2388 {
2389 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2390 }
2391 
2392 
2393 int32
2394 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2395 {
2396 	return receive_data_etc(sender, buffer, bufferSize, 0);
2397 }
2398 
2399 
2400 bool
2401 has_data(thread_id thread)
2402 {
2403 	int32 count;
2404 
2405 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
2406 			&count) != B_OK)
2407 		return false;
2408 
2409 	return count != 0;
2410 }
2411 
2412 
2413 status_t
2414 _get_thread_info(thread_id id, thread_info *info, size_t size)
2415 {
2416 	status_t status = B_OK;
2417 	struct thread *thread;
2418 	cpu_status state;
2419 
2420 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2421 		return B_BAD_VALUE;
2422 
2423 	state = disable_interrupts();
2424 	GRAB_THREAD_LOCK();
2425 
2426 	thread = thread_get_thread_struct_locked(id);
2427 	if (thread == NULL) {
2428 		status = B_BAD_VALUE;
2429 		goto err;
2430 	}
2431 
2432 	fill_thread_info(thread, info, size);
2433 
2434 err:
2435 	RELEASE_THREAD_LOCK();
2436 	restore_interrupts(state);
2437 
2438 	return status;
2439 }
2440 
2441 
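/*!	Kernel backend of get_next_thread_info(). The cookie stores the ID of the
	thread returned last; a cookie of 0 starts the iteration at the team's
	main thread. Illustrative kernel-side use (sketch, for a given team ID):

	\code
	int32 cookie = 0;
	thread_info info;
	while (_get_next_thread_info(teamID, &cookie, &info,
			sizeof(thread_info)) == B_OK)
		dprintf("thread %ld: %s\n", info.thread, info.name);
	\endcode
*/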
2442 status_t
2443 _get_next_thread_info(team_id teamID, int32 *_cookie, thread_info *info,
2444 	size_t size)
2445 {
2446 	if (info == NULL || size != sizeof(thread_info) || teamID < 0)
2447 		return B_BAD_VALUE;
2448 
2449 	int32 lastID = *_cookie;
2450 
2451 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
2452 
2453 	struct team* team;
2454 	if (teamID == B_CURRENT_TEAM)
2455 		team = thread_get_current_thread()->team;
2456 	else
2457 		team = team_get_team_struct_locked(teamID);
2458 
2459 	if (team == NULL)
2460 		return B_BAD_VALUE;
2461 
2462 	struct thread* thread = NULL;
2463 
2464 	if (lastID == 0) {
2465 		// We start with the main thread
2466 		thread = team->main_thread;
2467 	} else {
2468 		// Find the thread with the lowest ID that is still higher than
2469 		// the last one returned (as long as the IDs don't overlap, the
2470 		// team's thread list is sorted from highest to lowest ID).
2471 		for (struct thread* next = team->thread_list; next != NULL;
2472 				next = next->team_next) {
2473 			if (next->id <= lastID)
2474 				break;
2475 
2476 			thread = next;
2477 		}
2478 	}
2479 
2480 	if (thread == NULL)
2481 		return B_BAD_VALUE;
2482 
2483 	lastID = thread->id;
2484 	*_cookie = lastID;
2485 
2486 	SpinLocker threadLocker(gThreadSpinlock);
2487 	fill_thread_info(thread, info, size);
2488 
2489 	return B_OK;
2490 }
2491 
2492 
2493 thread_id
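/*!	Returns the ID of a thread with the given name, or the calling thread's
	own ID if \a name is \c NULL (a fast path that does not take the thread
	lock). A small sketch:

	\code
	thread_id self = find_thread(NULL);
	thread_id undertaker = find_thread("undertaker");
	if (undertaker == B_NAME_NOT_FOUND)
		dprintf("no undertaker thread\n");
	\endcode
*/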
2494 find_thread(const char *name)
2495 {
2496 	struct hash_iterator iterator;
2497 	struct thread *thread;
2498 	cpu_status state;
2499 
2500 	if (name == NULL)
2501 		return thread_get_current_thread_id();
2502 
2503 	state = disable_interrupts();
2504 	GRAB_THREAD_LOCK();
2505 
2506 	// ToDo: this might not be in the same order as find_thread() in BeOS,
2507 	//		which could theoretically be problematic.
2508 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2509 	//		cheap either - although this function is probably used very rarely.
2510 
2511 	hash_open(sThreadHash, &iterator);
2512 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
2513 			!= NULL) {
2514 		// Search through hash
2515 		if (thread->name != NULL && !strcmp(thread->name, name)) {
2516 			thread_id id = thread->id;
2517 
2518 			RELEASE_THREAD_LOCK();
2519 			restore_interrupts(state);
2520 			return id;
2521 		}
2522 	}
2523 
2524 	RELEASE_THREAD_LOCK();
2525 	restore_interrupts(state);
2526 
2527 	return B_NAME_NOT_FOUND;
2528 }
2529 
2530 
2531 status_t
2532 rename_thread(thread_id id, const char *name)
2533 {
2534 	struct thread *thread = thread_get_current_thread();
2535 	status_t status = B_BAD_THREAD_ID;
2536 	cpu_status state;
2537 
2538 	if (name == NULL)
2539 		return B_BAD_VALUE;
2540 
2541 	state = disable_interrupts();
2542 	GRAB_THREAD_LOCK();
2543 
2544 	if (thread->id != id)
2545 		thread = thread_get_thread_struct_locked(id);
2546 
2547 	if (thread != NULL) {
2548 		if (thread->team == thread_get_current_thread()->team) {
2549 			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2550 			status = B_OK;
2551 		} else
2552 			status = B_NOT_ALLOWED;
2553 	}
2554 
2555 	RELEASE_THREAD_LOCK();
2556 	restore_interrupts(state);
2557 
2558 	return status;
2559 }
2560 
2561 
2562 status_t
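/*!	Sets the priority of the thread with the given ID and returns the
	previous priority (or an error code). Since the old value is returned, a
	caller can boost a thread temporarily and restore it afterwards
	(hypothetical example):

	\code
	int32 oldPriority = set_thread_priority(id, B_URGENT_DISPLAY_PRIORITY);
	// ... time critical work ...
	if (oldPriority >= 0)
		set_thread_priority(id, oldPriority);
	\endcode
*/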
2563 set_thread_priority(thread_id id, int32 priority)
2564 {
2565 	struct thread *thread;
2566 	int32 oldPriority;
2567 
2568 	// make sure the passed in priority is within bounds
2569 	if (priority > THREAD_MAX_SET_PRIORITY)
2570 		priority = THREAD_MAX_SET_PRIORITY;
2571 	if (priority < THREAD_MIN_SET_PRIORITY)
2572 		priority = THREAD_MIN_SET_PRIORITY;
2573 
2574 	thread = thread_get_current_thread();
2575 	if (thread->id == id) {
2576 		if (thread_is_idle_thread(thread))
2577 			return B_NOT_ALLOWED;
2578 
2579 		// It's ourself, so we know we aren't in the run queue, and we can
2580 		// manipulate our structure directly
2581 		oldPriority = thread->priority;
2582 			// Note that this might not return the correct value if we are
2583 			// preempted here, and another thread changes our priority before
2584 			// the next line is executed.
2585 		thread->priority = thread->next_priority = priority;
2586 	} else {
2587 		InterruptsSpinLocker _(gThreadSpinlock);
2588 
2589 		thread = thread_get_thread_struct_locked(id);
2590 		if (thread == NULL)
2591 			return B_BAD_THREAD_ID;
2592 
2593 		if (thread_is_idle_thread(thread))
2594 			return B_NOT_ALLOWED;
2595 
2596 		oldPriority = thread->priority;
2597 		scheduler_set_thread_priority(thread, priority);
2598 	}
2599 
2600 	return oldPriority;
2601 }
2602 
2603 
2604 status_t
2605 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2606 {
2607 	status_t status;
2608 
2609 	if (timebase != B_SYSTEM_TIMEBASE)
2610 		return B_BAD_VALUE;
2611 
2612 	InterruptsSpinLocker _(gThreadSpinlock);
2613 	struct thread* thread = thread_get_current_thread();
2614 
2615 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, NULL);
2616 	status = thread_block_with_timeout_locked(flags, timeout);
2617 
2618 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2619 		return B_OK;
2620 
2621 	return status;
2622 }
2623 
2624 
2625 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2626 status_t
2627 snooze(bigtime_t timeout)
2628 {
2629 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2630 }
2631 
2632 
2633 /*!	snooze_until() for internal kernel use only; doesn't interrupt on
2634 	signals.
2635 */
2636 status_t
2637 snooze_until(bigtime_t timeout, int timebase)
2638 {
2639 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2640 }
2641 
2642 
2643 status_t
2644 wait_for_thread(thread_id thread, status_t *_returnCode)
2645 {
2646 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2647 }
2648 
2649 
2650 status_t
2651 suspend_thread(thread_id id)
2652 {
2653 	if (id <= 0)
2654 		return B_BAD_VALUE;
2655 
2656 	return send_signal(id, SIGSTOP);
2657 }
2658 
2659 
2660 status_t
2661 resume_thread(thread_id id)
2662 {
2663 	if (id <= 0)
2664 		return B_BAD_VALUE;
2665 
2666 	return send_signal_etc(id, SIGCONT, SIGNAL_FLAG_DONT_RESTART_SYSCALL);
2667 		// This retains compatibility with BeOS, which documents that the
2668 		// combination of suspend_thread() and resume_thread() interrupts
2669 		// threads waiting on semaphores.
2670 }
2671 
2672 
2673 thread_id
2674 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2675 	void *arg)
2676 {
2677 	thread_creation_attributes attributes;
2678 	attributes.entry = (thread_entry_func)function;
2679 	attributes.name = name;
2680 	attributes.priority = priority;
2681 	attributes.args1 = arg;
2682 	attributes.args2 = NULL;
2683 	attributes.stack_address = NULL;
2684 	attributes.stack_size = 0;
2685 	attributes.team = team_get_kernel_team()->id;
2686 	attributes.thread = -1;
2687 
2688 	return create_thread(attributes, true);
2689 }
2690 
2691 
2692 int
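/*	getrlimit() and setrlimit() follow the POSIX calling convention: 0 on
	success, -1 with errno set on failure. An illustrative sketch (raising
	the soft open file limit to the hard limit):

	\code
	struct rlimit rl;
	if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
		rl.rlim_cur = rl.rlim_max;
		setrlimit(RLIMIT_NOFILE, &rl);
	}
	\endcode
*/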
2693 getrlimit(int resource, struct rlimit * rlp)
2694 {
2695 	status_t error = common_getrlimit(resource, rlp);
2696 	if (error != B_OK) {
2697 		errno = error;
2698 		return -1;
2699 	}
2700 
2701 	return 0;
2702 }
2703 
2704 
2705 int
2706 setrlimit(int resource, const struct rlimit * rlp)
2707 {
2708 	status_t error = common_setrlimit(resource, rlp);
2709 	if (error != B_OK) {
2710 		errno = error;
2711 		return -1;
2712 	}
2713 
2714 	return 0;
2715 }
2716 
2717 
2718 //	#pragma mark - syscalls
2719 
2720 
2721 void
2722 _user_exit_thread(status_t returnValue)
2723 {
2724 	exit_thread(returnValue);
2725 }
2726 
2727 
2728 status_t
2729 _user_kill_thread(thread_id thread)
2730 {
2731 	return kill_thread(thread);
2732 }
2733 
2734 
2735 status_t
2736 _user_resume_thread(thread_id thread)
2737 {
2738 	return resume_thread(thread);
2739 }
2740 
2741 
2742 status_t
2743 _user_suspend_thread(thread_id thread)
2744 {
2745 	return suspend_thread(thread);
2746 }
2747 
2748 
2749 status_t
2750 _user_rename_thread(thread_id thread, const char *userName)
2751 {
2752 	char name[B_OS_NAME_LENGTH];
2753 
2754 	if (!IS_USER_ADDRESS(userName)
2755 		|| userName == NULL
2756 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2757 		return B_BAD_ADDRESS;
2758 
2759 	return rename_thread(thread, name);
2760 }
2761 
2762 
2763 int32
2764 _user_set_thread_priority(thread_id thread, int32 newPriority)
2765 {
2766 	return set_thread_priority(thread, newPriority);
2767 }
2768 
2769 
2770 thread_id
2771 _user_spawn_thread(thread_creation_attributes* userAttributes)
2772 {
2773 	thread_creation_attributes attributes;
2774 	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
2775 		|| user_memcpy(&attributes, userAttributes,
2776 				sizeof(attributes)) != B_OK) {
2777 		return B_BAD_ADDRESS;
2778 	}
2779 
2780 	if (attributes.stack_size != 0
2781 		&& (attributes.stack_size < MIN_USER_STACK_SIZE
2782 			|| attributes.stack_size > MAX_USER_STACK_SIZE)) {
2783 		return B_BAD_VALUE;
2784 	}
2785 
2786 	char name[B_OS_NAME_LENGTH];
2787 	thread_id threadID;
2788 
2789 	if (!IS_USER_ADDRESS(attributes.entry) || attributes.entry == NULL
2790 		|| (attributes.stack_address != NULL
2791 			&& !IS_USER_ADDRESS(attributes.stack_address))
2792 		|| (attributes.name != NULL && (!IS_USER_ADDRESS(attributes.name)
2793 			|| user_strlcpy(name, attributes.name, B_OS_NAME_LENGTH) < 0)))
2794 		return B_BAD_ADDRESS;
2795 
2796 	attributes.name = attributes.name != NULL ? name : "user thread";
2797 	attributes.team = thread_get_current_thread()->team->id;
2798 	attributes.thread = -1;
2799 
2800 	threadID = create_thread(attributes, false);
2801 
2802 	if (threadID >= 0)
2803 		user_debug_thread_created(threadID);
2804 
2805 	return threadID;
2806 }
2807 
2808 
2809 status_t
2810 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2811 {
2812 	// NOTE: We only know the system timebase at the moment.
2813 	syscall_restart_handle_timeout_pre(flags, timeout);
2814 
2815 	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2816 
2817 	return syscall_restart_handle_timeout_post(error, timeout);
2818 }
2819 
2820 
2821 void
2822 _user_thread_yield(void)
2823 {
2824 	thread_yield(true);
2825 }
2826 
2827 
2828 status_t
2829 _user_get_thread_info(thread_id id, thread_info *userInfo)
2830 {
2831 	thread_info info;
2832 	status_t status;
2833 
2834 	if (!IS_USER_ADDRESS(userInfo))
2835 		return B_BAD_ADDRESS;
2836 
2837 	status = _get_thread_info(id, &info, sizeof(thread_info));
2838 
2839 	if (status >= B_OK
2840 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2841 		return B_BAD_ADDRESS;
2842 
2843 	return status;
2844 }
2845 
2846 
2847 status_t
2848 _user_get_next_thread_info(team_id team, int32 *userCookie,
2849 	thread_info *userInfo)
2850 {
2851 	status_t status;
2852 	thread_info info;
2853 	int32 cookie;
2854 
2855 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2856 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2857 		return B_BAD_ADDRESS;
2858 
2859 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2860 	if (status < B_OK)
2861 		return status;
2862 
2863 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2864 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2865 		return B_BAD_ADDRESS;
2866 
2867 	return status;
2868 }
2869 
2870 
2871 thread_id
2872 _user_find_thread(const char *userName)
2873 {
2874 	char name[B_OS_NAME_LENGTH];
2875 
2876 	if (userName == NULL)
2877 		return find_thread(NULL);
2878 
2879 	if (!IS_USER_ADDRESS(userName)
2880 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2881 		return B_BAD_ADDRESS;
2882 
2883 	return find_thread(name);
2884 }
2885 
2886 
2887 status_t
2888 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2889 {
2890 	status_t returnCode;
2891 	status_t status;
2892 
2893 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2894 		return B_BAD_ADDRESS;
2895 
2896 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2897 
2898 	if (status == B_OK && userReturnCode != NULL
2899 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
2900 		return B_BAD_ADDRESS;
2901 	}
2902 
2903 	return syscall_restart_handle_post(status);
2904 }
2905 
2906 
2907 bool
2908 _user_has_data(thread_id thread)
2909 {
2910 	return has_data(thread);
2911 }
2912 
2913 
2914 status_t
2915 _user_send_data(thread_id thread, int32 code, const void *buffer,
2916 	size_t bufferSize)
2917 {
2918 	if (!IS_USER_ADDRESS(buffer))
2919 		return B_BAD_ADDRESS;
2920 
2921 	return send_data_etc(thread, code, buffer, bufferSize,
2922 		B_KILL_CAN_INTERRUPT);
2923 		// supports userland buffers
2924 }
2925 
2926 
2927 status_t
2928 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2929 {
2930 	thread_id sender;
2931 	status_t code;
2932 
2933 	if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
2934 		|| !IS_USER_ADDRESS(buffer))
2935 		return B_BAD_ADDRESS;
2936 
2937 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2938 		// supports userland buffers
2939 
2940 	if (_userSender != NULL)
2941 		if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
2942 			return B_BAD_ADDRESS;
2943 
2944 	return code;
2945 }
2946 
2947 
2948 status_t
2949 _user_block_thread(uint32 flags, bigtime_t timeout)
2950 {
2951 	syscall_restart_handle_timeout_pre(flags, timeout);
2952 	flags |= B_CAN_INTERRUPT;
2953 
2954 	struct thread* thread = thread_get_current_thread();
2955 
2956 	InterruptsSpinLocker locker(gThreadSpinlock);
2957 
2958 	// check whether the wait has already been satisfied
2959 	if (thread->user_thread->wait_status <= 0)
2960 		return thread->user_thread->wait_status;
2961 
2962 	// nope, so wait
2963 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");
2964 	status_t status = thread_block_with_timeout_locked(flags, timeout);
2965 	thread->user_thread->wait_status = status;
2966 
2967 	return syscall_restart_handle_timeout_post(status, timeout);
2968 }
2969 
2970 
2971 status_t
2972 _user_unblock_thread(thread_id threadID, status_t status)
2973 {
2974 	InterruptsSpinLocker locker(gThreadSpinlock);
2975 	status_t error = user_unblock_thread(threadID, status);
2976 	scheduler_reschedule_if_necessary_locked();
2977 	return error;
2978 }
2979 
2980 
2981 status_t
2982 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
2983 {
2984 	enum {
2985 		MAX_USER_THREADS_TO_UNBLOCK	= 128
2986 	};
2987 
2988 	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
2989 		return B_BAD_ADDRESS;
2990 	if (count > MAX_USER_THREADS_TO_UNBLOCK)
2991 		return B_BAD_VALUE;
2992 
2993 	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
2994 	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
2995 		return B_BAD_ADDRESS;
2996 
2997 	InterruptsSpinLocker locker(gThreadSpinlock);
2998 	for (uint32 i = 0; i < count; i++)
2999 		user_unblock_thread(threads[i], status);
3000 
3001 	scheduler_reschedule_if_necessary_locked();
3002 
3003 	return B_OK;
3004 }
3005 
3006 
3007 // TODO: the following two functions don't belong here
3008 
3009 
3010 int
3011 _user_getrlimit(int resource, struct rlimit *urlp)
3012 {
3013 	struct rlimit rl;
3014 	int ret;
3015 
3016 	if (urlp == NULL)
3017 		return EINVAL;
3018 
3019 	if (!IS_USER_ADDRESS(urlp))
3020 		return B_BAD_ADDRESS;
3021 
3022 	ret = common_getrlimit(resource, &rl);
3023 
3024 	if (ret == 0) {
3025 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
3026 		if (ret < 0)
3027 			return ret;
3028 
3029 		return 0;
3030 	}
3031 
3032 	return ret;
3033 }
3034 
3035 
3036 int
3037 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
3038 {
3039 	struct rlimit resourceLimit;
3040 
3041 	if (userResourceLimit == NULL)
3042 		return EINVAL;
3043 
3044 	if (!IS_USER_ADDRESS(userResourceLimit)
3045 		|| user_memcpy(&resourceLimit, userResourceLimit,
3046 			sizeof(struct rlimit)) < B_OK)
3047 		return B_BAD_ADDRESS;
3048 
3049 	return common_setrlimit(resource, &resourceLimit);
3050 }
3051