xref: /haiku/src/system/kernel/thread.cpp (revision 97901ec593ec4dd50ac115c1c35a6d72f6e489a5)
1 /*
2  * Copyright 2005-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*! Threading routines */
12 
13 
14 #include <thread.h>
15 
16 #include <errno.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include <string.h>
20 #include <sys/resource.h>
21 
22 #include <OS.h>
23 
24 #include <util/AutoLock.h>
25 #include <util/khash.h>
26 
27 #include <arch/debug.h>
28 #include <boot/kernel_args.h>
29 #include <condition_variable.h>
30 #include <cpu.h>
31 #include <int.h>
32 #include <kimage.h>
33 #include <kscheduler.h>
34 #include <ksignal.h>
35 #include <Notifications.h>
36 #include <real_time_clock.h>
37 #include <smp.h>
38 #include <syscalls.h>
39 #include <syscall_restart.h>
40 #include <team.h>
41 #include <tls.h>
42 #include <user_runtime.h>
43 #include <user_thread.h>
44 #include <vfs.h>
45 #include <vm/vm.h>
46 #include <vm/VMAddressSpace.h>
47 #include <wait_for_objects.h>
48 
49 
50 //#define TRACE_THREAD
51 #ifdef TRACE_THREAD
52 #	define TRACE(x) dprintf x
53 #else
54 #	define TRACE(x) ;
55 #endif
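
// Illustrative note (editor's sketch, not part of the original source):
// TRACE() takes a fully parenthesized argument list so that it can expand
// either to a dprintf() call or to nothing, e.g.:
//	TRACE(("create_thread(%s, id = %ld)\n", attributes.name, attributes.thread));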
56 
57 
58 #define THREAD_MAX_MESSAGE_SIZE		65536
59 
60 
61 struct thread_key {
62 	thread_id id;
63 };
64 
65 // global
66 spinlock gThreadSpinlock = B_SPINLOCK_INITIALIZER;
67 
68 // thread list
69 static struct thread sIdleThreads[B_MAX_CPU_COUNT];
70 static hash_table *sThreadHash = NULL;
71 static thread_id sNextThreadID = 1;
72 
73 // some arbitrarily chosen limits - should probably depend on the available
74 // memory (the limit is not yet enforced)
75 static int32 sMaxThreads = 4096;
76 static int32 sUsedThreads = 0;
77 
78 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
79 	struct thread*	thread;
80 	team_id			teamID;
81 
82 	UndertakerEntry(struct thread* thread, team_id teamID)
83 		:
84 		thread(thread),
85 		teamID(teamID)
86 	{
87 	}
88 };
89 
90 
91 class ThreadNotificationService : public DefaultNotificationService {
92 public:
93 	ThreadNotificationService()
94 		: DefaultNotificationService("threads")
95 	{
96 	}
97 
98 	void Notify(uint32 eventCode, struct thread* thread)
99 	{
100 		char eventBuffer[128];
101 		KMessage event;
102 		event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
103 		event.AddInt32("event", eventCode);
104 		event.AddInt32("thread", thread->id);
105 		event.AddPointer("threadStruct", thread);
106 
107 		DefaultNotificationService::Notify(event, eventCode);
108 	}
109 };
110 
111 
112 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
113 static ConditionVariable sUndertakerCondition;
114 static ThreadNotificationService sNotificationService;
115 
116 
117 // The dead queue is used as a pool from which to retrieve and reuse previously
118 // allocated thread structs when creating a new thread. It should be gone once
119 // the slab allocator is in.
120 static struct thread_queue dead_q;
121 
122 static void thread_kthread_entry(void);
123 static void thread_kthread_exit(void);
124 
125 
126 /*!	Inserts a thread into a team.
127 	You must hold the team lock when you call this function.
128 */
129 static void
130 insert_thread_into_team(struct team *team, struct thread *thread)
131 {
132 	thread->team_next = team->thread_list;
133 	team->thread_list = thread;
134 	team->num_threads++;
135 
136 	if (team->num_threads == 1) {
137 		// this was the first thread
138 		team->main_thread = thread;
139 	}
140 	thread->team = team;
141 }
142 
143 
144 /*!	Removes a thread from a team.
145 	You must hold the team lock when you call this function.
146 */
147 static void
148 remove_thread_from_team(struct team *team, struct thread *thread)
149 {
150 	struct thread *temp, *last = NULL;
151 
152 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
153 		if (temp == thread) {
154 			if (last == NULL)
155 				team->thread_list = temp->team_next;
156 			else
157 				last->team_next = temp->team_next;
158 
159 			team->num_threads--;
160 			break;
161 		}
162 		last = temp;
163 	}
164 }
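
// Hedged caller sketch (editor's addition): both helpers above assume the
// team lock is held with interrupts disabled, mirroring what thread_exit()
// does further below:
//
//	cpu_status state = disable_interrupts();
//	GRAB_TEAM_LOCK();
//	remove_thread_from_team(team, thread);
//	insert_thread_into_team(team_get_kernel_team(), thread);
//	RELEASE_TEAM_LOCK();
//	restore_interrupts(state);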
165 
166 
167 static int
168 thread_struct_compare(void *_t, const void *_key)
169 {
170 	struct thread *thread = (struct thread*)_t;
171 	const struct thread_key *key = (const struct thread_key*)_key;
172 
173 	if (thread->id == key->id)
174 		return 0;
175 
176 	return 1;
177 }
178 
179 
180 static uint32
181 thread_struct_hash(void *_t, const void *_key, uint32 range)
182 {
183 	struct thread *thread = (struct thread*)_t;
184 	const struct thread_key *key = (const struct thread_key*)_key;
185 
186 	if (thread != NULL)
187 		return thread->id % range;
188 
189 	return (uint32)key->id % range;
190 }
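
// Hedged sketch (editor's addition): these two callbacks key sThreadHash by
// thread ID via thread_key; the table itself is presumably set up in
// thread_init() (below this excerpt) along the lines of:
//
//	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
//		&thread_struct_compare, &thread_struct_hash);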
191 
192 
193 static void
194 reset_signals(struct thread *thread)
195 {
196 	thread->sig_pending = 0;
197 	thread->sig_block_mask = 0;
198 	thread->sig_temp_enabled = 0;
199 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
200 	thread->signal_stack_base = 0;
201 	thread->signal_stack_size = 0;
202 	thread->signal_stack_enabled = false;
203 }
204 
205 
206 /*!	Allocates and fills in thread structure (or reuses one from the
207 	dead queue).
208 
209 	\param threadID The ID to be assigned to the new thread. If
210 		  \code < 0 \endcode a fresh one is allocated.
211 	\param inthread The thread struct to initialize; if \c NULL, a new one is allocated.
212 */
213 
214 static struct thread *
215 create_thread_struct(struct thread *inthread, const char *name,
216 	thread_id threadID, struct cpu_ent *cpu)
217 {
218 	struct thread *thread;
219 	cpu_status state;
220 	char temp[64];
221 	bool recycled = false;
222 
223 	if (inthread == NULL) {
224 		// try to recycle one from the dead queue first
225 		state = disable_interrupts();
226 		GRAB_THREAD_LOCK();
227 		thread = thread_dequeue(&dead_q);
228 		RELEASE_THREAD_LOCK();
229 		restore_interrupts(state);
230 
231 		// if not, create a new one
232 		if (thread == NULL) {
233 			thread = (struct thread *)malloc(sizeof(struct thread));
234 			if (thread == NULL)
235 				return NULL;
236 		} else {
237 			recycled = true;
238 		}
239 	} else {
240 		thread = inthread;
241 	}
242 
243 	if (!recycled)
244 		scheduler_on_thread_create(thread);
245 
246 	if (name != NULL)
247 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
248 	else
249 		strcpy(thread->name, "unnamed thread");
250 
251 	thread->flags = 0;
252 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
253 	thread->team = NULL;
254 	thread->cpu = cpu;
255 	thread->previous_cpu = NULL;
256 	thread->pinned_to_cpu = 0;
257 	thread->fault_handler = 0;
258 	thread->page_faults_allowed = 1;
259 	thread->kernel_stack_area = -1;
260 	thread->kernel_stack_base = 0;
261 	thread->user_stack_area = -1;
262 	thread->user_stack_base = 0;
263 	thread->user_local_storage = 0;
264 	thread->kernel_errno = 0;
265 	thread->team_next = NULL;
266 	thread->queue_next = NULL;
267 	thread->priority = thread->next_priority = -1;
268 	thread->io_priority = -1;
269 	thread->args1 = NULL;  thread->args2 = NULL;
270 	thread->alarm.period = 0;
271 	reset_signals(thread);
272 	thread->in_kernel = true;
273 	thread->was_yielded = false;
274 	thread->user_time = 0;
275 	thread->kernel_time = 0;
276 	thread->last_time = 0;
277 	thread->exit.status = 0;
278 	thread->exit.reason = 0;
279 	thread->exit.signal = 0;
280 	list_init(&thread->exit.waiters);
281 	thread->select_infos = NULL;
282 	thread->post_interrupt_callback = NULL;
283 	thread->post_interrupt_data = NULL;
284 	thread->user_thread = NULL;
285 
286 	sprintf(temp, "thread_%ld_retcode_sem", thread->id);
287 	thread->exit.sem = create_sem(0, temp);
288 	if (thread->exit.sem < B_OK)
289 		goto err1;
290 
291 	sprintf(temp, "%s send", thread->name);
292 	thread->msg.write_sem = create_sem(1, temp);
293 	if (thread->msg.write_sem < B_OK)
294 		goto err2;
295 
296 	sprintf(temp, "%s receive", thread->name);
297 	thread->msg.read_sem = create_sem(0, temp);
298 	if (thread->msg.read_sem < B_OK)
299 		goto err3;
300 
301 	if (arch_thread_init_thread_struct(thread) < B_OK)
302 		goto err4;
303 
304 	return thread;
305 
306 err4:
307 	delete_sem(thread->msg.read_sem);
308 err3:
309 	delete_sem(thread->msg.write_sem);
310 err2:
311 	delete_sem(thread->exit.sem);
312 err1:
313 	// ToDo: put them in the dead queue instead?
314 	if (inthread == NULL) {
315 		scheduler_on_thread_destroy(thread);
316 		free(thread);
317 	}
318 
319 	return NULL;
320 }
321 
322 
323 static void
324 delete_thread_struct(struct thread *thread)
325 {
326 	delete_sem(thread->exit.sem);
327 	delete_sem(thread->msg.write_sem);
328 	delete_sem(thread->msg.read_sem);
329 
330 	scheduler_on_thread_destroy(thread);
331 
332 	// ToDo: put them in the dead queue instead?
333 	free(thread);
334 }
335 
336 
337 /*! This function gets run by a new thread before anything else */
338 static void
339 thread_kthread_entry(void)
340 {
341 	struct thread *thread = thread_get_current_thread();
342 
343 	// The thread is new and has been scheduled for the first time. Notify the
344 	// user debugger code.
345 	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
346 		user_debug_thread_scheduled(thread);
347 
348 	// Simulate the thread spinlock release that would occur if the thread had
349 	// been rescheduled from. The reschedule didn't happen since the thread is new.
350 	RELEASE_THREAD_LOCK();
351 
352 	// start tracking time
353 	thread->last_time = system_time();
354 
355 	enable_interrupts(); // this essentially simulates a return-from-interrupt
356 }
357 
358 
359 static void
360 thread_kthread_exit(void)
361 {
362 	struct thread *thread = thread_get_current_thread();
363 
364 	thread->exit.reason = THREAD_RETURN_EXIT;
365 	thread_exit();
366 }
367 
368 
369 /*!	Initializes the thread and jumps to its userspace entry point.
370 	This function is called at creation time of every user thread,
371 	but not for a team's main thread.
372 */
373 static int
374 _create_user_thread_kentry(void)
375 {
376 	struct thread *thread = thread_get_current_thread();
377 
378 	// jump to the entry point in user space
379 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
380 		thread->args1, thread->args2);
381 
382 	// only get here if the above call fails
383 	return 0;
384 }
385 
386 
387 /*! Initializes the thread and calls its kernel space entry point. */
388 static int
389 _create_kernel_thread_kentry(void)
390 {
391 	struct thread *thread = thread_get_current_thread();
392 	int (*func)(void *args) = (int (*)(void *))thread->entry;
393 
394 	// call the entry function with the appropriate args
395 	return func(thread->args1);
396 }
397 
398 
399 /*!	Creates a new thread in the team with the specified team ID.
400 
401 	\param attributes The creation attributes. If \c attributes.thread is
402 		  \code < 0 \endcode a fresh thread ID is allocated.
403 */
404 static thread_id
405 create_thread(thread_creation_attributes& attributes, bool kernel)
406 {
407 	struct thread *thread, *currentThread;
408 	struct team *team;
409 	cpu_status state;
410 	char stack_name[B_OS_NAME_LENGTH];
411 	status_t status;
412 	bool abort = false;
413 	bool debugNewThread = false;
414 
415 	TRACE(("create_thread(%s, id = %ld, %s)\n", attributes.name,
416 		attributes.thread, kernel ? "kernel" : "user"));
417 
418 	thread = create_thread_struct(NULL, attributes.name, attributes.thread,
419 		NULL);
420 	if (thread == NULL)
421 		return B_NO_MEMORY;
422 
423 	thread->priority = attributes.priority == -1
424 		? B_NORMAL_PRIORITY : attributes.priority;
425 	thread->next_priority = thread->priority;
426 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
427 	thread->state = B_THREAD_SUSPENDED;
428 	thread->next_state = B_THREAD_SUSPENDED;
429 
430 	// init debug structure
431 	init_thread_debug_info(&thread->debug_info);
432 
433 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", attributes.name,
434 		thread->id);
435 	thread->kernel_stack_area = create_area(stack_name,
436 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
437 		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
438 		B_FULL_LOCK,
439 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
440 
441 	if (thread->kernel_stack_area < 0) {
442 		// we're not yet part of a team, so we can just bail out
443 		status = thread->kernel_stack_area;
444 
445 		dprintf("create_thread: error creating kernel stack: %s!\n",
446 			strerror(status));
447 
448 		delete_thread_struct(thread);
449 		return status;
450 	}
451 
452 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
453 		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
454 
455 	state = disable_interrupts();
456 	GRAB_THREAD_LOCK();
457 
458 	// If the new thread belongs to the same team as the current thread,
459 	// it may inherit some of the thread debug flags.
460 	currentThread = thread_get_current_thread();
461 	if (currentThread && currentThread->team->id == attributes.team) {
462 		// inherit all user flags...
463 		int32 debugFlags = currentThread->debug_info.flags
464 			& B_THREAD_DEBUG_USER_FLAG_MASK;
465 
466 		// ... but not the syscall tracing flags, unless explicitly specified
467 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
468 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
469 				| B_THREAD_DEBUG_POST_SYSCALL);
470 		}
471 
472 		thread->debug_info.flags = debugFlags;
473 
474 		// stop the new thread, if desired
475 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
476 	}
477 
478 	// insert into global list
479 	hash_insert(sThreadHash, thread);
480 	sUsedThreads++;
481 	scheduler_on_thread_init(thread);
482 	RELEASE_THREAD_LOCK();
483 
484 	GRAB_TEAM_LOCK();
485 	// look at the team, make sure it's not being deleted
486 	team = team_get_team_struct_locked(attributes.team);
487 
488 	if (team == NULL || team->state == TEAM_STATE_DEATH
489 		|| team->death_entry != NULL) {
490 		abort = true;
491 	}
492 
493 	if (!abort && !kernel) {
494 		thread->user_thread = team_allocate_user_thread(team);
495 		abort = thread->user_thread == NULL;
496 	}
497 
498 	if (!abort) {
499 		// Debug the new thread, if the parent thread required that (see above),
500 		// or the respective global team debug flag is set. But only if a
501 		// debugger is installed for the team.
502 		debugNewThread |= (atomic_get(&team->debug_info.flags)
503 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
504 		if (debugNewThread
505 			&& (atomic_get(&team->debug_info.flags)
506 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
507 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
508 		}
509 
510 		insert_thread_into_team(team, thread);
511 	}
512 
513 	RELEASE_TEAM_LOCK();
514 	if (abort) {
515 		GRAB_THREAD_LOCK();
516 		hash_remove(sThreadHash, thread);
517 		RELEASE_THREAD_LOCK();
518 	}
519 	restore_interrupts(state);
520 	if (abort) {
521 		delete_area(thread->kernel_stack_area);
522 		delete_thread_struct(thread);
523 		return B_BAD_TEAM_ID;
524 	}
525 
526 	thread->args1 = attributes.args1;
527 	thread->args2 = attributes.args2;
528 	thread->entry = attributes.entry;
529 	status = thread->id;
530 
531 	// notify listeners
532 	sNotificationService.Notify(THREAD_ADDED, thread);
533 
534 	if (kernel) {
535 		// this sets up an initial kthread stack that runs the entry
536 
537 		// Note: whatever function wants to set up a user stack later for this
538 		// thread must initialize the TLS for it
539 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
540 			&thread_kthread_entry, &thread_kthread_exit);
541 	} else {
542 		// create user stack
543 
544 		// the stack will be between USER_STACK_REGION and the main thread stack
545 		// area (the user stack of the main thread is created in
546 		// team_create_team())
547 		if (attributes.stack_address == NULL) {
548 			thread->user_stack_base = USER_STACK_REGION;
549 			if (attributes.stack_size <= 0)
550 				thread->user_stack_size = USER_STACK_SIZE;
551 			else
552 				thread->user_stack_size = PAGE_ALIGN(attributes.stack_size);
553 			thread->user_stack_size += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
554 
555 			snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack",
556 				attributes.name, thread->id);
557 			thread->user_stack_area = create_area_etc(team->id, stack_name,
558 					(void **)&thread->user_stack_base, B_BASE_ADDRESS,
559 					thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
560 					B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0, 0);
561 			if (thread->user_stack_area < B_OK
562 				|| arch_thread_init_tls(thread) < B_OK) {
563 				// great, we have a fully running thread without a (usable)
564 				// stack
565 				dprintf("create_thread: unable to create proper user stack!\n");
566 				status = thread->user_stack_area;
567 				kill_thread(thread->id);
568 			}
569 		} else {
570 			thread->user_stack_base = (addr_t)attributes.stack_address;
571 			thread->user_stack_size = attributes.stack_size;
572 		}
573 
574 		user_debug_update_new_thread_flags(thread->id);
575 
576 		// Copy the user entry over to the args field in the thread struct.
577 		// The function this will call will immediately switch the thread into
578 		// user space.
579 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
580 			&thread_kthread_entry, &thread_kthread_exit);
581 	}
582 
583 	return status;
584 }
585 
586 
587 static status_t
588 undertaker(void* /*args*/)
589 {
590 	while (true) {
591 		// wait for a thread to bury
592 		InterruptsSpinLocker locker(gThreadSpinlock);
593 
594 		while (sUndertakerEntries.IsEmpty()) {
595 			ConditionVariableEntry conditionEntry;
596 			sUndertakerCondition.Add(&conditionEntry);
597 			locker.Unlock();
598 
599 			conditionEntry.Wait();
600 
601 			locker.Lock();
602 		}
603 
604 		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
605 		locker.Unlock();
606 
607 		UndertakerEntry entry = *_entry;
608 			// we need a copy, since the original entry is on the thread's stack
609 
610 		// we've got an entry
611 		struct thread* thread = entry.thread;
612 
613 		// delete the old kernel stack area
614 		delete_area(thread->kernel_stack_area);
615 
616 		// remove this thread from all of the global lists
617 		disable_interrupts();
618 		GRAB_TEAM_LOCK();
619 
620 		remove_thread_from_team(team_get_kernel_team(), thread);
621 
622 		RELEASE_TEAM_LOCK();
623 		enable_interrupts();
624 			// needed for the debugger notification below
625 
626 		// free the thread structure
627 		locker.Lock();
628 		thread_enqueue(thread, &dead_q);
629 			// TODO: Use the slab allocator!
630 	}
631 
632 	// never can get here
633 	return B_OK;
634 }
635 
636 
637 static sem_id
638 get_thread_wait_sem(struct thread* thread)
639 {
640 	if (thread->state == B_THREAD_WAITING
641 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
642 		return (sem_id)(addr_t)thread->wait.object;
643 	}
644 	return -1;
645 }
646 
647 
648 /*!	Fills the thread_info structure with information from the specified
649 	thread.
650 	The thread lock must be held when called.
651 */
652 static void
653 fill_thread_info(struct thread *thread, thread_info *info, size_t size)
654 {
655 	info->thread = thread->id;
656 	info->team = thread->team->id;
657 
658 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
659 
660 	if (thread->state == B_THREAD_WAITING) {
661 		info->state = B_THREAD_WAITING;
662 
663 		switch (thread->wait.type) {
664 			case THREAD_BLOCK_TYPE_SNOOZE:
665 				info->state = B_THREAD_ASLEEP;
666 				break;
667 
668 			case THREAD_BLOCK_TYPE_SEMAPHORE:
669 			{
670 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
671 				if (sem == thread->msg.read_sem)
672 					info->state = B_THREAD_RECEIVING;
673 				break;
674 			}
675 
676 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
677 			default:
678 				break;
679 		}
680 	} else
681 		info->state = (thread_state)thread->state;
682 
683 	info->priority = thread->priority;
684 	info->user_time = thread->user_time;
685 	info->kernel_time = thread->kernel_time;
686 	info->stack_base = (void *)thread->user_stack_base;
687 	info->stack_end = (void *)(thread->user_stack_base
688 		+ thread->user_stack_size);
689 	info->sem = get_thread_wait_sem(thread);
690 }
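
// Hedged usage sketch (editor's addition): the public _get_thread_info()
// path would look the thread up under the thread lock and then call
// fill_thread_info(), roughly:
//
//	InterruptsSpinLocker locker(gThreadSpinlock);
//	struct thread *thread = thread_get_thread_struct_locked(id);
//	if (thread == NULL)
//		return B_BAD_THREAD_ID;
//	fill_thread_info(thread, info, size);
//	return B_OK;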
691 
692 
693 static status_t
694 send_data_etc(thread_id id, int32 code, const void *buffer, size_t bufferSize,
695 	int32 flags)
696 {
697 	struct thread *target;
698 	sem_id cachedSem;
699 	cpu_status state;
700 	status_t status;
701 
702 	state = disable_interrupts();
703 	GRAB_THREAD_LOCK();
704 	target = thread_get_thread_struct_locked(id);
705 	if (!target) {
706 		RELEASE_THREAD_LOCK();
707 		restore_interrupts(state);
708 		return B_BAD_THREAD_ID;
709 	}
710 	cachedSem = target->msg.write_sem;
711 	RELEASE_THREAD_LOCK();
712 	restore_interrupts(state);
713 
714 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
715 		return B_NO_MEMORY;
716 
717 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
718 	if (status == B_INTERRUPTED) {
719 		// We got interrupted by a signal
720 		return status;
721 	}
722 	if (status != B_OK) {
723 		// Any other acquisition problems may be due to thread deletion
724 		return B_BAD_THREAD_ID;
725 	}
726 
727 	void* data;
728 	if (bufferSize > 0) {
729 		data = malloc(bufferSize);
730 		if (data == NULL)
731 			return B_NO_MEMORY;
732 		if (user_memcpy(data, buffer, bufferSize) != B_OK) {
733 			free(data);
734 			return B_BAD_DATA;
735 		}
736 	} else
737 		data = NULL;
738 
739 	state = disable_interrupts();
740 	GRAB_THREAD_LOCK();
741 
742 	// The target thread could have been deleted at this point
743 	target = thread_get_thread_struct_locked(id);
744 	if (target == NULL) {
745 		RELEASE_THREAD_LOCK();
746 		restore_interrupts(state);
747 		free(data);
748 		return B_BAD_THREAD_ID;
749 	}
750 
751 	// Save message information
752 	target->msg.sender = thread_get_current_thread()->id;
753 	target->msg.code = code;
754 	target->msg.size = bufferSize;
755 	target->msg.buffer = data;
756 	cachedSem = target->msg.read_sem;
757 
758 	RELEASE_THREAD_LOCK();
759 	restore_interrupts(state);
760 
761 	release_sem(cachedSem);
762 	return B_OK;
763 }
764 
765 
766 static int32
767 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
768 	int32 flags)
769 {
770 	struct thread *thread = thread_get_current_thread();
771 	status_t status;
772 	size_t size;
773 	int32 code;
774 
775 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
776 	if (status < B_OK) {
777 		// Actually, we're not supposed to return error codes
778 		// but since the only reason this can fail is that we
779 		// were killed, it's probably okay to do so (but also
780 		// meaningless).
781 		return status;
782 	}
783 
784 	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
785 		size = min_c(bufferSize, thread->msg.size);
786 		status = user_memcpy(buffer, thread->msg.buffer, size);
787 		if (status != B_OK) {
788 			free(thread->msg.buffer);
789 			release_sem(thread->msg.write_sem);
790 			return status;
791 		}
792 	}
793 
794 	*_sender = thread->msg.sender;
795 	code = thread->msg.code;
796 
797 	free(thread->msg.buffer);
798 	release_sem(thread->msg.write_sem);
799 
800 	return code;
801 }
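
// Hedged usage sketch (editor's addition): the public messaging calls built
// on the two helpers above form a one-slot handshake -- send_data() blocks on
// the target's msg.write_sem until its message slot is free, and the target
// fetches the message with receive_data():
//
//	// sending side
//	send_data(targetThread, 0x1234, &payload, sizeof(payload));
//
//	// receiving side (the target thread)
//	thread_id sender;
//	int32 code = receive_data(&sender, &payload, sizeof(payload));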
802 
803 
804 static status_t
805 common_getrlimit(int resource, struct rlimit * rlp)
806 {
807 	if (!rlp)
808 		return B_BAD_ADDRESS;
809 
810 	switch (resource) {
811 		case RLIMIT_NOFILE:
812 		case RLIMIT_NOVMON:
813 			return vfs_getrlimit(resource, rlp);
814 
815 		case RLIMIT_CORE:
816 			rlp->rlim_cur = 0;
817 			rlp->rlim_max = 0;
818 			return B_OK;
819 
820 		case RLIMIT_STACK:
821 		{
822 			struct thread *thread = thread_get_current_thread();
823 			if (!thread)
824 				return B_ERROR;
825 			rlp->rlim_cur = thread->user_stack_size;
826 			rlp->rlim_max = thread->user_stack_size;
827 			return B_OK;
828 		}
829 
830 		default:
831 			return EINVAL;
832 	}
833 
834 	return B_OK;
835 }
836 
837 
838 static status_t
839 common_setrlimit(int resource, const struct rlimit * rlp)
840 {
841 	if (!rlp)
842 		return B_BAD_ADDRESS;
843 
844 	switch (resource) {
845 		case RLIMIT_NOFILE:
846 		case RLIMIT_NOVMON:
847 			return vfs_setrlimit(resource, rlp);
848 
849 		case RLIMIT_CORE:
850 			// We don't support core files, so only allow setting the limits to 0/0.
851 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
852 				return EINVAL;
853 			return B_OK;
854 
855 		default:
856 			return EINVAL;
857 	}
858 
859 	return B_OK;
860 }
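
// Hedged userland sketch (editor's addition): seen through the POSIX calls
// these helpers back, raising the soft file-descriptor limit would look like:
//
//	struct rlimit rl;
//	if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
//		rl.rlim_cur = rl.rlim_max;
//		setrlimit(RLIMIT_NOFILE, &rl);
//	}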
861 
862 
863 //	#pragma mark - debugger calls
864 
865 
866 static int
867 make_thread_unreal(int argc, char **argv)
868 {
869 	struct thread *thread;
870 	struct hash_iterator i;
871 	int32 id = -1;
872 
873 	if (argc > 2) {
874 		print_debugger_command_usage(argv[0]);
875 		return 0;
876 	}
877 
878 	if (argc > 1)
879 		id = strtoul(argv[1], NULL, 0);
880 
881 	hash_open(sThreadHash, &i);
882 
883 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
884 		if (id != -1 && thread->id != id)
885 			continue;
886 
887 		if (thread->priority > B_DISPLAY_PRIORITY) {
888 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
889 			kprintf("thread %ld made unreal\n", thread->id);
890 		}
891 	}
892 
893 	hash_close(sThreadHash, &i, false);
894 	return 0;
895 }
896 
897 
898 static int
899 set_thread_prio(int argc, char **argv)
900 {
901 	struct thread *thread;
902 	struct hash_iterator i;
903 	int32 id;
904 	int32 prio;
905 
906 	if (argc > 3 || argc < 2) {
907 		print_debugger_command_usage(argv[0]);
908 		return 0;
909 	}
910 
911 	prio = strtoul(argv[1], NULL, 0);
912 	if (prio > THREAD_MAX_SET_PRIORITY)
913 		prio = THREAD_MAX_SET_PRIORITY;
914 	if (prio < THREAD_MIN_SET_PRIORITY)
915 		prio = THREAD_MIN_SET_PRIORITY;
916 
917 	if (argc > 2)
918 		id = strtoul(argv[2], NULL, 0);
919 	else
920 		id = thread_get_current_thread()->id;
921 
922 	hash_open(sThreadHash, &i);
923 
924 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
925 		if (thread->id != id)
926 			continue;
927 		thread->priority = thread->next_priority = prio;
928 		kprintf("thread %ld set to priority %ld\n", id, prio);
929 		break;
930 	}
931 	if (!thread)
932 		kprintf("thread %ld (%#lx) not found\n", id, id);
933 
934 	hash_close(sThreadHash, &i, false);
935 	return 0;
936 }
937 
938 
939 static int
940 make_thread_suspended(int argc, char **argv)
941 {
942 	struct thread *thread;
943 	struct hash_iterator i;
944 	int32 id;
945 
946 	if (argc > 2) {
947 		print_debugger_command_usage(argv[0]);
948 		return 0;
949 	}
950 
951 	if (argc == 1)
952 		id = thread_get_current_thread()->id;
953 	else
954 		id = strtoul(argv[1], NULL, 0);
955 
956 	hash_open(sThreadHash, &i);
957 
958 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
959 		if (thread->id != id)
960 			continue;
961 
962 		thread->next_state = B_THREAD_SUSPENDED;
963 		kprintf("thread %ld suspended\n", id);
964 		break;
965 	}
966 	if (!thread)
967 		kprintf("thread %ld (%#lx) not found\n", id, id);
968 
969 	hash_close(sThreadHash, &i, false);
970 	return 0;
971 }
972 
973 
974 static int
975 make_thread_resumed(int argc, char **argv)
976 {
977 	struct thread *thread;
978 	struct hash_iterator i;
979 	int32 id;
980 
981 	if (argc != 2) {
982 		print_debugger_command_usage(argv[0]);
983 		return 0;
984 	}
985 
986 	// force user to enter a thread id, as using
987 	// the current thread is usually not intended
988 	id = strtoul(argv[1], NULL, 0);
989 
990 	hash_open(sThreadHash, &i);
991 
992 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
993 		if (thread->id != id)
994 			continue;
995 
996 		if (thread->state == B_THREAD_SUSPENDED) {
997 			scheduler_enqueue_in_run_queue(thread);
998 			kprintf("thread %ld resumed\n", thread->id);
999 		}
1000 		break;
1001 	}
1002 	if (!thread)
1003 		kprintf("thread %ld (%#lx) not found\n", id, id);
1004 
1005 	hash_close(sThreadHash, &i, false);
1006 	return 0;
1007 }
1008 
1009 
1010 static int
1011 drop_into_debugger(int argc, char **argv)
1012 {
1013 	status_t err;
1014 	int32 id;
1015 
1016 	if (argc > 2) {
1017 		print_debugger_command_usage(argv[0]);
1018 		return 0;
1019 	}
1020 
1021 	if (argc == 1)
1022 		id = thread_get_current_thread()->id;
1023 	else
1024 		id = strtoul(argv[1], NULL, 0);
1025 
1026 	err = _user_debug_thread(id);
1027 	if (err)
1028 		kprintf("drop failed\n");
1029 	else
1030 		kprintf("thread %ld dropped into user debugger\n", id);
1031 
1032 	return 0;
1033 }
1034 
1035 
1036 static const char *
1037 state_to_text(struct thread *thread, int32 state)
1038 {
1039 	switch (state) {
1040 		case B_THREAD_READY:
1041 			return "ready";
1042 
1043 		case B_THREAD_RUNNING:
1044 			return "running";
1045 
1046 		case B_THREAD_WAITING:
1047 		{
1048 			if (thread != NULL) {
1049 				switch (thread->wait.type) {
1050 					case THREAD_BLOCK_TYPE_SNOOZE:
1051 						return "zzz";
1052 
1053 					case THREAD_BLOCK_TYPE_SEMAPHORE:
1054 					{
1055 						sem_id sem = (sem_id)(addr_t)thread->wait.object;
1056 						if (sem == thread->msg.read_sem)
1057 							return "receive";
1058 						break;
1059 					}
1060 				}
1061 			}
1062 
1063 			return "waiting";
1064 		}
1065 
1066 		case B_THREAD_SUSPENDED:
1067 			return "suspended";
1068 
1069 		case THREAD_STATE_FREE_ON_RESCHED:
1070 			return "death";
1071 
1072 		default:
1073 			return "UNKNOWN";
1074 	}
1075 }
1076 
1077 
1078 static void
1079 print_thread_list_table_head()
1080 {
1081 	kprintf("thread         id  state     wait for   object  cpu pri  stack    "
1082 		"  team  name\n");
1083 }
1084 
1085 
1086 static void
1087 _dump_thread_info(struct thread *thread, bool shortInfo)
1088 {
1089 	if (shortInfo) {
1090 		kprintf("%p %6ld  %-10s", thread, thread->id, state_to_text(thread,
1091 			thread->state));
1092 
1093 		// does it block on a semaphore or a condition variable?
1094 		if (thread->state == B_THREAD_WAITING) {
1095 			switch (thread->wait.type) {
1096 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1097 				{
1098 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1099 					if (sem == thread->msg.read_sem)
1100 						kprintf("                    ");
1101 					else
1102 						kprintf("sem  %12ld   ", sem);
1103 					break;
1104 				}
1105 
1106 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1107 					kprintf("cvar   %p   ", thread->wait.object);
1108 					break;
1109 
1110 				case THREAD_BLOCK_TYPE_SNOOZE:
1111 					kprintf("                    ");
1112 					break;
1113 
1114 				case THREAD_BLOCK_TYPE_SIGNAL:
1115 					kprintf("signal              ");
1116 					break;
1117 
1118 				case THREAD_BLOCK_TYPE_MUTEX:
1119 					kprintf("mutex  %p   ", thread->wait.object);
1120 					break;
1121 
1122 				case THREAD_BLOCK_TYPE_RW_LOCK:
1123 					kprintf("rwlock %p   ", thread->wait.object);
1124 					break;
1125 
1126 				case THREAD_BLOCK_TYPE_OTHER:
1127 					kprintf("other               ");
1128 					break;
1129 
1130 				default:
1131 					kprintf("???    %p   ", thread->wait.object);
1132 					break;
1133 			}
1134 		} else
1135 			kprintf("        -           ");
1136 
1137 		// on which CPU does it run?
1138 		if (thread->cpu)
1139 			kprintf("%2d", thread->cpu->cpu_num);
1140 		else
1141 			kprintf(" -");
1142 
1143 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1144 			(void *)thread->kernel_stack_base, thread->team->id,
1145 			thread->name != NULL ? thread->name : "<NULL>");
1146 
1147 		return;
1148 	}
1149 
1150 	// print the long info
1151 
1152 	struct death_entry *death = NULL;
1153 
1154 	kprintf("THREAD: %p\n", thread);
1155 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1156 	kprintf("name:               \"%s\"\n", thread->name);
1157 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1158 		thread->all_next, thread->team_next, thread->queue_next);
1159 	kprintf("priority:           %ld (next %ld, I/O: %ld)\n", thread->priority,
1160 		thread->next_priority, thread->io_priority);
1161 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1162 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1163 	kprintf("cpu:                %p ", thread->cpu);
1164 	if (thread->cpu)
1165 		kprintf("(%d)\n", thread->cpu->cpu_num);
1166 	else
1167 		kprintf("\n");
1168 	kprintf("sig_pending:        %#" B_PRIx32 " (blocked: %#" B_PRIx32
1169 		", temp enabled: %#" B_PRIx32 ")\n", thread->sig_pending,
1170 		thread->sig_block_mask, thread->sig_temp_enabled);
1171 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1172 
1173 	if (thread->state == B_THREAD_WAITING) {
1174 		kprintf("waiting for:        ");
1175 
1176 		switch (thread->wait.type) {
1177 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1178 			{
1179 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1180 				if (sem == thread->msg.read_sem)
1181 					kprintf("data\n");
1182 				else
1183 					kprintf("semaphore %ld\n", sem);
1184 				break;
1185 			}
1186 
1187 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1188 				kprintf("condition variable %p\n", thread->wait.object);
1189 				break;
1190 
1191 			case THREAD_BLOCK_TYPE_SNOOZE:
1192 				kprintf("snooze()\n");
1193 				break;
1194 
1195 			case THREAD_BLOCK_TYPE_SIGNAL:
1196 				kprintf("signal\n");
1197 				break;
1198 
1199 			case THREAD_BLOCK_TYPE_MUTEX:
1200 				kprintf("mutex %p\n", thread->wait.object);
1201 				break;
1202 
1203 			case THREAD_BLOCK_TYPE_RW_LOCK:
1204 				kprintf("rwlock %p\n", thread->wait.object);
1205 				break;
1206 
1207 			case THREAD_BLOCK_TYPE_OTHER:
1208 				kprintf("other (%s)\n", (char*)thread->wait.object);
1209 				break;
1210 
1211 			default:
1212 				kprintf("unknown (%p)\n", thread->wait.object);
1213 				break;
1214 		}
1215 	}
1216 
1217 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1218 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1219 	kprintf("entry:              %p\n", (void *)thread->entry);
1220 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1221 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1222 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1223 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1224 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1225 	kprintf("  exit.waiters:\n");
1226 	while ((death = (struct death_entry*)list_get_next_item(
1227 			&thread->exit.waiters, death)) != NULL) {
1228 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1229 	}
1230 
1231 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1232 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1233 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1234 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1235 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1236 	kprintf("user_thread:        %p\n", (void *)thread->user_thread);
1237 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1238 		strerror(thread->kernel_errno));
1239 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1240 	kprintf("user_time:          %Ld\n", thread->user_time);
1241 	kprintf("flags:              0x%lx\n", thread->flags);
1242 	kprintf("architecture dependent section:\n");
1243 	arch_thread_dump_info(&thread->arch_info);
1244 }
1245 
1246 
1247 static int
1248 dump_thread_info(int argc, char **argv)
1249 {
1250 	bool shortInfo = false;
1251 	int argi = 1;
1252 	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
1253 		shortInfo = true;
1254 		print_thread_list_table_head();
1255 		argi++;
1256 	}
1257 
1258 	if (argi == argc) {
1259 		_dump_thread_info(thread_get_current_thread(), shortInfo);
1260 		return 0;
1261 	}
1262 
1263 	for (; argi < argc; argi++) {
1264 		const char *name = argv[argi];
1265 		int32 id = strtoul(name, NULL, 0);
1266 
1267 		if (IS_KERNEL_ADDRESS(id)) {
1268 			// semi-hack
1269 			_dump_thread_info((struct thread *)id, shortInfo);
1270 			continue;
1271 		}
1272 
1273 		// walk through the thread list, trying to match name or id
1274 		bool found = false;
1275 		struct hash_iterator i;
1276 		hash_open(sThreadHash, &i);
1277 		struct thread *thread;
1278 		while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1279 			if (!strcmp(name, thread->name) || thread->id == id) {
1280 				_dump_thread_info(thread, shortInfo);
1281 				found = true;
1282 				break;
1283 			}
1284 		}
1285 		hash_close(sThreadHash, &i, false);
1286 
1287 		if (!found)
1288 			kprintf("thread \"%s\" (%ld) doesn't exist!\n", name, id);
1289 	}
1290 
1291 	return 0;
1292 }
1293 
1294 
1295 static int
1296 dump_thread_list(int argc, char **argv)
1297 {
1298 	struct thread *thread;
1299 	struct hash_iterator i;
1300 	bool realTimeOnly = false;
1301 	bool calling = false;
1302 	const char *callSymbol = NULL;
1303 	addr_t callStart = 0;
1304 	addr_t callEnd = 0;
1305 	int32 requiredState = 0;
1306 	team_id team = -1;
1307 	sem_id sem = -1;
1308 
1309 	if (!strcmp(argv[0], "realtime"))
1310 		realTimeOnly = true;
1311 	else if (!strcmp(argv[0], "ready"))
1312 		requiredState = B_THREAD_READY;
1313 	else if (!strcmp(argv[0], "running"))
1314 		requiredState = B_THREAD_RUNNING;
1315 	else if (!strcmp(argv[0], "waiting")) {
1316 		requiredState = B_THREAD_WAITING;
1317 
1318 		if (argc > 1) {
1319 			sem = strtoul(argv[1], NULL, 0);
1320 			if (sem == 0)
1321 				kprintf("ignoring invalid semaphore argument.\n");
1322 		}
1323 	} else if (!strcmp(argv[0], "calling")) {
1324 		if (argc < 2) {
1325 			kprintf("Need to give a symbol name or start and end arguments.\n");
1326 			return 0;
1327 		} else if (argc == 3) {
1328 			callStart = parse_expression(argv[1]);
1329 			callEnd = parse_expression(argv[2]);
1330 		} else
1331 			callSymbol = argv[1];
1332 
1333 		calling = true;
1334 	} else if (argc > 1) {
1335 		team = strtoul(argv[1], NULL, 0);
1336 		if (team == 0)
1337 			kprintf("ignoring invalid team argument.\n");
1338 	}
1339 
1340 	print_thread_list_table_head();
1341 
1342 	hash_open(sThreadHash, &i);
1343 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1344 		// filter out threads not matching the search criteria
1345 		if ((requiredState && thread->state != requiredState)
1346 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1347 					callStart, callEnd))
1348 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1349 			|| (team > 0 && thread->team->id != team)
1350 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1351 			continue;
1352 
1353 		_dump_thread_info(thread, true);
1354 	}
1355 	hash_close(sThreadHash, &i, false);
1356 	return 0;
1357 }
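
// Hedged KDL usage sketch (editor's addition): the debugger commands backed
// by the two dump functions above are registered in thread_init() (below this
// excerpt); typical invocations from the kernel debugger prompt would be:
//
//	threads           list all threads
//	thread -s 1234    short info for one thread, by ID or name
//	waiting <sem>     list waiting threads, optionally only those on <sem>
//	calling <symbol>  list threads whose call stack contains <symbol>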
1358 
1359 
1360 //	#pragma mark - private kernel API
1361 
1362 
1363 void
1364 thread_exit(void)
1365 {
1366 	cpu_status state;
1367 	struct thread *thread = thread_get_current_thread();
1368 	struct team *team = thread->team;
1369 	thread_id parentID = -1;
1370 	status_t status;
1371 	struct thread_debug_info debugInfo;
1372 	team_id teamID = team->id;
1373 
1374 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1375 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1376 			? "due to signal" : "normally", thread->exit.status));
1377 
1378 	if (!are_interrupts_enabled())
1379 		panic("thread_exit() called with interrupts disabled!\n");
1380 
1381 	// boost our priority to get this over with
1382 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1383 
1384 	// Cancel previously installed alarm timer, if any
1385 	cancel_timer(&thread->alarm);
1386 
1387 	// delete the user stack area first, we won't need it anymore
1388 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1389 		area_id area = thread->user_stack_area;
1390 		thread->user_stack_area = -1;
1391 		vm_delete_area(team->id, area, true);
1392 	}
1393 
1394 	struct job_control_entry *death = NULL;
1395 	struct death_entry* threadDeathEntry = NULL;
1396 	bool deleteTeam = false;
1397 	port_id debuggerPort = -1;
1398 
1399 	if (team != team_get_kernel_team()) {
1400 		user_debug_thread_exiting(thread);
1401 
1402 		if (team->main_thread == thread) {
1403 			// The main thread is exiting. Shut down the whole team.
1404 			deleteTeam = true;
1405 		} else {
1406 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1407 			team_free_user_thread(thread);
1408 		}
1409 
1410 		// remove this thread from the current team and add it to the kernel
1411 		// put the thread into the kernel team until it dies
1412 		state = disable_interrupts();
1413 		GRAB_TEAM_LOCK();
1414 
1415 		if (deleteTeam)
1416 			debuggerPort = team_shutdown_team(team, state);
1417 
1418 		GRAB_THREAD_LOCK();
1419 			// removing the thread and putting its death entry to the parent
1420 			// team needs to be an atomic operation
1421 
1422 		// remember how long this thread lasted
1423 		team->dead_threads_kernel_time += thread->kernel_time;
1424 		team->dead_threads_user_time += thread->user_time;
1425 
1426 		remove_thread_from_team(team, thread);
1427 		insert_thread_into_team(team_get_kernel_team(), thread);
1428 
1429 		if (team->death_entry != NULL) {
1430 			if (--team->death_entry->remaining_threads == 0)
1431 				team->death_entry->condition.NotifyOne(true, B_OK);
1432 		}
1433 
1434 		if (deleteTeam) {
1435 			struct team *parent = team->parent;
1436 
1437 			// remember who our parent was so we can send a signal
1438 			parentID = parent->id;
1439 
1440 			// Set the team job control state to "dead" and detach the job
1441 			// control entry from our team struct.
1442 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1443 			death = team->job_control_entry;
1444 			team->job_control_entry = NULL;
1445 
1446 			if (death != NULL) {
1447 				death->InitDeadState();
1448 
1449 				// team_set_job_control_state() already moved our entry
1450 				// into the parent's list. We just check the soft limit of
1451 				// death entries.
1452 				if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
1453 					death = parent->dead_children->entries.RemoveHead();
1454 					parent->dead_children->count--;
1455 				} else
1456 					death = NULL;
1457 
1458 				RELEASE_THREAD_LOCK();
1459 			} else
1460 				RELEASE_THREAD_LOCK();
1461 
1462 			team_remove_team(team);
1463 
1464 			send_signal_etc(parentID, SIGCHLD,
1465 				SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
1466 		} else {
1467 			// The thread is not the main thread. We store a thread death
1468 			// entry for it, unless someone is already waiting for it.
1469 			if (threadDeathEntry != NULL
1470 				&& list_is_empty(&thread->exit.waiters)) {
1471 				threadDeathEntry->thread = thread->id;
1472 				threadDeathEntry->status = thread->exit.status;
1473 				threadDeathEntry->reason = thread->exit.reason;
1474 				threadDeathEntry->signal = thread->exit.signal;
1475 
1476 				// add entry -- remove an old one, if we hit the limit
1477 				list_add_item(&team->dead_threads, threadDeathEntry);
1478 				team->dead_threads_count++;
1479 				threadDeathEntry = NULL;
1480 
1481 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1482 					threadDeathEntry = (death_entry*)list_remove_head_item(
1483 						&team->dead_threads);
1484 					team->dead_threads_count--;
1485 				}
1486 			}
1487 
1488 			RELEASE_THREAD_LOCK();
1489 		}
1490 
1491 		RELEASE_TEAM_LOCK();
1492 
1493 		// swap address spaces, to make sure we're running on the kernel's pgdir
1494 		vm_swap_address_space(team->address_space, VMAddressSpace::Kernel());
1495 		restore_interrupts(state);
1496 
1497 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1498 	}
1499 
1500 	free(threadDeathEntry);
1501 
1502 	// delete the team if we're its main thread
1503 	if (deleteTeam) {
1504 		team_delete_team(team, debuggerPort);
1505 
1506 		// we need to delete any death entry that made it to here
1507 		delete death;
1508 	}
1509 
1510 	state = disable_interrupts();
1511 	GRAB_THREAD_LOCK();
1512 
1513 	// remove thread from hash, so it's no longer accessible
1514 	hash_remove(sThreadHash, thread);
1515 	sUsedThreads--;
1516 
1517 	// Stop debugging for this thread
1518 	debugInfo = thread->debug_info;
1519 	clear_thread_debug_info(&thread->debug_info, true);
1520 
1521 	// Remove the select infos. We notify them a little later.
1522 	select_info* selectInfos = thread->select_infos;
1523 	thread->select_infos = NULL;
1524 
1525 	RELEASE_THREAD_LOCK();
1526 	restore_interrupts(state);
1527 
1528 	destroy_thread_debug_info(&debugInfo);
1529 
1530 	// notify select infos
1531 	select_info* info = selectInfos;
1532 	while (info != NULL) {
1533 		select_sync* sync = info->sync;
1534 
1535 		notify_select_events(info, B_EVENT_INVALID);
1536 		info = info->next;
1537 		put_select_sync(sync);
1538 	}
1539 
1540 	// notify listeners
1541 	sNotificationService.Notify(THREAD_REMOVED, thread);
1542 
1543 	// shutdown the thread messaging
1544 
1545 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1546 	if (status == B_WOULD_BLOCK) {
1547 		// there is data waiting for us, so let us eat it
1548 		thread_id sender;
1549 
1550 		delete_sem(thread->msg.write_sem);
1551 			// first, let's remove all possibly waiting writers
1552 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1553 	} else {
1554 		// we probably own the semaphore here, and we're the last to do so
1555 		delete_sem(thread->msg.write_sem);
1556 	}
1557 	// now we can safely remove the msg.read_sem
1558 	delete_sem(thread->msg.read_sem);
1559 
1560 	// fill all death entries and delete the sem that others will use to wait on us
1561 	{
1562 		sem_id cachedExitSem = thread->exit.sem;
1563 		cpu_status state;
1564 
1565 		state = disable_interrupts();
1566 		GRAB_THREAD_LOCK();
1567 
1568 		// make sure no one will grab this semaphore again
1569 		thread->exit.sem = -1;
1570 
1571 		// fill all death entries
1572 		death_entry* entry = NULL;
1573 		while ((entry = (struct death_entry*)list_get_next_item(
1574 				&thread->exit.waiters, entry)) != NULL) {
1575 			entry->status = thread->exit.status;
1576 			entry->reason = thread->exit.reason;
1577 			entry->signal = thread->exit.signal;
1578 		}
1579 
1580 		RELEASE_THREAD_LOCK();
1581 		restore_interrupts(state);
1582 
1583 		delete_sem(cachedExitSem);
1584 	}
1585 
1586 	// notify the debugger
1587 	if (teamID != team_get_kernel_team_id())
1588 		user_debug_thread_deleted(teamID, thread->id);
1589 
1590 	// enqueue in the undertaker list and reschedule for the last time
1591 	UndertakerEntry undertakerEntry(thread, teamID);
1592 
1593 	disable_interrupts();
1594 	GRAB_THREAD_LOCK();
1595 
1596 	sUndertakerEntries.Add(&undertakerEntry);
1597 	sUndertakerCondition.NotifyOne(true);
1598 
1599 	thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
1600 	scheduler_reschedule();
1601 
1602 	panic("never can get here\n");
1603 }
1604 
1605 
1606 struct thread *
1607 thread_get_thread_struct(thread_id id)
1608 {
1609 	struct thread *thread;
1610 	cpu_status state;
1611 
1612 	state = disable_interrupts();
1613 	GRAB_THREAD_LOCK();
1614 
1615 	thread = thread_get_thread_struct_locked(id);
1616 
1617 	RELEASE_THREAD_LOCK();
1618 	restore_interrupts(state);
1619 
1620 	return thread;
1621 }
1622 
1623 
1624 struct thread *
1625 thread_get_thread_struct_locked(thread_id id)
1626 {
1627 	struct thread_key key;
1628 
1629 	key.id = id;
1630 
1631 	return (struct thread*)hash_lookup(sThreadHash, &key);
1632 }
1633 
1634 
1635 /*!	Called in the interrupt handler code when a thread enters
1636 	the kernel for any reason.
1637 	Only tracks time for now.
1638 	Interrupts are disabled.
1639 */
1640 void
1641 thread_at_kernel_entry(bigtime_t now)
1642 {
1643 	struct thread *thread = thread_get_current_thread();
1644 
1645 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1646 
1647 	// track user time
1648 	thread->user_time += now - thread->last_time;
1649 	thread->last_time = now;
1650 
1651 	thread->in_kernel = true;
1652 }
1653 
1654 
1655 /*!	Called whenever a thread exits kernel space to user space.
1656 	Tracks time, handles signals, ...
1657 	Interrupts must be enabled. When the function returns, interrupts will be
1658 	disabled.
1659 */
1660 void
1661 thread_at_kernel_exit(void)
1662 {
1663 	struct thread *thread = thread_get_current_thread();
1664 
1665 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1666 
1667 	while (handle_signals(thread)) {
1668 		InterruptsSpinLocker _(gThreadSpinlock);
1669 		scheduler_reschedule();
1670 	}
1671 
1672 	disable_interrupts();
1673 
1674 	thread->in_kernel = false;
1675 
1676 	// track kernel time
1677 	bigtime_t now = system_time();
1678 	thread->kernel_time += now - thread->last_time;
1679 	thread->last_time = now;
1680 }
1681 
1682 
1683 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1684 	and no debugging shall be done.
1685 	Interrupts must be disabled.
1686 */
1687 void
1688 thread_at_kernel_exit_no_signals(void)
1689 {
1690 	struct thread *thread = thread_get_current_thread();
1691 
1692 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1693 
1694 	thread->in_kernel = false;
1695 
1696 	// track kernel time
1697 	bigtime_t now = system_time();
1698 	thread->kernel_time += now - thread->last_time;
1699 	thread->last_time = now;
1700 }
1701 
1702 
1703 void
1704 thread_reset_for_exec(void)
1705 {
1706 	struct thread *thread = thread_get_current_thread();
1707 
1708 	reset_signals(thread);
1709 
1710 	// Note: We don't cancel an alarm. It is supposed to survive exec*().
1711 }
1712 
1713 
1714 /*! Inserts a thread at the tail of a queue */
1715 void
1716 thread_enqueue(struct thread *thread, struct thread_queue *queue)
1717 {
1718 	thread->queue_next = NULL;
1719 	if (queue->head == NULL) {
1720 		queue->head = thread;
1721 		queue->tail = thread;
1722 	} else {
1723 		queue->tail->queue_next = thread;
1724 		queue->tail = thread;
1725 	}
1726 }
1727 
1728 
1729 struct thread *
1730 thread_lookat_queue(struct thread_queue *queue)
1731 {
1732 	return queue->head;
1733 }
1734 
1735 
1736 struct thread *
1737 thread_dequeue(struct thread_queue *queue)
1738 {
1739 	struct thread *thread = queue->head;
1740 
1741 	if (thread != NULL) {
1742 		queue->head = thread->queue_next;
1743 		if (queue->tail == thread)
1744 			queue->tail = NULL;
1745 	}
1746 	return thread;
1747 }
1748 
1749 
1750 struct thread *
1751 thread_dequeue_id(struct thread_queue *q, thread_id id)
1752 {
1753 	struct thread *thread;
1754 	struct thread *last = NULL;
1755 
1756 	thread = q->head;
1757 	while (thread != NULL) {
1758 		if (thread->id == id) {
1759 			if (last == NULL)
1760 				q->head = thread->queue_next;
1761 			else
1762 				last->queue_next = thread->queue_next;
1763 
1764 			if (q->tail == thread)
1765 				q->tail = last;
1766 			break;
1767 		}
1768 		last = thread;
1769 		thread = thread->queue_next;
1770 	}
1771 	return thread;
1772 }
1773 
1774 
1775 struct thread*
1776 thread_iterate_through_threads(thread_iterator_callback callback, void* cookie)
1777 {
1778 	struct hash_iterator iterator;
1779 	hash_open(sThreadHash, &iterator);
1780 
1781 	struct thread* thread;
1782 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
1783 			!= NULL) {
1784 		if (callback(thread, cookie))
1785 			break;
1786 	}
1787 
1788 	hash_close(sThreadHash, &iterator, false);
1789 
1790 	return thread;
1791 }
1792 
1793 
1794 thread_id
1795 allocate_thread_id(void)
1796 {
1797 	return atomic_add(&sNextThreadID, 1);
1798 }
1799 
1800 
1801 thread_id
1802 peek_next_thread_id(void)
1803 {
1804 	return atomic_get(&sNextThreadID);
1805 }
1806 
1807 
1808 /*!	Yield the CPU to other threads.
1809 	If \a force is \c true, the thread is almost guaranteed to be unscheduled.
1810 	If \c false, it will continue to run if there is no other thread in the
1811 	ready state; and even if there is, it still has a good chance to continue
1812 	as long as it has a higher priority than the other ready threads.
1813 */
1814 void
1815 thread_yield(bool force)
1816 {
1817 	if (force) {
1818 		// snooze for roughly 3 thread quantums
1819 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1820 #if 0
1821 		cpu_status state;
1822 
1823 		struct thread *thread = thread_get_current_thread();
1824 		if (thread == NULL)
1825 			return;
1826 
1827 		state = disable_interrupts();
1828 		GRAB_THREAD_LOCK();
1829 
1830 		// mark the thread as yielded, so it will not be scheduled next
1831 		//thread->was_yielded = true;
1832 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1833 		scheduler_reschedule();
1834 
1835 		RELEASE_THREAD_LOCK();
1836 		restore_interrupts(state);
1837 #endif
1838 	} else {
1839 		struct thread *thread = thread_get_current_thread();
1840 		if (thread == NULL)
1841 			return;
1842 
1843 		// Don't force the thread off the CPU, just reschedule.
1844 		InterruptsSpinLocker _(gThreadSpinlock);
1845 		scheduler_reschedule();
1846 	}
1847 }
1848 
1849 
1850 /*!	Kernel private thread creation function.
1851 
1852 	\param threadID The ID to be assigned to the new thread. If
1853 		  \code < 0 \endcode a fresh one is allocated.
1854 */
1855 thread_id
1856 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1857 	void *arg, team_id team, thread_id threadID)
1858 {
1859 	thread_creation_attributes attributes;
1860 	attributes.entry = (thread_entry_func)function;
1861 	attributes.name = name;
1862 	attributes.priority = priority;
1863 	attributes.args1 = arg;
1864 	attributes.args2 = NULL;
1865 	attributes.stack_address = NULL;
1866 	attributes.stack_size = 0;
1867 	attributes.team = team;
1868 	attributes.thread = threadID;
1869 
1870 	return create_thread(attributes, true);
1871 }
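
// Hedged usage sketch (editor's addition): kernel components start service
// threads through this path; for instance, the undertaker defined above is
// spawned and resumed from thread_init() roughly like this:
//
//	thread_id id = spawn_kernel_thread_etc(&undertaker, "undertaker",
//		B_DISPLAY_PRIORITY, NULL, team_get_kernel_team_id(), -1);
//	if (id >= 0)
//		resume_thread(id);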
1872 
1873 
1874 status_t
1875 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1876 	status_t *_returnCode)
1877 {
1878 	sem_id exitSem = B_BAD_THREAD_ID;
1879 	struct death_entry death;
1880 	job_control_entry* freeDeath = NULL;
1881 	struct thread *thread;
1882 	cpu_status state;
1883 	status_t status = B_OK;
1884 
1885 	if (id < B_OK)
1886 		return B_BAD_THREAD_ID;
1887 
1888 	// we need to resume the thread we're waiting for first
1889 
1890 	state = disable_interrupts();
1891 	GRAB_THREAD_LOCK();
1892 
1893 	thread = thread_get_thread_struct_locked(id);
1894 	if (thread != NULL) {
1895 		// remember the semaphore we have to wait on and place our death entry
1896 		exitSem = thread->exit.sem;
1897 		list_add_link_to_head(&thread->exit.waiters, &death);
1898 	}
1899 
1900 	death_entry* threadDeathEntry = NULL;
1901 
1902 	RELEASE_THREAD_LOCK();
1903 
1904 	if (thread == NULL) {
1905 		// we couldn't find this thread - maybe it's already gone, and we'll
1906 		// find its death entry in our team
1907 		GRAB_TEAM_LOCK();
1908 
1909 		struct team* team = thread_get_current_thread()->team;
1910 
1911 		// check the child death entries first (i.e. main threads of child
1912 		// teams)
1913 		bool deleteEntry;
1914 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1915 		if (freeDeath != NULL) {
1916 			death.status = freeDeath->status;
1917 			if (!deleteEntry)
1918 				freeDeath = NULL;
1919 		} else {
1920 			// check the thread death entries of the team (non-main threads)
1921 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1922 					&team->dead_threads, threadDeathEntry)) != NULL) {
1923 				if (threadDeathEntry->thread == id) {
1924 					list_remove_item(&team->dead_threads, threadDeathEntry);
1925 					team->dead_threads_count--;
1926 					death.status = threadDeathEntry->status;
1927 					break;
1928 				}
1929 			}
1930 
1931 			if (threadDeathEntry == NULL)
1932 				status = B_BAD_THREAD_ID;
1933 		}
1934 
1935 		RELEASE_TEAM_LOCK();
1936 	}
1937 
1938 	restore_interrupts(state);
1939 
1940 	if (thread == NULL && status == B_OK) {
1941 		// we found the thread's death entry in our team
1942 		if (_returnCode)
1943 			*_returnCode = death.status;
1944 
1945 		delete freeDeath;
1946 		free(threadDeathEntry);
1947 		return B_OK;
1948 	}
1949 
1950 	// we need to wait for the death of the thread
1951 
1952 	if (exitSem < B_OK)
1953 		return B_BAD_THREAD_ID;
1954 
1955 	resume_thread(id);
1956 		// make sure we don't wait forever on a suspended thread
1957 
1958 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1959 
1960 	if (status == B_OK) {
1961 		// this should never happen as the thread deletes the semaphore on exit
1962 		panic("unexpectedly acquired exit_sem for thread %ld\n", id);
1963 	} else if (status == B_BAD_SEM_ID) {
1964 		// this is the way the thread normally exits
1965 		status = B_OK;
1966 
1967 		if (_returnCode)
1968 			*_returnCode = death.status;
1969 	} else {
1970 		// We were probably interrupted; we need to remove our death entry now.
1971 		state = disable_interrupts();
1972 		GRAB_THREAD_LOCK();
1973 
1974 		thread = thread_get_thread_struct_locked(id);
1975 		if (thread != NULL)
1976 			list_remove_link(&death);
1977 
1978 		RELEASE_THREAD_LOCK();
1979 		restore_interrupts(state);
1980 
1981 		// If the thread is already gone, we need to wait for its exit semaphore
1982 		// to make sure our death entry stays valid - it won't take long
1983 		if (thread == NULL)
1984 			acquire_sem(exitSem);
1985 	}
1986 
1987 	return status;
1988 }
1989 
1990 
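/*!	Registers the given select info with the thread, so that the caller can
	wait for the thread to become invalid. Only \c B_EVENT_INVALID is
	supported; all other events are masked out. While the info is registered,
	a reference to its sync object is held.
*/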
1991 status_t
1992 select_thread(int32 id, struct select_info* info, bool kernel)
1993 {
1994 	InterruptsSpinLocker locker(gThreadSpinlock);
1995 
1996 	// get thread
1997 	struct thread* thread = thread_get_thread_struct_locked(id);
1998 	if (thread == NULL)
1999 		return B_BAD_THREAD_ID;
2000 
2001 	// We support only B_EVENT_INVALID at the moment.
2002 	info->selected_events &= B_EVENT_INVALID;
2003 
2004 	// add info to list
2005 	if (info->selected_events != 0) {
2006 		info->next = thread->select_infos;
2007 		thread->select_infos = info;
2008 
2009 		// we need a sync reference
2010 		atomic_add(&info->sync->ref_count, 1);
2011 	}
2012 
2013 	return B_OK;
2014 }
2015 
2016 
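/*!	Reverses the effect of select_thread(): removes the given select info
	from the thread's list again, if still registered, and surrenders the
	sync object reference acquired on registration.
*/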
2017 status_t
2018 deselect_thread(int32 id, struct select_info* info, bool kernel)
2019 {
2020 	InterruptsSpinLocker locker(gThreadSpinlock);
2021 
2022 	// get thread
2023 	struct thread* thread = thread_get_thread_struct_locked(id);
2024 	if (thread == NULL)
2025 		return B_BAD_THREAD_ID;
2026 
2027 	// remove info from list
2028 	select_info** infoLocation = &thread->select_infos;
2029 	while (*infoLocation != NULL && *infoLocation != info)
2030 		infoLocation = &(*infoLocation)->next;
2031 
2032 	if (*infoLocation != info)
2033 		return B_OK;
2034 
2035 	*infoLocation = info->next;
2036 
2037 	locker.Unlock();
2038 
2039 	// surrender sync reference
2040 	put_select_sync(info->sync);
2041 
2042 	return B_OK;
2043 }
2044 
2045 
2046 int32
2047 thread_max_threads(void)
2048 {
2049 	return sMaxThreads;
2050 }
2051 
2052 
2053 int32
2054 thread_used_threads(void)
2055 {
2056 	return sUsedThreads;
2057 }
2058 
2059 
2060 const char*
2061 thread_state_to_text(struct thread* thread, int32 state)
2062 {
2063 	return state_to_text(thread, state);
2064 }
2065 
2066 
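/*!	Returns the I/O priority of the thread with the given ID. If the thread
	hasn't set an I/O priority (i.e. the value is negative), the thread's
	scheduling priority is returned instead.
	\return The thread's effective I/O priority, or \c B_BAD_THREAD_ID, if no
		thread with the given ID exists.
*/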
2067 int32
2068 thread_get_io_priority(thread_id id)
2069 {
2070 	// take a shortcut if it is the current thread
2071 	struct thread* thread = thread_get_current_thread();
2072 	int32 priority;
2073 	if (id == thread->id) {
2074 		priority = thread->io_priority;
2075 		return priority < 0 ? thread->priority : priority;
2076 	}
2077 
2078 	// not the current thread -- get it
2079 	InterruptsSpinLocker locker(gThreadSpinlock);
2080 
2081 	thread = thread_get_thread_struct_locked(id);
2082 	if (thread == NULL)
2083 		return B_BAD_THREAD_ID;
2084 
2085 	priority = thread->io_priority;
2086 	return priority < 0 ? thread->priority : priority;
2087 }
2088 
2089 
2090 void
2091 thread_set_io_priority(int32 priority)
2092 {
2093 	struct thread* thread = thread_get_current_thread();
2094 	thread->io_priority = priority;
2095 }
2096 
2097 
2098 status_t
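/*!	Initializes the kernel's threading subsystem: creates the thread hash
	table, sets up the per-CPU idle threads, spawns the undertaker thread,
	and registers the thread related debugger commands.
*/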
2099 thread_init(kernel_args *args)
2100 {
2101 	uint32 i;
2102 
2103 	TRACE(("thread_init: entry\n"));
2104 
2105 	// create the thread hash table
2106 	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
2107 		&thread_struct_compare, &thread_struct_hash);
2108 
2109 	// zero out the dead thread structure queue
2110 	memset(&dead_q, 0, sizeof(dead_q));
2111 
2112 	if (arch_thread_init(args) < B_OK)
2113 		panic("arch_thread_init() failed!\n");
2114 
2115 	// skip all thread IDs up to and including B_SYSTEM_TEAM, which is reserved
2116 	sNextThreadID = B_SYSTEM_TEAM + 1;
2117 
2118 	// create an idle thread for each cpu
2119 
2120 	for (i = 0; i < args->num_cpus; i++) {
2121 		struct thread *thread;
2122 		area_info info;
2123 		char name[64];
2124 
2125 		sprintf(name, "idle thread %lu", i + 1);
2126 		thread = create_thread_struct(&sIdleThreads[i], name,
2127 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
2128 		if (thread == NULL) {
2129 			panic("error creating idle thread struct\n");
2130 			return B_NO_MEMORY;
2131 		}
2132 
2133 		gCPU[i].running_thread = thread;
2134 
2135 		thread->team = team_get_kernel_team();
2136 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
2137 		thread->state = B_THREAD_RUNNING;
2138 		thread->next_state = B_THREAD_READY;
2139 		sprintf(name, "idle thread %lu kstack", i + 1);
2140 		thread->kernel_stack_area = find_area(name);
2141 		thread->entry = NULL;
2142 
2143 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2144 			panic("error finding idle kstack area\n");
2145 
2146 		thread->kernel_stack_base = (addr_t)info.address;
2147 		thread->kernel_stack_top = thread->kernel_stack_base + info.size;
2148 
2149 		hash_insert(sThreadHash, thread);
2150 		insert_thread_into_team(thread->team, thread);
2151 	}
2152 	sUsedThreads = args->num_cpus;
2153 
2154 	// init the notification service
2155 	new(&sNotificationService) ThreadNotificationService();
2156 
2157 	// start the undertaker thread
2158 	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2159 	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2160 
2161 	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2162 		B_DISPLAY_PRIORITY, NULL);
2163 	if (undertakerThread < 0)
2164 		panic("Failed to create undertaker thread!");
2165 	resume_thread(undertakerThread);
2166 
2167 	// set up some debugger commands
2168 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2169 		"[ <team> ]\n"
2170 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2171 		"all threads of the specified team.\n"
2172 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2173 	add_debugger_command_etc("ready", &dump_thread_list,
2174 		"List all ready threads",
2175 		"\n"
2176 		"Prints a list of all threads in ready state.\n", 0);
2177 	add_debugger_command_etc("running", &dump_thread_list,
2178 		"List all running threads",
2179 		"\n"
2180 		"Prints a list of all threads in running state.\n", 0);
2181 	add_debugger_command_etc("waiting", &dump_thread_list,
2182 		"List all waiting threads (optionally for a specific semaphore)",
2183 		"[ <sem> ]\n"
2184 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2185 		"specified, only the threads waiting on that semaphore are listed.\n"
2186 		"  <sem>  - ID of the semaphore.\n", 0);
2187 	add_debugger_command_etc("realtime", &dump_thread_list,
2188 		"List all realtime threads",
2189 		"\n"
2190 		"Prints a list of all threads with realtime priority.\n", 0);
2191 	add_debugger_command_etc("thread", &dump_thread_info,
2192 		"Dump info about a particular thread",
2193 		"[ -s ] ( <id> | <address> | <name> )*\n"
2194 		"Prints information about the specified thread. If no argument is\n"
2195 		"given the current thread is selected.\n"
2196 		"  -s         - Print info in compact table form (like \"threads\").\n"
2197 		"  <id>       - The ID of the thread.\n"
2198 		"  <address>  - The address of the thread structure.\n"
2199 		"  <name>     - The thread's name.\n", 0);
2200 	add_debugger_command_etc("calling", &dump_thread_list,
2201 		"Show all threads that have a specific address in their call chain",
2202 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2203 	add_debugger_command_etc("unreal", &make_thread_unreal,
2204 		"Set realtime priority threads to normal priority",
2205 		"[ <id> ]\n"
2206 		"Sets the priority of all realtime threads or, if given, the one\n"
2207 		"with the specified ID to \"normal\" priority.\n"
2208 		"  <id>  - The ID of the thread.\n", 0);
2209 	add_debugger_command_etc("suspend", &make_thread_suspended,
2210 		"Suspend a thread",
2211 		"[ <id> ]\n"
2212 		"Suspends the thread with the given ID. If no ID argument is given\n"
2213 		"the current thread is selected.\n"
2214 		"  <id>  - The ID of the thread.\n", 0);
2215 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2216 		"<id>\n"
2217 		"Resumes the specified thread, if it is currently suspended.\n"
2218 		"  <id>  - The ID of the thread.\n", 0);
2219 	add_debugger_command_etc("drop", &drop_into_debugger,
2220 		"Drop a thread into the userland debugger",
2221 		"<id>\n"
2222 		"Drops the specified (userland) thread into the userland debugger\n"
2223 		"after leaving the kernel debugger.\n"
2224 		"  <id>  - The ID of the thread.\n", 0);
2225 	add_debugger_command_etc("priority", &set_thread_prio,
2226 		"Set a thread's priority",
2227 		"<priority> [ <id> ]\n"
2228 		"Sets the priority of the thread with the specified ID to the given\n"
2229 		"priority. If no thread ID is given, the current thread is selected.\n"
2230 		"  <priority>  - The thread's new priority (0 - 120)\n"
2231 		"  <id>        - The ID of the thread.\n", 0);
2232 
2233 	return B_OK;
2234 }
2235 
2236 
2237 status_t
2238 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2239 {
2240 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2241 	// so that get_current_cpu and friends will work, which is crucial for
2242 	// a lot of low level routines
2243 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2244 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2245 	return B_OK;
2246 }
2247 
2248 
2249 //	#pragma mark - thread blocking API
2250 
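// The following functions implement the generic thread blocking facility:
// a thread announces what it is about to wait for via
// thread_prepare_to_block() and then blocks via thread_block(),
// thread_block_with_timeout(), or one of their *_locked() variants; another
// thread wakes it via thread_unblock()/thread_unblock_locked(), and the
// status passed there is what the blocking call returns. A minimal sketch of
// the usual pattern, mirroring snooze_etc() and _user_block_thread() below
// (the block type and description string are placeholders):
//
//		InterruptsSpinLocker _(gThreadSpinlock);
//		struct thread* thread = thread_get_current_thread();
//		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
//			THREAD_BLOCK_TYPE_OTHER, "waiting for example event");
//		status_t status = thread_block_with_timeout_locked(
//			B_RELATIVE_TIMEOUT, 1000000);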
2251 
2252 static status_t
2253 thread_block_timeout(timer* timer)
2254 {
2255 	// The timer has been installed with B_TIMER_ACQUIRE_THREAD_LOCK, so
2256 	// we're holding the thread lock already. This makes things comfortably
2257 	// easy.
2258 
2259 	struct thread* thread = (struct thread*)timer->user_data;
2260 	thread_unblock_locked(thread, B_TIMED_OUT);
2261 
2262 	return B_HANDLED_INTERRUPT;
2263 }
2264 
2265 
2266 status_t
2267 thread_block()
2268 {
2269 	InterruptsSpinLocker _(gThreadSpinlock);
2270 	return thread_block_locked(thread_get_current_thread());
2271 }
2272 
2273 
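/*!	Unblocks the thread with the given ID, if it exists and is currently
	blocked, causing its pending thread_block() call to return \a status.
*/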
2274 void
2275 thread_unblock(thread_id threadID, status_t status)
2276 {
2277 	InterruptsSpinLocker _(gThreadSpinlock);
2278 
2279 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2280 	if (thread != NULL)
2281 		thread_unblock_locked(thread, status);
2282 }
2283 
2284 
2285 status_t
2286 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2287 {
2288 	InterruptsSpinLocker _(gThreadSpinlock);
2289 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
2290 }
2291 
2292 
2293 status_t
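/*!	Blocks the current thread with a timeout, as prepared by a prior
	thread_prepare_to_block() call. The thread lock must be held. If one of
	\c B_RELATIVE_TIMEOUT or \c B_ABSOLUTE_TIMEOUT is given and the timeout
	isn't \c B_INFINITE_TIMEOUT, a one-shot timer is installed that unblocks
	the thread with \c B_TIMED_OUT when it expires.
*/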
2294 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
2295 {
2296 	struct thread* thread = thread_get_current_thread();
2297 
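	// A wait status != 1 means the thread has already been unblocked
	// (thread_prepare_to_block() sets it to 1 as the "still waiting" marker).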
2298 	if (thread->wait.status != 1)
2299 		return thread->wait.status;
2300 
2301 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2302 		&& timeout != B_INFINITE_TIMEOUT;
2303 
2304 	if (useTimer) {
2305 		// Timer flags: absolute/relative + "acquire thread lock". The latter
2306 		// avoids nasty race conditions and deadlock problems that could
2307 		// otherwise occur between our cancel_timer() and a concurrently
2308 		// executing thread_block_timeout().
2309 		uint32 timerFlags;
2310 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2311 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2312 		} else {
2313 			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2314 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2315 				timeout -= rtc_boot_time();
2316 		}
2317 		timerFlags |= B_TIMER_ACQUIRE_THREAD_LOCK;
2318 
2319 		// install the timer
2320 		thread->wait.unblock_timer.user_data = thread;
2321 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2322 			timerFlags);
2323 	}
2324 
2325 	// block
2326 	status_t error = thread_block_locked(thread);
2327 
2328 	// cancel timer, if it didn't fire
2329 	// cancel the timer if it didn't fire
2330 		cancel_timer(&thread->wait.unblock_timer);
2331 
2332 	return error;
2333 }
2334 
2335 
2336 /*!	Thread spinlock must be held.
2337 */
2338 static status_t
2339 user_unblock_thread(thread_id threadID, status_t status)
2340 {
2341 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2342 	if (thread == NULL)
2343 		return B_BAD_THREAD_ID;
2344 	if (thread->user_thread == NULL)
2345 		return B_NOT_ALLOWED;
2346 
2347 	if (thread->user_thread->wait_status > 0) {
2348 		thread->user_thread->wait_status = status;
2349 		thread_unblock_locked(thread, status);
2350 	}
2351 
2352 	return B_OK;
2353 }
2354 
2355 
2356 //	#pragma mark - public kernel API
2357 
2358 
2359 void
2360 exit_thread(status_t returnValue)
2361 {
2362 	struct thread *thread = thread_get_current_thread();
2363 
2364 	thread->exit.status = returnValue;
2365 	thread->exit.reason = THREAD_RETURN_EXIT;
2366 
2367 	// If called from a kernel thread, we don't deliver the signal but
2368 	// exit directly instead, to keep the user space behaviour of this
2369 	// function.
2370 	if (thread->team != team_get_kernel_team())
2371 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2372 	else
2373 		thread_exit();
2374 }
2375 
2376 
2377 status_t
2378 kill_thread(thread_id id)
2379 {
2380 	if (id <= 0)
2381 		return B_BAD_VALUE;
2382 
2383 	return send_signal(id, SIGKILLTHR);
2384 }
2385 
2386 
2387 status_t
2388 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2389 {
2390 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2391 }
2392 
2393 
2394 int32
2395 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2396 {
2397 	return receive_data_etc(sender, buffer, bufferSize, 0);
2398 }
2399 
2400 
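/*!	Returns whether there is send_data() data waiting. Note that, as
	implemented here, the \a thread argument is ignored; only the calling
	thread's own message queue is checked.
*/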
2401 bool
2402 has_data(thread_id thread)
2403 {
2404 	int32 count;
2405 
2406 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
2407 			&count) != B_OK)
2408 		return false;
2409 
2410 	return count != 0;
2411 }
2412 
2413 
2414 status_t
2415 _get_thread_info(thread_id id, thread_info *info, size_t size)
2416 {
2417 	status_t status = B_OK;
2418 	struct thread *thread;
2419 	cpu_status state;
2420 
2421 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2422 		return B_BAD_VALUE;
2423 
2424 	state = disable_interrupts();
2425 	GRAB_THREAD_LOCK();
2426 
2427 	thread = thread_get_thread_struct_locked(id);
2428 	if (thread == NULL) {
2429 		status = B_BAD_VALUE;
2430 		goto err;
2431 	}
2432 
2433 	fill_thread_info(thread, info, size);
2434 
2435 err:
2436 	RELEASE_THREAD_LOCK();
2437 	restore_interrupts(state);
2438 
2439 	return status;
2440 }
2441 
2442 
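/*!	Iterates through the threads of the given team. The cookie holds the ID
	of the thread returned by the previous call; pass 0 to start with the
	team's main thread. The iteration relies on the team's thread list being
	sorted by descending thread ID.
*/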
2443 status_t
2444 _get_next_thread_info(team_id teamID, int32 *_cookie, thread_info *info,
2445 	size_t size)
2446 {
2447 	if (info == NULL || size != sizeof(thread_info) || teamID < 0)
2448 		return B_BAD_VALUE;
2449 
2450 	int32 lastID = *_cookie;
2451 
2452 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
2453 
2454 	struct team* team;
2455 	if (teamID == B_CURRENT_TEAM)
2456 		team = thread_get_current_thread()->team;
2457 	else
2458 		team = team_get_team_struct_locked(teamID);
2459 
2460 	if (team == NULL)
2461 		return B_BAD_VALUE;
2462 
2463 	struct thread* thread = NULL;
2464 
2465 	if (lastID == 0) {
2466 		// We start with the main thread
2467 		thread = team->main_thread;
2468 	} else {
2469 		// Find the thread with the smallest ID still higher than the last
2470 		// one we returned (as long as the IDs don't overlap, the list is
2471 		// always sorted from highest to lowest ID).
2472 		for (struct thread* next = team->thread_list; next != NULL;
2473 				next = next->team_next) {
2474 			if (next->id <= lastID)
2475 				break;
2476 
2477 			thread = next;
2478 		}
2479 	}
2480 
2481 	if (thread == NULL)
2482 		return B_BAD_VALUE;
2483 
2484 	lastID = thread->id;
2485 	*_cookie = lastID;
2486 
2487 	SpinLocker threadLocker(gThreadSpinlock);
2488 	fill_thread_info(thread, info, size);
2489 
2490 	return B_OK;
2491 }
2492 
2493 
2494 thread_id
2495 find_thread(const char *name)
2496 {
2497 	struct hash_iterator iterator;
2498 	struct thread *thread;
2499 	cpu_status state;
2500 
2501 	if (name == NULL)
2502 		return thread_get_current_thread_id();
2503 
2504 	state = disable_interrupts();
2505 	GRAB_THREAD_LOCK();
2506 
2507 	// ToDo: this might not be in the same order as find_thread() in BeOS,
2508 	//		which could theoretically be problematic.
2509 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2510 	//		cheap either - although this function is probably used very rarely.
2511 
2512 	hash_open(sThreadHash, &iterator);
2513 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
2514 			!= NULL) {
2515 		// Search through hash
2516 		if (thread->name != NULL && !strcmp(thread->name, name)) {
2517 			thread_id id = thread->id;
2518 
2519 			RELEASE_THREAD_LOCK();
2520 			restore_interrupts(state);
2521 			return id;
2522 		}
2523 	}
2524 
2525 	RELEASE_THREAD_LOCK();
2526 	restore_interrupts(state);
2527 
2528 	return B_NAME_NOT_FOUND;
2529 }
2530 
2531 
2532 status_t
2533 rename_thread(thread_id id, const char *name)
2534 {
2535 	struct thread *thread = thread_get_current_thread();
2536 	status_t status = B_BAD_THREAD_ID;
2537 	cpu_status state;
2538 
2539 	if (name == NULL)
2540 		return B_BAD_VALUE;
2541 
2542 	state = disable_interrupts();
2543 	GRAB_THREAD_LOCK();
2544 
2545 	if (thread->id != id)
2546 		thread = thread_get_thread_struct_locked(id);
2547 
2548 	if (thread != NULL) {
2549 		if (thread->team == thread_get_current_thread()->team) {
2550 			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2551 			status = B_OK;
2552 		} else
2553 			status = B_NOT_ALLOWED;
2554 	}
2555 
2556 	RELEASE_THREAD_LOCK();
2557 	restore_interrupts(state);
2558 
2559 	return status;
2560 }
2561 
2562 
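/*!	Sets the priority of the thread with the given ID, clamping the value to
	the [THREAD_MIN_SET_PRIORITY, THREAD_MAX_SET_PRIORITY] range. The priority
	of idle threads cannot be changed.
	\return The thread's previous priority, or \c B_BAD_THREAD_ID respectively
		\c B_NOT_ALLOWED on error.
*/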
2563 status_t
2564 set_thread_priority(thread_id id, int32 priority)
2565 {
2566 	struct thread *thread;
2567 	int32 oldPriority;
2568 
2569 	// make sure the passed in priority is within bounds
2570 	if (priority > THREAD_MAX_SET_PRIORITY)
2571 		priority = THREAD_MAX_SET_PRIORITY;
2572 	if (priority < THREAD_MIN_SET_PRIORITY)
2573 		priority = THREAD_MIN_SET_PRIORITY;
2574 
2575 	thread = thread_get_current_thread();
2576 	if (thread->id == id) {
2577 		if (thread_is_idle_thread(thread))
2578 			return B_NOT_ALLOWED;
2579 
2580 		// It's the current thread, so we know it isn't in the run queue,
2581 		// and we can manipulate its structure directly.
2582 		oldPriority = thread->priority;
2583 			// Note that this might not return the correct value if we are
2584 			// preempted here, and another thread changes our priority before
2585 			// the next line is executed.
2586 		thread->priority = thread->next_priority = priority;
2587 	} else {
2588 		InterruptsSpinLocker _(gThreadSpinlock);
2589 
2590 		thread = thread_get_thread_struct_locked(id);
2591 		if (thread == NULL)
2592 			return B_BAD_THREAD_ID;
2593 
2594 		if (thread_is_idle_thread(thread))
2595 			return B_NOT_ALLOWED;
2596 
2597 		oldPriority = thread->priority;
2598 		scheduler_set_thread_priority(thread, priority);
2599 	}
2600 
2601 	return oldPriority;
2602 }
2603 
2604 
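/*!	Puts the current thread to sleep for the given time, using the generic
	thread blocking facility. Only \c B_SYSTEM_TIMEBASE is supported. A
	regular timeout (\c B_TIMED_OUT) as well as \c B_WOULD_BLOCK are reported
	as \c B_OK.
*/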
2605 status_t
2606 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2607 {
2608 	status_t status;
2609 
2610 	if (timebase != B_SYSTEM_TIMEBASE)
2611 		return B_BAD_VALUE;
2612 
2613 	InterruptsSpinLocker _(gThreadSpinlock);
2614 	struct thread* thread = thread_get_current_thread();
2615 
2616 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, NULL);
2617 	status = thread_block_with_timeout_locked(flags, timeout);
2618 
2619 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2620 		return B_OK;
2621 
2622 	return status;
2623 }
2624 
2625 
2626 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2627 status_t
2628 snooze(bigtime_t timeout)
2629 {
2630 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2631 }
2632 
2633 
2634 /*!	snooze_until() for internal kernel use only; doesn't interrupt on
2635 	signals.
2636 */
2637 status_t
2638 snooze_until(bigtime_t timeout, int timebase)
2639 {
2640 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2641 }
2642 
2643 
2644 status_t
2645 wait_for_thread(thread_id thread, status_t *_returnCode)
2646 {
2647 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2648 }
2649 
2650 
2651 status_t
2652 suspend_thread(thread_id id)
2653 {
2654 	if (id <= 0)
2655 		return B_BAD_VALUE;
2656 
2657 	return send_signal(id, SIGSTOP);
2658 }
2659 
2660 
2661 status_t
2662 resume_thread(thread_id id)
2663 {
2664 	if (id <= 0)
2665 		return B_BAD_VALUE;
2666 
2667 	return send_signal_etc(id, SIGCONT, SIGNAL_FLAG_DONT_RESTART_SYSCALL);
2668 		// This retains compatibility to BeOS which documents the
2669 		// combination of suspend_thread() and resume_thread() to
2670 		// interrupt threads waiting on semaphores.
2671 }
2672 
2673 
2674 thread_id
2675 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2676 	void *arg)
2677 {
2678 	thread_creation_attributes attributes;
2679 	attributes.entry = (thread_entry_func)function;
2680 	attributes.name = name;
2681 	attributes.priority = priority;
2682 	attributes.args1 = arg;
2683 	attributes.args2 = NULL;
2684 	attributes.stack_address = NULL;
2685 	attributes.stack_size = 0;
2686 	attributes.team = team_get_kernel_team()->id;
2687 	attributes.thread = -1;
2688 
2689 	return create_thread(attributes, true);
2690 }
2691 
2692 
2693 int
2694 getrlimit(int resource, struct rlimit * rlp)
2695 {
2696 	status_t error = common_getrlimit(resource, rlp);
2697 	if (error != B_OK) {
2698 		errno = error;
2699 		return -1;
2700 	}
2701 
2702 	return 0;
2703 }
2704 
2705 
2706 int
2707 setrlimit(int resource, const struct rlimit * rlp)
2708 {
2709 	status_t error = common_setrlimit(resource, rlp);
2710 	if (error != B_OK) {
2711 		errno = error;
2712 		return -1;
2713 	}
2714 
2715 	return 0;
2716 }
2717 
2718 
2719 //	#pragma mark - syscalls
2720 
2721 
2722 void
2723 _user_exit_thread(status_t returnValue)
2724 {
2725 	exit_thread(returnValue);
2726 }
2727 
2728 
2729 status_t
2730 _user_kill_thread(thread_id thread)
2731 {
2732 	return kill_thread(thread);
2733 }
2734 
2735 
2736 status_t
2737 _user_resume_thread(thread_id thread)
2738 {
2739 	return resume_thread(thread);
2740 }
2741 
2742 
2743 status_t
2744 _user_suspend_thread(thread_id thread)
2745 {
2746 	return suspend_thread(thread);
2747 }
2748 
2749 
2750 status_t
2751 _user_rename_thread(thread_id thread, const char *userName)
2752 {
2753 	char name[B_OS_NAME_LENGTH];
2754 
2755 	if (!IS_USER_ADDRESS(userName)
2756 		|| userName == NULL
2757 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2758 		return B_BAD_ADDRESS;
2759 
2760 	return rename_thread(thread, name);
2761 }
2762 
2763 
2764 int32
2765 _user_set_thread_priority(thread_id thread, int32 newPriority)
2766 {
2767 	return set_thread_priority(thread, newPriority);
2768 }
2769 
2770 
2771 thread_id
2772 _user_spawn_thread(thread_creation_attributes* userAttributes)
2773 {
2774 	thread_creation_attributes attributes;
2775 	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
2776 		|| user_memcpy(&attributes, userAttributes,
2777 				sizeof(attributes)) != B_OK) {
2778 		return B_BAD_ADDRESS;
2779 	}
2780 
2781 	if (attributes.stack_size != 0
2782 		&& (attributes.stack_size < MIN_USER_STACK_SIZE
2783 			|| attributes.stack_size > MAX_USER_STACK_SIZE)) {
2784 		return B_BAD_VALUE;
2785 	}
2786 
2787 	char name[B_OS_NAME_LENGTH];
2788 	thread_id threadID;
2789 
2790 	if (!IS_USER_ADDRESS(attributes.entry) || attributes.entry == NULL
2791 		|| (attributes.stack_address != NULL
2792 			&& !IS_USER_ADDRESS(attributes.stack_address))
2793 		|| (attributes.name != NULL && (!IS_USER_ADDRESS(attributes.name)
2794 			|| user_strlcpy(name, attributes.name, B_OS_NAME_LENGTH) < 0)))
2795 		return B_BAD_ADDRESS;
2796 
2797 	attributes.name = attributes.name != NULL ? name : "user thread";
2798 	attributes.team = thread_get_current_thread()->team->id;
2799 	attributes.thread = -1;
2800 
2801 	threadID = create_thread(attributes, false);
2802 
2803 	if (threadID >= 0)
2804 		user_debug_thread_created(threadID);
2805 
2806 	return threadID;
2807 }
2808 
2809 
2810 status_t
2811 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2812 {
2813 	// NOTE: We only know the system timebase at the moment.
2814 	syscall_restart_handle_timeout_pre(flags, timeout);
2815 
2816 	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2817 
2818 	return syscall_restart_handle_timeout_post(error, timeout);
2819 }
2820 
2821 
2822 void
2823 _user_thread_yield(void)
2824 {
2825 	thread_yield(true);
2826 }
2827 
2828 
2829 status_t
2830 _user_get_thread_info(thread_id id, thread_info *userInfo)
2831 {
2832 	thread_info info;
2833 	status_t status;
2834 
2835 	if (!IS_USER_ADDRESS(userInfo))
2836 		return B_BAD_ADDRESS;
2837 
2838 	status = _get_thread_info(id, &info, sizeof(thread_info));
2839 
2840 	if (status >= B_OK
2841 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2842 		return B_BAD_ADDRESS;
2843 
2844 	return status;
2845 }
2846 
2847 
2848 status_t
2849 _user_get_next_thread_info(team_id team, int32 *userCookie,
2850 	thread_info *userInfo)
2851 {
2852 	status_t status;
2853 	thread_info info;
2854 	int32 cookie;
2855 
2856 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2857 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2858 		return B_BAD_ADDRESS;
2859 
2860 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2861 	if (status < B_OK)
2862 		return status;
2863 
2864 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2865 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2866 		return B_BAD_ADDRESS;
2867 
2868 	return status;
2869 }
2870 
2871 
2872 thread_id
2873 _user_find_thread(const char *userName)
2874 {
2875 	char name[B_OS_NAME_LENGTH];
2876 
2877 	if (userName == NULL)
2878 		return find_thread(NULL);
2879 
2880 	if (!IS_USER_ADDRESS(userName)
2881 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2882 		return B_BAD_ADDRESS;
2883 
2884 	return find_thread(name);
2885 }
2886 
2887 
2888 status_t
2889 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2890 {
2891 	status_t returnCode;
2892 	status_t status;
2893 
2894 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2895 		return B_BAD_ADDRESS;
2896 
2897 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2898 
2899 	if (status == B_OK && userReturnCode != NULL
2900 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
2901 		return B_BAD_ADDRESS;
2902 	}
2903 
2904 	return syscall_restart_handle_post(status);
2905 }
2906 
2907 
2908 bool
2909 _user_has_data(thread_id thread)
2910 {
2911 	return has_data(thread);
2912 }
2913 
2914 
2915 status_t
2916 _user_send_data(thread_id thread, int32 code, const void *buffer,
2917 	size_t bufferSize)
2918 {
2919 	if (!IS_USER_ADDRESS(buffer))
2920 		return B_BAD_ADDRESS;
2921 
2922 	return send_data_etc(thread, code, buffer, bufferSize,
2923 		B_KILL_CAN_INTERRUPT);
2924 		// supports userland buffers
2925 }
2926 
2927 
2928 status_t
2929 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2930 {
2931 	thread_id sender;
2932 	status_t code;
2933 
2934 	if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
2935 		|| !IS_USER_ADDRESS(buffer))
2936 		return B_BAD_ADDRESS;
2937 
2938 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2939 		// supports userland buffers
2940 
2941 	if (_userSender != NULL)
2942 		if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
2943 			return B_BAD_ADDRESS;
2944 
2945 	return code;
2946 }
2947 
2948 
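/*!	Syscall backend for blocking the calling thread until another thread
	unblocks it via _user_unblock_thread(). If user_thread::wait_status is
	already <= 0, the thread has been unblocked before and the stored status
	is returned without blocking; otherwise the resulting wait status is
	stored back into the field.
*/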
2949 status_t
2950 _user_block_thread(uint32 flags, bigtime_t timeout)
2951 {
2952 	syscall_restart_handle_timeout_pre(flags, timeout);
2953 	flags |= B_CAN_INTERRUPT;
2954 
2955 	struct thread* thread = thread_get_current_thread();
2956 
2957 	InterruptsSpinLocker locker(gThreadSpinlock);
2958 
2959 	// check whether we have already been unblocked
2960 	if (thread->user_thread->wait_status <= 0)
2961 		return thread->user_thread->wait_status;
2962 
2963 	// nope, so wait
2964 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");
2965 	status_t status = thread_block_with_timeout_locked(flags, timeout);
2966 	thread->user_thread->wait_status = status;
2967 
2968 	return syscall_restart_handle_timeout_post(status, timeout);
2969 }
2970 
2971 
2972 status_t
2973 _user_unblock_thread(thread_id threadID, status_t status)
2974 {
2975 	InterruptsSpinLocker locker(gThreadSpinlock);
2976 	status_t error = user_unblock_thread(threadID, status);
2977 	scheduler_reschedule_if_necessary_locked();
2978 	return error;
2979 }
2980 
2981 
2982 status_t
2983 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
2984 {
2985 	enum {
2986 		MAX_USER_THREADS_TO_UNBLOCK	= 128
2987 	};
2988 
2989 	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
2990 		return B_BAD_ADDRESS;
2991 	if (count > MAX_USER_THREADS_TO_UNBLOCK)
2992 		return B_BAD_VALUE;
2993 
2994 	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
2995 	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
2996 		return B_BAD_ADDRESS;
2997 
2998 	InterruptsSpinLocker locker(gThreadSpinlock);
2999 	for (uint32 i = 0; i < count; i++)
3000 		user_unblock_thread(threads[i], status);
3001 
3002 	scheduler_reschedule_if_necessary_locked();
3003 
3004 	return B_OK;
3005 }
3006 
3007 
3008 // TODO: the following two functions don't belong here
3009 
3010 
3011 int
3012 _user_getrlimit(int resource, struct rlimit *urlp)
3013 {
3014 	struct rlimit rl;
3015 	int ret;
3016 
3017 	if (urlp == NULL)
3018 		return EINVAL;
3019 
3020 	if (!IS_USER_ADDRESS(urlp))
3021 		return B_BAD_ADDRESS;
3022 
3023 	ret = common_getrlimit(resource, &rl);
3024 
3025 	if (ret == 0) {
3026 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
3027 		if (ret < 0)
3028 			return ret;
3029 
3030 		return 0;
3031 	}
3032 
3033 	return ret;
3034 }
3035 
3036 
3037 int
3038 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
3039 {
3040 	struct rlimit resourceLimit;
3041 
3042 	if (userResourceLimit == NULL)
3043 		return EINVAL;
3044 
3045 	if (!IS_USER_ADDRESS(userResourceLimit)
3046 		|| user_memcpy(&resourceLimit, userResourceLimit,
3047 			sizeof(struct rlimit)) < B_OK)
3048 		return B_BAD_ADDRESS;
3049 
3050 	return common_setrlimit(resource, &resourceLimit);
3051 }
3052