xref: /haiku/src/system/kernel/thread.cpp (revision 1c09002cbee8e797a0f8bbfc5678dfadd39ee1a7)
1 /*
2  * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*! Threading routines */
12 
13 
14 #include <thread.h>
15 
16 #include <errno.h>
17 #include <malloc.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/resource.h>
22 
23 #include <OS.h>
24 
25 #include <util/AutoLock.h>
26 #include <util/khash.h>
27 
28 #include <arch/debug.h>
29 #include <boot/kernel_args.h>
30 #include <condition_variable.h>
31 #include <cpu.h>
32 #include <int.h>
33 #include <kimage.h>
34 #include <kscheduler.h>
35 #include <ksignal.h>
36 #include <Notifications.h>
37 #include <real_time_clock.h>
38 #include <slab/Slab.h>
39 #include <smp.h>
40 #include <syscalls.h>
41 #include <syscall_restart.h>
42 #include <team.h>
43 #include <tls.h>
44 #include <user_runtime.h>
45 #include <user_thread.h>
46 #include <vfs.h>
47 #include <vm/vm.h>
48 #include <vm/VMAddressSpace.h>
49 #include <wait_for_objects.h>
50 
51 
52 //#define TRACE_THREAD
53 #ifdef TRACE_THREAD
54 #	define TRACE(x) dprintf x
55 #else
56 #	define TRACE(x) ;
57 #endif
58 
59 
60 #define THREAD_MAX_MESSAGE_SIZE		65536
61 
62 
63 struct thread_key {
64 	thread_id id;
65 };
66 
67 // global
68 spinlock gThreadSpinlock = B_SPINLOCK_INITIALIZER;
69 
70 // thread list
71 static Thread sIdleThreads[B_MAX_CPU_COUNT];
72 static hash_table *sThreadHash = NULL;
73 static thread_id sNextThreadID = 1;
74 
75 // some arbitrarily chosen limits - should probably depend on the available
76 // memory (the limit is not yet enforced)
77 static int32 sMaxThreads = 4096;
78 static int32 sUsedThreads = 0;
79 
80 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
81 	Thread*	thread;
82 	team_id	teamID;
83 
84 	UndertakerEntry(Thread* thread, team_id teamID)
85 		:
86 		thread(thread),
87 		teamID(teamID)
88 	{
89 	}
90 };
91 
92 
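// Broadcasts thread life cycle events (e.g. THREAD_ADDED, THREAD_REMOVED) as
// KMessages to listeners registered for the "threads" notification service.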
93 class ThreadNotificationService : public DefaultNotificationService {
94 public:
95 	ThreadNotificationService()
96 		: DefaultNotificationService("threads")
97 	{
98 	}
99 
100 	void Notify(uint32 eventCode, team_id teamID, thread_id threadID,
101 		Thread* thread = NULL)
102 	{
103 		char eventBuffer[128];
104 		KMessage event;
105 		event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
106 		event.AddInt32("event", eventCode);
107 		event.AddInt32("team", teamID);
108 		event.AddInt32("thread", threadID);
109 		if (thread != NULL)
110 			event.AddPointer("threadStruct", thread);
111 
112 		DefaultNotificationService::Notify(event, eventCode);
113 	}
114 
115 	void Notify(uint32 eventCode, Thread* thread)
116 	{
117 		return Notify(eventCode, thread->team->id, thread->id, thread);
118 	}
119 };
120 
121 
122 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
123 static ConditionVariable sUndertakerCondition;
124 static ThreadNotificationService sNotificationService;
125 
126 
127 // object cache to allocate thread structures from
128 static object_cache* sThreadCache;
129 
130 static void thread_kthread_entry(void);
131 static void thread_kthread_exit(void);
132 
133 
134 /*!	Inserts a thread into a team.
135 	You must hold the team lock when you call this function.
136 */
137 static void
138 insert_thread_into_team(Team *team, Thread *thread)
139 {
140 	thread->team_next = team->thread_list;
141 	team->thread_list = thread;
142 	team->num_threads++;
143 
144 	if (team->num_threads == 1) {
145 		// this was the first thread
146 		team->main_thread = thread;
147 	}
148 	thread->team = team;
149 }
150 
151 
152 /*!	Removes a thread from a team.
153 	You must hold the team lock when you call this function.
154 */
155 static void
156 remove_thread_from_team(Team *team, Thread *thread)
157 {
158 	Thread *temp, *last = NULL;
159 
160 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
161 		if (temp == thread) {
162 			if (last == NULL)
163 				team->thread_list = temp->team_next;
164 			else
165 				last->team_next = temp->team_next;
166 
167 			team->num_threads--;
168 			break;
169 		}
170 		last = temp;
171 	}
172 }
173 
174 
175 static int
176 thread_struct_compare(void *_t, const void *_key)
177 {
178 	Thread *thread = (Thread*)_t;
179 	const struct thread_key *key = (const struct thread_key*)_key;
180 
181 	if (thread->id == key->id)
182 		return 0;
183 
184 	return 1;
185 }
186 
187 
188 static uint32
189 thread_struct_hash(void *_t, const void *_key, uint32 range)
190 {
191 	Thread *thread = (Thread*)_t;
192 	const struct thread_key *key = (const struct thread_key*)_key;
193 
194 	if (thread != NULL)
195 		return thread->id % range;
196 
197 	return (uint32)key->id % range;
198 }
199 
200 
201 static void
202 reset_signals(Thread *thread)
203 {
204 	thread->sig_pending = 0;
205 	thread->sig_block_mask = 0;
206 	thread->sig_temp_enabled = 0;
207 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
208 	thread->signal_stack_base = 0;
209 	thread->signal_stack_size = 0;
210 	thread->signal_stack_enabled = false;
211 }
212 
213 
214 /*!	Allocates and fills in thread structure.
215 
216 	\param threadID The ID to be assigned to the new thread. If
217 		  \code < 0 \endcode a fresh one is allocated.
218 	\param inthread initialize this existing thread struct if non-\c NULL
219 */
220 static Thread *
221 create_thread_struct(Thread *inthread, const char *name,
222 	thread_id threadID, struct cpu_ent *cpu)
223 {
224 	Thread *thread;
225 	char temp[64];
226 
227 	if (inthread == NULL) {
228 		thread = (Thread*)object_cache_alloc(sThreadCache, 0);
229 		if (thread == NULL)
230 			return NULL;
231 
232 		scheduler_on_thread_create(thread);
233 			// TODO: We could use the object cache object
234 			// constructor/destructor!
235 	} else
236 		thread = inthread;
237 
238 	if (name != NULL)
239 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
240 	else
241 		strcpy(thread->name, "unnamed thread");
242 
243 	thread->flags = 0;
244 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
245 	thread->team = NULL;
246 	thread->cpu = cpu;
247 	thread->previous_cpu = NULL;
248 	thread->pinned_to_cpu = 0;
249 	thread->fault_handler = 0;
250 	thread->page_faults_allowed = 1;
251 	thread->kernel_stack_area = -1;
252 	thread->kernel_stack_base = 0;
253 	thread->user_stack_area = -1;
254 	thread->user_stack_base = 0;
255 	thread->user_local_storage = 0;
256 	thread->kernel_errno = 0;
257 	thread->team_next = NULL;
258 	thread->queue_next = NULL;
259 	thread->priority = thread->next_priority = -1;
260 	thread->io_priority = -1;
261 	thread->args1 = NULL;  thread->args2 = NULL;
262 	thread->alarm.period = 0;
263 	reset_signals(thread);
264 	thread->in_kernel = true;
265 	thread->was_yielded = false;
266 	thread->user_time = 0;
267 	thread->kernel_time = 0;
268 	thread->last_time = 0;
269 	thread->exit.status = 0;
270 	thread->exit.reason = 0;
271 	thread->exit.signal = 0;
272 	list_init(&thread->exit.waiters);
273 	thread->select_infos = NULL;
274 	thread->post_interrupt_callback = NULL;
275 	thread->post_interrupt_data = NULL;
276 	thread->user_thread = NULL;
277 
278 	sprintf(temp, "thread_%ld_retcode_sem", thread->id);
279 	thread->exit.sem = create_sem(0, temp);
280 	if (thread->exit.sem < B_OK)
281 		goto err1;
282 
283 	sprintf(temp, "%s send", thread->name);
284 	thread->msg.write_sem = create_sem(1, temp);
285 	if (thread->msg.write_sem < B_OK)
286 		goto err2;
287 
288 	sprintf(temp, "%s receive", thread->name);
289 	thread->msg.read_sem = create_sem(0, temp);
290 	if (thread->msg.read_sem < B_OK)
291 		goto err3;
292 
293 	if (arch_thread_init_thread_struct(thread) < B_OK)
294 		goto err4;
295 
296 	return thread;
297 
298 err4:
299 	delete_sem(thread->msg.read_sem);
300 err3:
301 	delete_sem(thread->msg.write_sem);
302 err2:
303 	delete_sem(thread->exit.sem);
304 err1:
305 	// ToDo: put them in the dead queue instead?
306 	if (inthread == NULL) {
307 		scheduler_on_thread_destroy(thread);
308 		object_cache_free(sThreadCache, thread, 0);
309 	}
310 
311 	return NULL;
312 }
313 
314 
315 static void
316 delete_thread_struct(Thread *thread)
317 {
318 	delete_sem(thread->exit.sem);
319 	delete_sem(thread->msg.write_sem);
320 	delete_sem(thread->msg.read_sem);
321 
322 	scheduler_on_thread_destroy(thread);
323 	object_cache_free(sThreadCache, thread, 0);
324 }
325 
326 
327 /*! This function gets run by a new thread before anything else */
328 static void
329 thread_kthread_entry(void)
330 {
331 	Thread *thread = thread_get_current_thread();
332 
333 	// The thread is new and has been scheduled the first time. Notify the user
334 	// debugger code.
335 	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
336 		user_debug_thread_scheduled(thread);
337 
338 	// simulates the thread spinlock release that would occur if the thread had been
339 	// rescheduled from. The reschedule didn't happen because the thread is new.
340 	RELEASE_THREAD_LOCK();
341 
342 	// start tracking time
343 	thread->last_time = system_time();
344 
345 	enable_interrupts(); // this essentially simulates a return-from-interrupt
346 }
347 
348 
349 static void
350 thread_kthread_exit(void)
351 {
352 	Thread *thread = thread_get_current_thread();
353 
354 	thread->exit.reason = THREAD_RETURN_EXIT;
355 	thread_exit();
356 }
357 
358 
359 /*!	Initializes the thread and jumps to its userspace entry point.
360 	This function is called at creation time of every user thread,
361 	but not for a team's main thread.
362 */
363 static int
364 _create_user_thread_kentry(void)
365 {
366 	Thread *thread = thread_get_current_thread();
367 
368 	// jump to the entry point in user space
369 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
370 		thread->args1, thread->args2);
371 
372 	// only get here if the above call fails
373 	return 0;
374 }
375 
376 
377 /*! Initializes the thread and calls its kernel space entry point. */
378 static int
379 _create_kernel_thread_kentry(void)
380 {
381 	Thread *thread = thread_get_current_thread();
382 	int (*func)(void *args) = (int (*)(void *))thread->entry;
383 
384 	// call the entry function with the appropriate args
385 	return func(thread->args1);
386 }
387 
388 
389 /*!	Creates a new thread in the team with the specified team ID.
390 
391 	\param threadID The ID to be assigned to the new thread. If
392 		  \code < 0 \endcode a fresh one is allocated.
393 */
394 static thread_id
395 create_thread(thread_creation_attributes& attributes, bool kernel)
396 {
397 	Thread *thread, *currentThread;
398 	Team *team;
399 	cpu_status state;
400 	char stack_name[B_OS_NAME_LENGTH];
401 	status_t status;
402 	bool abort = false;
403 	bool debugNewThread = false;
404 
405 	TRACE(("create_thread(%s, id = %ld, %s)\n", attributes.name,
406 		attributes.thread, kernel ? "kernel" : "user"));
407 
408 	thread = create_thread_struct(NULL, attributes.name, attributes.thread,
409 		NULL);
410 	if (thread == NULL)
411 		return B_NO_MEMORY;
412 
413 	thread->priority = attributes.priority == -1
414 		? B_NORMAL_PRIORITY : attributes.priority;
415 	thread->next_priority = thread->priority;
416 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
417 	thread->state = B_THREAD_SUSPENDED;
418 	thread->next_state = B_THREAD_SUSPENDED;
419 
420 	// init debug structure
421 	init_thread_debug_info(&thread->debug_info);
422 
423 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", attributes.name,
424 		thread->id);
425 	thread->kernel_stack_area = create_area(stack_name,
426 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
427 		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
428 		B_FULL_LOCK,
429 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
430 
431 	if (thread->kernel_stack_area < 0) {
432 		// we're not yet part of a team, so we can just bail out
433 		status = thread->kernel_stack_area;
434 
435 		dprintf("create_thread: error creating kernel stack: %s!\n",
436 			strerror(status));
437 
438 		delete_thread_struct(thread);
439 		return status;
440 	}
441 
442 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
443 		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
444 
445 	state = disable_interrupts();
446 	GRAB_THREAD_LOCK();
447 
448 	// If the new thread belongs to the same team as the current thread,
449 	// it may inherit some of the thread debug flags.
450 	currentThread = thread_get_current_thread();
451 	if (currentThread && currentThread->team->id == attributes.team) {
452 		// inherit all user flags...
453 		int32 debugFlags = currentThread->debug_info.flags
454 			& B_THREAD_DEBUG_USER_FLAG_MASK;
455 
456 		// ... except the syscall tracing flags, unless explicitly specified
457 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
458 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
459 				| B_THREAD_DEBUG_POST_SYSCALL);
460 		}
461 
462 		thread->debug_info.flags = debugFlags;
463 
464 		// stop the new thread, if desired
465 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
466 
467 		// copy signal handlers
468 		memcpy(thread->sig_action, currentThread->sig_action,
469 			sizeof(thread->sig_action));
470 	}
471 
472 	// insert into global list
473 	hash_insert(sThreadHash, thread);
474 	sUsedThreads++;
475 	scheduler_on_thread_init(thread);
476 	RELEASE_THREAD_LOCK();
477 
478 	GRAB_TEAM_LOCK();
479 	// look at the team, make sure it's not being deleted
480 	team = team_get_team_struct_locked(attributes.team);
481 
482 	if (team == NULL || team->state == TEAM_STATE_DEATH
483 		|| team->death_entry != NULL) {
484 		abort = true;
485 	}
486 
487 	if (!abort && !kernel) {
488 		thread->user_thread = team_allocate_user_thread(team);
489 		abort = thread->user_thread == NULL;
490 	}
491 
492 	if (!abort) {
493 		// Debug the new thread, if the parent thread required that (see above),
494 		// or the respective global team debug flag is set. But only, if a
495 		// or the respective global team debug flag is set. But only if a
496 		debugNewThread |= (atomic_get(&team->debug_info.flags)
497 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
498 		if (debugNewThread
499 			&& (atomic_get(&team->debug_info.flags)
500 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
501 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
502 		}
503 
504 		insert_thread_into_team(team, thread);
505 	}
506 
507 	RELEASE_TEAM_LOCK();
508 	if (abort) {
509 		GRAB_THREAD_LOCK();
510 		hash_remove(sThreadHash, thread);
511 		RELEASE_THREAD_LOCK();
512 	}
513 	restore_interrupts(state);
514 	if (abort) {
515 		delete_area(thread->kernel_stack_area);
516 		delete_thread_struct(thread);
517 		return B_BAD_TEAM_ID;
518 	}
519 
520 	thread->args1 = attributes.args1;
521 	thread->args2 = attributes.args2;
522 	thread->entry = attributes.entry;
523 	status = thread->id;
524 
525 	// notify listeners
526 	sNotificationService.Notify(THREAD_ADDED, thread);
527 
528 	if (kernel) {
529 		// this sets up an initial kthread stack that runs the entry
530 
531 		// Note: whatever function wants to set up a user stack later for this
532 		// thread must initialize the TLS for it
533 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
534 			&thread_kthread_entry, &thread_kthread_exit);
535 	} else {
536 		// create user stack
537 
538 		// the stack will be between USER_STACK_REGION and the main thread stack
539 		// area (the user stack of the main thread is created in
540 		// team_create_team())
541 		if (attributes.stack_address == NULL) {
542 			thread->user_stack_base = USER_STACK_REGION;
543 			if (attributes.stack_size <= 0)
544 				thread->user_stack_size = USER_STACK_SIZE;
545 			else
546 				thread->user_stack_size = PAGE_ALIGN(attributes.stack_size);
547 			thread->user_stack_size += USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
548 
549 			snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack",
550 				attributes.name, thread->id);
551 			virtual_address_restrictions virtualRestrictions = {};
552 			virtualRestrictions.address = (void*)thread->user_stack_base;
553 			virtualRestrictions.address_specification = B_BASE_ADDRESS;
554 			physical_address_restrictions physicalRestrictions = {};
555 			thread->user_stack_area = create_area_etc(team->id, stack_name,
556 				thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
557 				B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0,
558 				&virtualRestrictions, &physicalRestrictions,
559 				(void**)&thread->user_stack_base);
560 			if (thread->user_stack_area < B_OK
561 				|| arch_thread_init_tls(thread) < B_OK) {
562 				// great, we have a fully running thread without a (usable)
563 				// stack
564 				dprintf("create_thread: unable to create proper user stack!\n");
565 				status = thread->user_stack_area;
566 				kill_thread(thread->id);
567 			}
568 		} else {
569 			thread->user_stack_base = (addr_t)attributes.stack_address;
570 			thread->user_stack_size = attributes.stack_size;
571 		}
572 
573 		user_debug_update_new_thread_flags(thread->id);
574 
575 		// copy the user entry over to the args field in the thread struct
576 		// the function this will call will immediately switch the thread into
577 		// user space.
578 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
579 			&thread_kthread_entry, &thread_kthread_exit);
580 	}
581 
582 	return status;
583 }
584 
585 
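/*!	Body of the undertaker kernel thread: waits for dying threads to be queued
	on sUndertakerEntries, then deletes their kernel stack areas, removes them
	from the kernel team, and frees their thread structures.
*/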
586 static status_t
587 undertaker(void* /*args*/)
588 {
589 	while (true) {
590 		// wait for a thread to bury
591 		InterruptsSpinLocker locker(gThreadSpinlock);
592 
593 		while (sUndertakerEntries.IsEmpty()) {
594 			ConditionVariableEntry conditionEntry;
595 			sUndertakerCondition.Add(&conditionEntry);
596 			locker.Unlock();
597 
598 			conditionEntry.Wait();
599 
600 			locker.Lock();
601 		}
602 
603 		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
604 		locker.Unlock();
605 
606 		UndertakerEntry entry = *_entry;
607 			// we need a copy, since the original entry is on the thread's stack
608 
609 		// we've got an entry
610 		Thread* thread = entry.thread;
611 
612 		// delete the old kernel stack area
613 		delete_area(thread->kernel_stack_area);
614 
615 		// remove this thread from all of the global lists
616 		disable_interrupts();
617 		GRAB_TEAM_LOCK();
618 
619 		remove_thread_from_team(team_get_kernel_team(), thread);
620 
621 		RELEASE_TEAM_LOCK();
622 		enable_interrupts();
623 			// needed for the debugger notification below
624 
625 		// free the thread structure
626 		delete_thread_struct(thread);
627 	}
628 
629 	// never can get here
630 	return B_OK;
631 }
632 
633 
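/*!	Returns the ID of the semaphore the given thread is currently blocked on,
	or -1 if it is not waiting on a semaphore.
*/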
634 static sem_id
635 get_thread_wait_sem(Thread* thread)
636 {
637 	if (thread->state == B_THREAD_WAITING
638 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
639 		return (sem_id)(addr_t)thread->wait.object;
640 	}
641 	return -1;
642 }
643 
644 
645 /*!	Fills the thread_info structure with information from the specified
646 	thread.
647 	The thread lock must be held when called.
648 */
649 static void
650 fill_thread_info(Thread *thread, thread_info *info, size_t size)
651 {
652 	info->thread = thread->id;
653 	info->team = thread->team->id;
654 
655 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
656 
657 	if (thread->state == B_THREAD_WAITING) {
658 		info->state = B_THREAD_WAITING;
659 
660 		switch (thread->wait.type) {
661 			case THREAD_BLOCK_TYPE_SNOOZE:
662 				info->state = B_THREAD_ASLEEP;
663 				break;
664 
665 			case THREAD_BLOCK_TYPE_SEMAPHORE:
666 			{
667 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
668 				if (sem == thread->msg.read_sem)
669 					info->state = B_THREAD_RECEIVING;
670 				break;
671 			}
672 
673 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
674 			default:
675 				break;
676 		}
677 	} else
678 		info->state = (thread_state)thread->state;
679 
680 	info->priority = thread->priority;
681 	info->user_time = thread->user_time;
682 	info->kernel_time = thread->kernel_time;
683 	info->stack_base = (void *)thread->user_stack_base;
684 	info->stack_end = (void *)(thread->user_stack_base
685 		+ thread->user_stack_size);
686 	info->sem = get_thread_wait_sem(thread);
687 }
688 
689 
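/*!	Sends a message to the given thread: waits until the target's single
	message slot is free (msg.write_sem), copies the buffer into a kernel
	allocation, and releases msg.read_sem to wake up the receiver.
*/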
690 static status_t
691 send_data_etc(thread_id id, int32 code, const void *buffer, size_t bufferSize,
692 	int32 flags)
693 {
694 	Thread *target;
695 	sem_id cachedSem;
696 	cpu_status state;
697 	status_t status;
698 
699 	state = disable_interrupts();
700 	GRAB_THREAD_LOCK();
701 	target = thread_get_thread_struct_locked(id);
702 	if (!target) {
703 		RELEASE_THREAD_LOCK();
704 		restore_interrupts(state);
705 		return B_BAD_THREAD_ID;
706 	}
707 	cachedSem = target->msg.write_sem;
708 	RELEASE_THREAD_LOCK();
709 	restore_interrupts(state);
710 
711 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
712 		return B_NO_MEMORY;
713 
714 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
715 	if (status == B_INTERRUPTED) {
716 		// We got interrupted by a signal
717 		return status;
718 	}
719 	if (status != B_OK) {
720 		// Any other acquisition problems may be due to thread deletion
721 		return B_BAD_THREAD_ID;
722 	}
723 
724 	void* data;
725 	if (bufferSize > 0) {
726 		data = malloc(bufferSize);
727 		if (data == NULL)
728 			return B_NO_MEMORY;
729 		if (user_memcpy(data, buffer, bufferSize) != B_OK) {
730 			free(data);
731 			return B_BAD_DATA;
732 		}
733 	} else
734 		data = NULL;
735 
736 	state = disable_interrupts();
737 	GRAB_THREAD_LOCK();
738 
739 	// The target thread could have been deleted at this point
740 	target = thread_get_thread_struct_locked(id);
741 	if (target == NULL) {
742 		RELEASE_THREAD_LOCK();
743 		restore_interrupts(state);
744 		free(data);
745 		return B_BAD_THREAD_ID;
746 	}
747 
748 	// Save message information
749 	target->msg.sender = thread_get_current_thread()->id;
750 	target->msg.code = code;
751 	target->msg.size = bufferSize;
752 	target->msg.buffer = data;
753 	cachedSem = target->msg.read_sem;
754 
755 	RELEASE_THREAD_LOCK();
756 	restore_interrupts(state);
757 
758 	release_sem(cachedSem);
759 	return B_OK;
760 }
761 
762 
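/*!	Waits on the calling thread's msg.read_sem for a message, copies up to
	\a bufferSize bytes of it into \a buffer, stores the sender's ID in
	\a _sender, releases msg.write_sem for the next sender, and returns the
	message code.
*/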
763 static int32
764 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
765 	int32 flags)
766 {
767 	Thread *thread = thread_get_current_thread();
768 	status_t status;
769 	size_t size;
770 	int32 code;
771 
772 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
773 	if (status < B_OK) {
774 		// Actually, we're not supposed to return error codes
775 		// but since the only reason this can fail is that we
776 		// were killed, it's probably okay to do so (but also
777 		// meaningless).
778 		return status;
779 	}
780 
781 	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
782 		size = min_c(bufferSize, thread->msg.size);
783 		status = user_memcpy(buffer, thread->msg.buffer, size);
784 		if (status != B_OK) {
785 			free(thread->msg.buffer);
786 			release_sem(thread->msg.write_sem);
787 			return status;
788 		}
789 	}
790 
791 	*_sender = thread->msg.sender;
792 	code = thread->msg.code;
793 
794 	free(thread->msg.buffer);
795 	release_sem(thread->msg.write_sem);
796 
797 	return code;
798 }
799 
800 
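/*!	Common backend for getrlimit(): file descriptor and vnode monitor limits
	are forwarded to the VFS, core files are reported as disabled (0/0), and
	RLIMIT_STACK reports the current thread's user stack size.
*/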
801 static status_t
802 common_getrlimit(int resource, struct rlimit * rlp)
803 {
804 	if (!rlp)
805 		return B_BAD_ADDRESS;
806 
807 	switch (resource) {
808 		case RLIMIT_NOFILE:
809 		case RLIMIT_NOVMON:
810 			return vfs_getrlimit(resource, rlp);
811 
812 		case RLIMIT_CORE:
813 			rlp->rlim_cur = 0;
814 			rlp->rlim_max = 0;
815 			return B_OK;
816 
817 		case RLIMIT_STACK:
818 		{
819 			Thread *thread = thread_get_current_thread();
820 			if (!thread)
821 				return B_ERROR;
822 			rlp->rlim_cur = thread->user_stack_size;
823 			rlp->rlim_max = thread->user_stack_size;
824 			return B_OK;
825 		}
826 
827 		default:
828 			return EINVAL;
829 	}
830 
831 	return B_OK;
832 }
833 
834 
835 static status_t
836 common_setrlimit(int resource, const struct rlimit * rlp)
837 {
838 	if (!rlp)
839 		return B_BAD_ADDRESS;
840 
841 	switch (resource) {
842 		case RLIMIT_NOFILE:
843 		case RLIMIT_NOVMON:
844 			return vfs_setrlimit(resource, rlp);
845 
846 		case RLIMIT_CORE:
847 			// We don't support core files, so only allow setting the limits to 0/0.
848 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
849 				return EINVAL;
850 			return B_OK;
851 
852 		default:
853 			return EINVAL;
854 	}
855 
856 	return B_OK;
857 }
858 
859 
860 //	#pragma mark - debugger calls
861 
862 
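/*!	KDL command: resets the priority of the thread with the given ID (or of
	all threads, if no ID is given) to B_NORMAL_PRIORITY, if it is currently
	above B_DISPLAY_PRIORITY.
*/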
863 static int
864 make_thread_unreal(int argc, char **argv)
865 {
866 	Thread *thread;
867 	struct hash_iterator i;
868 	int32 id = -1;
869 
870 	if (argc > 2) {
871 		print_debugger_command_usage(argv[0]);
872 		return 0;
873 	}
874 
875 	if (argc > 1)
876 		id = strtoul(argv[1], NULL, 0);
877 
878 	hash_open(sThreadHash, &i);
879 
880 	while ((thread = (Thread*)hash_next(sThreadHash, &i)) != NULL) {
881 		if (id != -1 && thread->id != id)
882 			continue;
883 
884 		if (thread->priority > B_DISPLAY_PRIORITY) {
885 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
886 			kprintf("thread %ld made unreal\n", thread->id);
887 		}
888 	}
889 
890 	hash_close(sThreadHash, &i, false);
891 	return 0;
892 }
893 
894 
895 static int
896 set_thread_prio(int argc, char **argv)
897 {
898 	Thread *thread;
899 	struct hash_iterator i;
900 	int32 id;
901 	int32 prio;
902 
903 	if (argc > 3 || argc < 2) {
904 		print_debugger_command_usage(argv[0]);
905 		return 0;
906 	}
907 
908 	prio = strtoul(argv[1], NULL, 0);
909 	if (prio > THREAD_MAX_SET_PRIORITY)
910 		prio = THREAD_MAX_SET_PRIORITY;
911 	if (prio < THREAD_MIN_SET_PRIORITY)
912 		prio = THREAD_MIN_SET_PRIORITY;
913 
914 	if (argc > 2)
915 		id = strtoul(argv[2], NULL, 0);
916 	else
917 		id = thread_get_current_thread()->id;
918 
919 	hash_open(sThreadHash, &i);
920 
921 	while ((thread = (Thread*)hash_next(sThreadHash, &i)) != NULL) {
922 		if (thread->id != id)
923 			continue;
924 		thread->priority = thread->next_priority = prio;
925 		kprintf("thread %ld set to priority %ld\n", id, prio);
926 		break;
927 	}
928 	if (!thread)
929 		kprintf("thread %ld (%#lx) not found\n", id, id);
930 
931 	hash_close(sThreadHash, &i, false);
932 	return 0;
933 }
934 
935 
936 static int
937 make_thread_suspended(int argc, char **argv)
938 {
939 	Thread *thread;
940 	struct hash_iterator i;
941 	int32 id;
942 
943 	if (argc > 2) {
944 		print_debugger_command_usage(argv[0]);
945 		return 0;
946 	}
947 
948 	if (argc == 1)
949 		id = thread_get_current_thread()->id;
950 	else
951 		id = strtoul(argv[1], NULL, 0);
952 
953 	hash_open(sThreadHash, &i);
954 
955 	while ((thread = (Thread*)hash_next(sThreadHash, &i)) != NULL) {
956 		if (thread->id != id)
957 			continue;
958 
959 		thread->next_state = B_THREAD_SUSPENDED;
960 		kprintf("thread %ld suspended\n", id);
961 		break;
962 	}
963 	if (!thread)
964 		kprintf("thread %ld (%#lx) not found\n", id, id);
965 
966 	hash_close(sThreadHash, &i, false);
967 	return 0;
968 }
969 
970 
971 static int
972 make_thread_resumed(int argc, char **argv)
973 {
974 	Thread *thread;
975 	struct hash_iterator i;
976 	int32 id;
977 
978 	if (argc != 2) {
979 		print_debugger_command_usage(argv[0]);
980 		return 0;
981 	}
982 
983 	// force user to enter a thread id, as using
984 	// the current thread is usually not intended
985 	id = strtoul(argv[1], NULL, 0);
986 
987 	hash_open(sThreadHash, &i);
988 
989 	while ((thread = (Thread*)hash_next(sThreadHash, &i)) != NULL) {
990 		if (thread->id != id)
991 			continue;
992 
993 		if (thread->state == B_THREAD_SUSPENDED) {
994 			scheduler_enqueue_in_run_queue(thread);
995 			kprintf("thread %ld resumed\n", thread->id);
996 		}
997 		break;
998 	}
999 	if (!thread)
1000 		kprintf("thread %ld (%#lx) not found\n", id, id);
1001 
1002 	hash_close(sThreadHash, &i, false);
1003 	return 0;
1004 }
1005 
1006 
1007 static int
1008 drop_into_debugger(int argc, char **argv)
1009 {
1010 	status_t err;
1011 	int32 id;
1012 
1013 	if (argc > 2) {
1014 		print_debugger_command_usage(argv[0]);
1015 		return 0;
1016 	}
1017 
1018 	if (argc == 1)
1019 		id = thread_get_current_thread()->id;
1020 	else
1021 		id = strtoul(argv[1], NULL, 0);
1022 
1023 	err = _user_debug_thread(id);
1024 	if (err)
1025 		kprintf("drop failed\n");
1026 	else
1027 		kprintf("thread %ld dropped into user debugger\n", id);
1028 
1029 	return 0;
1030 }
1031 
1032 
1033 static const char *
1034 state_to_text(Thread *thread, int32 state)
1035 {
1036 	switch (state) {
1037 		case B_THREAD_READY:
1038 			return "ready";
1039 
1040 		case B_THREAD_RUNNING:
1041 			return "running";
1042 
1043 		case B_THREAD_WAITING:
1044 		{
1045 			if (thread != NULL) {
1046 				switch (thread->wait.type) {
1047 					case THREAD_BLOCK_TYPE_SNOOZE:
1048 						return "zzz";
1049 
1050 					case THREAD_BLOCK_TYPE_SEMAPHORE:
1051 					{
1052 						sem_id sem = (sem_id)(addr_t)thread->wait.object;
1053 						if (sem == thread->msg.read_sem)
1054 							return "receive";
1055 						break;
1056 					}
1057 				}
1058 			}
1059 
1060 			return "waiting";
1061 		}
1062 
1063 		case B_THREAD_SUSPENDED:
1064 			return "suspended";
1065 
1066 		case THREAD_STATE_FREE_ON_RESCHED:
1067 			return "death";
1068 
1069 		default:
1070 			return "UNKNOWN";
1071 	}
1072 }
1073 
1074 
1075 static void
1076 print_thread_list_table_head()
1077 {
1078 	kprintf("thread         id  state     wait for   object  cpu pri  stack    "
1079 		"  team  name\n");
1080 }
1081 
1082 
1083 static void
1084 _dump_thread_info(Thread *thread, bool shortInfo)
1085 {
1086 	if (shortInfo) {
1087 		kprintf("%p %6ld  %-10s", thread, thread->id, state_to_text(thread,
1088 			thread->state));
1089 
1090 		// does it block on a semaphore or a condition variable?
1091 		if (thread->state == B_THREAD_WAITING) {
1092 			switch (thread->wait.type) {
1093 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1094 				{
1095 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1096 					if (sem == thread->msg.read_sem)
1097 						kprintf("                    ");
1098 					else
1099 						kprintf("sem  %12ld   ", sem);
1100 					break;
1101 				}
1102 
1103 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1104 					kprintf("cvar   %p   ", thread->wait.object);
1105 					break;
1106 
1107 				case THREAD_BLOCK_TYPE_SNOOZE:
1108 					kprintf("                    ");
1109 					break;
1110 
1111 				case THREAD_BLOCK_TYPE_SIGNAL:
1112 					kprintf("signal              ");
1113 					break;
1114 
1115 				case THREAD_BLOCK_TYPE_MUTEX:
1116 					kprintf("mutex  %p   ", thread->wait.object);
1117 					break;
1118 
1119 				case THREAD_BLOCK_TYPE_RW_LOCK:
1120 					kprintf("rwlock %p   ", thread->wait.object);
1121 					break;
1122 
1123 				case THREAD_BLOCK_TYPE_OTHER:
1124 					kprintf("other               ");
1125 					break;
1126 
1127 				default:
1128 					kprintf("???    %p   ", thread->wait.object);
1129 					break;
1130 			}
1131 		} else
1132 			kprintf("        -           ");
1133 
1134 		// on which CPU does it run?
1135 		if (thread->cpu)
1136 			kprintf("%2d", thread->cpu->cpu_num);
1137 		else
1138 			kprintf(" -");
1139 
1140 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1141 			(void *)thread->kernel_stack_base, thread->team->id,
1142 			thread->name != NULL ? thread->name : "<NULL>");
1143 
1144 		return;
1145 	}
1146 
1147 	// print the long info
1148 
1149 	struct death_entry *death = NULL;
1150 
1151 	kprintf("THREAD: %p\n", thread);
1152 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1153 	kprintf("name:               \"%s\"\n", thread->name);
1154 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1155 		thread->all_next, thread->team_next, thread->queue_next);
1156 	kprintf("priority:           %ld (next %ld, I/O: %ld)\n", thread->priority,
1157 		thread->next_priority, thread->io_priority);
1158 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1159 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1160 	kprintf("cpu:                %p ", thread->cpu);
1161 	if (thread->cpu)
1162 		kprintf("(%d)\n", thread->cpu->cpu_num);
1163 	else
1164 		kprintf("\n");
1165 	kprintf("sig_pending:        %#" B_PRIx32 " (blocked: %#" B_PRIx32
1166 		", temp enabled: %#" B_PRIx32 ")\n", thread->sig_pending,
1167 		thread->sig_block_mask, thread->sig_temp_enabled);
1168 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1169 
1170 	if (thread->state == B_THREAD_WAITING) {
1171 		kprintf("waiting for:        ");
1172 
1173 		switch (thread->wait.type) {
1174 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1175 			{
1176 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1177 				if (sem == thread->msg.read_sem)
1178 					kprintf("data\n");
1179 				else
1180 					kprintf("semaphore %ld\n", sem);
1181 				break;
1182 			}
1183 
1184 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1185 				kprintf("condition variable %p\n", thread->wait.object);
1186 				break;
1187 
1188 			case THREAD_BLOCK_TYPE_SNOOZE:
1189 				kprintf("snooze()\n");
1190 				break;
1191 
1192 			case THREAD_BLOCK_TYPE_SIGNAL:
1193 				kprintf("signal\n");
1194 				break;
1195 
1196 			case THREAD_BLOCK_TYPE_MUTEX:
1197 				kprintf("mutex %p\n", thread->wait.object);
1198 				break;
1199 
1200 			case THREAD_BLOCK_TYPE_RW_LOCK:
1201 				kprintf("rwlock %p\n", thread->wait.object);
1202 				break;
1203 
1204 			case THREAD_BLOCK_TYPE_OTHER:
1205 				kprintf("other (%s)\n", (char*)thread->wait.object);
1206 				break;
1207 
1208 			default:
1209 				kprintf("unknown (%p)\n", thread->wait.object);
1210 				break;
1211 		}
1212 	}
1213 
1214 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1215 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1216 	kprintf("entry:              %p\n", (void *)thread->entry);
1217 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1218 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1219 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1220 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1221 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1222 	kprintf("  exit.waiters:\n");
1223 	while ((death = (struct death_entry*)list_get_next_item(
1224 			&thread->exit.waiters, death)) != NULL) {
1225 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1226 	}
1227 
1228 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1229 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1230 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1231 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1232 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1233 	kprintf("user_thread:        %p\n", (void *)thread->user_thread);
1234 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1235 		strerror(thread->kernel_errno));
1236 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1237 	kprintf("user_time:          %Ld\n", thread->user_time);
1238 	kprintf("flags:              0x%lx\n", thread->flags);
1239 	kprintf("architecture dependent section:\n");
1240 	arch_thread_dump_info(&thread->arch_info);
1241 }
1242 
1243 
1244 static int
1245 dump_thread_info(int argc, char **argv)
1246 {
1247 	bool shortInfo = false;
1248 	int argi = 1;
1249 	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
1250 		shortInfo = true;
1251 		print_thread_list_table_head();
1252 		argi++;
1253 	}
1254 
1255 	if (argi == argc) {
1256 		_dump_thread_info(thread_get_current_thread(), shortInfo);
1257 		return 0;
1258 	}
1259 
1260 	for (; argi < argc; argi++) {
1261 		const char *name = argv[argi];
1262 		int32 id = strtoul(name, NULL, 0);
1263 
1264 		if (IS_KERNEL_ADDRESS(id)) {
1265 			// semi-hack
1266 			_dump_thread_info((Thread *)id, shortInfo);
1267 			continue;
1268 		}
1269 
1270 		// walk through the thread list, trying to match name or id
1271 		bool found = false;
1272 		struct hash_iterator i;
1273 		hash_open(sThreadHash, &i);
1274 		Thread *thread;
1275 		while ((thread = (Thread*)hash_next(sThreadHash, &i)) != NULL) {
1276 			if (!strcmp(name, thread->name) || thread->id == id) {
1277 				_dump_thread_info(thread, shortInfo);
1278 				found = true;
1279 				break;
1280 			}
1281 		}
1282 		hash_close(sThreadHash, &i, false);
1283 
1284 		if (!found)
1285 			kprintf("thread \"%s\" (%ld) doesn't exist!\n", name, id);
1286 	}
1287 
1288 	return 0;
1289 }
1290 
1291 
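/*!	KDL command that lists threads: the name it was invoked under ("realtime",
	"ready", "running", "waiting", "calling") selects the filter, which an
	optional semaphore, call range/symbol, or team argument can narrow further;
	one short info line is printed per matching thread.
*/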
1292 static int
1293 dump_thread_list(int argc, char **argv)
1294 {
1295 	Thread *thread;
1296 	struct hash_iterator i;
1297 	bool realTimeOnly = false;
1298 	bool calling = false;
1299 	const char *callSymbol = NULL;
1300 	addr_t callStart = 0;
1301 	addr_t callEnd = 0;
1302 	int32 requiredState = 0;
1303 	team_id team = -1;
1304 	sem_id sem = -1;
1305 
1306 	if (!strcmp(argv[0], "realtime"))
1307 		realTimeOnly = true;
1308 	else if (!strcmp(argv[0], "ready"))
1309 		requiredState = B_THREAD_READY;
1310 	else if (!strcmp(argv[0], "running"))
1311 		requiredState = B_THREAD_RUNNING;
1312 	else if (!strcmp(argv[0], "waiting")) {
1313 		requiredState = B_THREAD_WAITING;
1314 
1315 		if (argc > 1) {
1316 			sem = strtoul(argv[1], NULL, 0);
1317 			if (sem == 0)
1318 				kprintf("ignoring invalid semaphore argument.\n");
1319 		}
1320 	} else if (!strcmp(argv[0], "calling")) {
1321 		if (argc < 2) {
1322 			kprintf("Need to give a symbol name or start and end arguments.\n");
1323 			return 0;
1324 		} else if (argc == 3) {
1325 			callStart = parse_expression(argv[1]);
1326 			callEnd = parse_expression(argv[2]);
1327 		} else
1328 			callSymbol = argv[1];
1329 
1330 		calling = true;
1331 	} else if (argc > 1) {
1332 		team = strtoul(argv[1], NULL, 0);
1333 		if (team == 0)
1334 			kprintf("ignoring invalid team argument.\n");
1335 	}
1336 
1337 	print_thread_list_table_head();
1338 
1339 	hash_open(sThreadHash, &i);
1340 	while ((thread = (Thread*)hash_next(sThreadHash, &i)) != NULL) {
1341 		// filter out threads not matching the search criteria
1342 		if ((requiredState && thread->state != requiredState)
1343 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1344 					callStart, callEnd))
1345 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1346 			|| (team > 0 && thread->team->id != team)
1347 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1348 			continue;
1349 
1350 		_dump_thread_info(thread, true);
1351 	}
1352 	hash_close(sThreadHash, &i, false);
1353 	return 0;
1354 }
1355 
1356 
1357 //	#pragma mark - private kernel API
1358 
1359 
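/*!	Terminates the calling thread: removes it from its team (shutting down the
	whole team, if it is the main thread), tears down its messaging semaphores
	and user stack, notifies waiters, listeners, and the debugger, and finally
	hands the thread over to the undertaker. This function does not return.
*/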
1360 void
1361 thread_exit(void)
1362 {
1363 	cpu_status state;
1364 	Thread *thread = thread_get_current_thread();
1365 	Team *team = thread->team;
1366 	thread_id parentID = -1;
1367 	status_t status;
1368 	struct thread_debug_info debugInfo;
1369 	team_id teamID = team->id;
1370 
1371 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1372 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1373 			? "due to signal" : "normally", thread->exit.status));
1374 
1375 	if (!are_interrupts_enabled())
1376 		panic("thread_exit() called with interrupts disabled!\n");
1377 
1378 	// boost our priority to get this over with
1379 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1380 
1381 	// Cancel previously installed alarm timer, if any
1382 	cancel_timer(&thread->alarm);
1383 
1384 	// remember the user stack area -- we will delete it below
1385 	area_id userStackArea = -1;
1386 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1387 		userStackArea = thread->user_stack_area;
1388 		thread->user_stack_area = -1;
1389 	}
1390 
1391 	struct job_control_entry *death = NULL;
1392 	struct death_entry* threadDeathEntry = NULL;
1393 	bool deleteTeam = false;
1394 	port_id debuggerPort = -1;
1395 
1396 	if (team != team_get_kernel_team()) {
1397 		user_debug_thread_exiting(thread);
1398 
1399 		if (team->main_thread == thread) {
1400 			// The main thread is exiting. Shut down the whole team.
1401 			deleteTeam = true;
1402 		} else {
1403 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1404 			team_free_user_thread(thread);
1405 		}
1406 
1407 		// Remove this thread from the current team and put it into the
1408 		// kernel team until it dies.
1409 		state = disable_interrupts();
1410 		GRAB_TEAM_LOCK();
1411 
1412 		if (deleteTeam)
1413 			debuggerPort = team_shutdown_team(team, state);
1414 
1415 		GRAB_THREAD_LOCK();
1416 			// removing the thread and putting its death entry into the parent
1417 			// team's list needs to be an atomic operation
1418 
1419 		// remember how long this thread lasted
1420 		team->dead_threads_kernel_time += thread->kernel_time;
1421 		team->dead_threads_user_time += thread->user_time;
1422 
1423 		remove_thread_from_team(team, thread);
1424 		insert_thread_into_team(team_get_kernel_team(), thread);
1425 
1426 		if (team->death_entry != NULL) {
1427 			if (--team->death_entry->remaining_threads == 0)
1428 				team->death_entry->condition.NotifyOne(true, B_OK);
1429 		}
1430 
1431 		if (deleteTeam) {
1432 			Team *parent = team->parent;
1433 
1434 			// remember who our parent was so we can send a signal
1435 			parentID = parent->id;
1436 
1437 			// Set the team job control state to "dead" and detach the job
1438 			// control entry from our team struct.
1439 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1440 			death = team->job_control_entry;
1441 			team->job_control_entry = NULL;
1442 
1443 			if (death != NULL) {
1444 				death->InitDeadState();
1445 
1446 				// team_set_job_control_state() already moved our entry
1447 				// into the parent's list. We just check the soft limit of
1448 				// death entries.
1449 				if (parent->dead_children.count > MAX_DEAD_CHILDREN) {
1450 					death = parent->dead_children.entries.RemoveHead();
1451 					parent->dead_children.count--;
1452 				} else
1453 					death = NULL;
1454 
1455 				RELEASE_THREAD_LOCK();
1456 			} else
1457 				RELEASE_THREAD_LOCK();
1458 
1459 			team_remove_team(team);
1460 
1461 			send_signal_etc(parentID, SIGCHLD,
1462 				SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
1463 		} else {
1464 			// The thread is not the main thread. We store a thread death
1465 			// entry for it, unless someone is already waiting for it.
1466 			if (threadDeathEntry != NULL
1467 				&& list_is_empty(&thread->exit.waiters)) {
1468 				threadDeathEntry->thread = thread->id;
1469 				threadDeathEntry->status = thread->exit.status;
1470 				threadDeathEntry->reason = thread->exit.reason;
1471 				threadDeathEntry->signal = thread->exit.signal;
1472 				// add entry -- remove an old one, if we hit the limit
1473 				// add entry -- remove and old one, if we hit the limit
1474 				list_add_item(&team->dead_threads, threadDeathEntry);
1475 				team->dead_threads_count++;
1476 				threadDeathEntry = NULL;
1477 
1478 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1479 					threadDeathEntry = (death_entry*)list_remove_head_item(
1480 						&team->dead_threads);
1481 					team->dead_threads_count--;
1482 				}
1483 			}
1484 
1485 			RELEASE_THREAD_LOCK();
1486 		}
1487 
1488 		RELEASE_TEAM_LOCK();
1489 
1490 		// swap address spaces, to make sure we're running on the kernel's pgdir
1491 		vm_swap_address_space(team->address_space, VMAddressSpace::Kernel());
1492 		restore_interrupts(state);
1493 
1494 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1495 	}
1496 
1497 	free(threadDeathEntry);
1498 
1499 	// delete the team if we're its main thread
1500 	if (deleteTeam) {
1501 		team_delete_team(team, debuggerPort);
1502 
1503 		// we need to delete any death entry that made it to here
1504 		delete death;
1505 	}
1506 
1507 	state = disable_interrupts();
1508 	GRAB_THREAD_LOCK();
1509 
1510 	// remove thread from hash, so it's no longer accessible
1511 	hash_remove(sThreadHash, thread);
1512 	sUsedThreads--;
1513 
1514 	// Stop debugging for this thread
1515 	debugInfo = thread->debug_info;
1516 	clear_thread_debug_info(&thread->debug_info, true);
1517 
1518 	// Remove the select infos. We notify them a little later.
1519 	select_info* selectInfos = thread->select_infos;
1520 	thread->select_infos = NULL;
1521 
1522 	RELEASE_THREAD_LOCK();
1523 	restore_interrupts(state);
1524 
1525 	destroy_thread_debug_info(&debugInfo);
1526 
1527 	// notify select infos
1528 	select_info* info = selectInfos;
1529 	while (info != NULL) {
1530 		select_sync* sync = info->sync;
1531 
1532 		notify_select_events(info, B_EVENT_INVALID);
1533 		info = info->next;
1534 		put_select_sync(sync);
1535 	}
1536 
1537 	// notify listeners
1538 	sNotificationService.Notify(THREAD_REMOVED, thread);
1539 
1540 	// shutdown the thread messaging
1541 
1542 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1543 	if (status == B_WOULD_BLOCK) {
1544 		// there is data waiting for us, so let us eat it
1545 		thread_id sender;
1546 
1547 		delete_sem(thread->msg.write_sem);
1548 			// first, let's remove all possibly waiting writers
1549 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1550 	} else {
1551 		// we probably own the semaphore here, and we're the last to do so
1552 		delete_sem(thread->msg.write_sem);
1553 	}
1554 	// now we can safely remove the msg.read_sem
1555 	delete_sem(thread->msg.read_sem);
1556 
1557 	// fill all death entries and delete the sem that others will use to wait on us
1558 	{
1559 		sem_id cachedExitSem = thread->exit.sem;
1560 		cpu_status state;
1561 
1562 		state = disable_interrupts();
1563 		GRAB_THREAD_LOCK();
1564 
1565 		// make sure no one will grab this semaphore again
1566 		thread->exit.sem = -1;
1567 
1568 		// fill all death entries
1569 		death_entry* entry = NULL;
1570 		while ((entry = (struct death_entry*)list_get_next_item(
1571 				&thread->exit.waiters, entry)) != NULL) {
1572 			entry->status = thread->exit.status;
1573 			entry->reason = thread->exit.reason;
1574 			entry->signal = thread->exit.signal;
1575 		}
1576 
1577 		RELEASE_THREAD_LOCK();
1578 		restore_interrupts(state);
1579 
1580 		delete_sem(cachedExitSem);
1581 	}
1582 
1583 	// delete the user stack, if this was a user thread
1584 	if (!deleteTeam && userStackArea >= 0) {
1585 		// We postponed deleting the user stack until now, since this way all
1586 		// notifications for the thread's death are out already and all other
1587 		// threads waiting for this thread's death and some object on its stack
1588 		// will wake up before we (try to) delete the stack area. Of most
1589 		// relevance is probably the case where this is the main thread and
1590 		// other threads use objects on its stack -- so we want them terminated
1591 		// first.
1592 		// When the team is deleted, all areas are deleted anyway, so we don't
1593 		// need to do that explicitly in that case.
1594 		vm_delete_area(teamID, userStackArea, true);
1595 	}
1596 
1597 	// notify the debugger
1598 	if (teamID != team_get_kernel_team_id())
1599 		user_debug_thread_deleted(teamID, thread->id);
1600 
1601 	// enqueue in the undertaker list and reschedule for the last time
1602 	UndertakerEntry undertakerEntry(thread, teamID);
1603 
1604 	disable_interrupts();
1605 	GRAB_THREAD_LOCK();
1606 
1607 	sUndertakerEntries.Add(&undertakerEntry);
1608 	sUndertakerCondition.NotifyOne(true);
1609 
1610 	thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
1611 	scheduler_reschedule();
1612 
1613 	panic("never can get here\n");
1614 }
1615 
1616 
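/*!	Returns the Thread structure with the given ID, or \c NULL if no such
	thread exists. Acquires and releases the thread lock internally; use
	thread_get_thread_struct_locked() if the lock is already held.
*/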
1617 Thread *
1618 thread_get_thread_struct(thread_id id)
1619 {
1620 	Thread *thread;
1621 	cpu_status state;
1622 
1623 	state = disable_interrupts();
1624 	GRAB_THREAD_LOCK();
1625 
1626 	thread = thread_get_thread_struct_locked(id);
1627 
1628 	RELEASE_THREAD_LOCK();
1629 	restore_interrupts(state);
1630 
1631 	return thread;
1632 }
1633 
1634 
1635 Thread *
1636 thread_get_thread_struct_locked(thread_id id)
1637 {
1638 	struct thread_key key;
1639 
1640 	key.id = id;
1641 
1642 	return (Thread*)hash_lookup(sThreadHash, &key);
1643 }
1644 
1645 
1646 /*!	Called in the interrupt handler code when a thread enters
1647 	the kernel for any reason.
1648 	Only tracks time for now.
1649 	Interrupts are disabled.
1650 */
1651 void
1652 thread_at_kernel_entry(bigtime_t now)
1653 {
1654 	Thread *thread = thread_get_current_thread();
1655 
1656 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1657 
1658 	// track user time
1659 	thread->user_time += now - thread->last_time;
1660 	thread->last_time = now;
1661 
1662 	thread->in_kernel = true;
1663 }
1664 
1665 
1666 /*!	Called whenever a thread exits kernel space to user space.
1667 	Tracks time, handles signals, ...
1668 	Interrupts must be enabled. When the function returns, interrupts will be
1669 	disabled.
1670 */
1671 void
1672 thread_at_kernel_exit(void)
1673 {
1674 	Thread *thread = thread_get_current_thread();
1675 
1676 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1677 
1678 	while (handle_signals(thread)) {
1679 		InterruptsSpinLocker _(gThreadSpinlock);
1680 		scheduler_reschedule();
1681 	}
1682 
1683 	disable_interrupts();
1684 
1685 	thread->in_kernel = false;
1686 
1687 	// track kernel time
1688 	bigtime_t now = system_time();
1689 	thread->kernel_time += now - thread->last_time;
1690 	thread->last_time = now;
1691 }
1692 
1693 
1694 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1695 	and no debugging shall be done.
1696 	Interrupts must be disabled.
1697 */
1698 void
1699 thread_at_kernel_exit_no_signals(void)
1700 {
1701 	Thread *thread = thread_get_current_thread();
1702 
1703 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1704 
1705 	thread->in_kernel = false;
1706 
1707 	// track kernel time
1708 	bigtime_t now = system_time();
1709 	thread->kernel_time += now - thread->last_time;
1710 	thread->last_time = now;
1711 }
1712 
1713 
1714 void
1715 thread_reset_for_exec(void)
1716 {
1717 	Thread *thread = thread_get_current_thread();
1718 
1719 	reset_signals(thread);
1720 
1721 	// Note: We don't cancel an alarm. It is supposed to survive exec*().
1722 }
1723 
1724 
1725 /*! Inserts a thread at the tail of a queue */
1726 void
1727 thread_enqueue(Thread *thread, struct thread_queue *queue)
1728 {
1729 	thread->queue_next = NULL;
1730 	if (queue->head == NULL) {
1731 		queue->head = thread;
1732 		queue->tail = thread;
1733 	} else {
1734 		queue->tail->queue_next = thread;
1735 		queue->tail = thread;
1736 	}
1737 }
1738 
1739 
1740 Thread *
1741 thread_lookat_queue(struct thread_queue *queue)
1742 {
1743 	return queue->head;
1744 }
1745 
1746 
1747 Thread *
1748 thread_dequeue(struct thread_queue *queue)
1749 {
1750 	Thread *thread = queue->head;
1751 
1752 	if (thread != NULL) {
1753 		queue->head = thread->queue_next;
1754 		if (queue->tail == thread)
1755 			queue->tail = NULL;
1756 	}
1757 	return thread;
1758 }
1759 
1760 
1761 Thread *
1762 thread_dequeue_id(struct thread_queue *q, thread_id id)
1763 {
1764 	Thread *thread;
1765 	Thread *last = NULL;
1766 
1767 	thread = q->head;
1768 	while (thread != NULL) {
1769 		if (thread->id == id) {
1770 			if (last == NULL)
1771 				q->head = thread->queue_next;
1772 			else
1773 				last->queue_next = thread->queue_next;
1774 
1775 			if (q->tail == thread)
1776 				q->tail = last;
1777 			break;
1778 		}
1779 		last = thread;
1780 		thread = thread->queue_next;
1781 	}
1782 	return thread;
1783 }
1784 
1785 
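/*!	Calls the given callback for every thread in the system until the callback
	returns \c true. Returns the thread the iteration stopped at, or \c NULL if
	the callback never returned \c true.
*/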
1786 Thread*
1787 thread_iterate_through_threads(thread_iterator_callback callback, void* cookie)
1788 {
1789 	struct hash_iterator iterator;
1790 	hash_open(sThreadHash, &iterator);
1791 
1792 	Thread* thread;
1793 	while ((thread = (Thread*)hash_next(sThreadHash, &iterator))
1794 			!= NULL) {
1795 		if (callback(thread, cookie))
1796 			break;
1797 	}
1798 
1799 	hash_close(sThreadHash, &iterator, false);
1800 
1801 	return thread;
1802 }
1803 
1804 
1805 thread_id
1806 allocate_thread_id(void)
1807 {
1808 	return atomic_add(&sNextThreadID, 1);
1809 }
1810 
1811 
1812 thread_id
1813 peek_next_thread_id(void)
1814 {
1815 	return atomic_get(&sNextThreadID);
1816 }
1817 
1818 
1819 /*!	Yield the CPU to other threads.
1820 	If \a force is \c true, the thread is almost guaranteed to be unscheduled.
1821 	If \c false, the thread may keep running: if no other thread is ready, or
1822 	if it has a higher priority than the other ready threads, it still has a
1823 	good chance to continue.
1824 */
1825 void
1826 thread_yield(bool force)
1827 {
1828 	if (force) {
1829 		// snooze for roughly 3 thread quantums
1830 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1831 #if 0
1832 		cpu_status state;
1833 
1834 		Thread *thread = thread_get_current_thread();
1835 		if (thread == NULL)
1836 			return;
1837 
1838 		state = disable_interrupts();
1839 		GRAB_THREAD_LOCK();
1840 
1841 		// mark the thread as yielded, so it will not be scheduled next
1842 		//thread->was_yielded = true;
1843 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1844 		scheduler_reschedule();
1845 
1846 		RELEASE_THREAD_LOCK();
1847 		restore_interrupts(state);
1848 #endif
1849 	} else {
1850 		Thread *thread = thread_get_current_thread();
1851 		if (thread == NULL)
1852 			return;
1853 
1854 		// Don't force the thread off the CPU, just reschedule.
1855 		InterruptsSpinLocker _(gThreadSpinlock);
1856 		scheduler_reschedule();
1857 	}
1858 }
1859 
1860 
1861 /*!	Kernel private thread creation function.
1862 
1863 	\param threadID The ID to be assigned to the new thread. If
1864 		  \code < 0 \endcode a fresh one is allocated.
1865 */
1866 thread_id
1867 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1868 	void *arg, team_id team, thread_id threadID)
1869 {
1870 	thread_creation_attributes attributes;
1871 	attributes.entry = (thread_entry_func)function;
1872 	attributes.name = name;
1873 	attributes.priority = priority;
1874 	attributes.args1 = arg;
1875 	attributes.args2 = NULL;
1876 	attributes.stack_address = NULL;
1877 	attributes.stack_size = 0;
1878 	attributes.team = team;
1879 	attributes.thread = threadID;
1880 
1881 	return create_thread(attributes, true);
1882 }
1883 
1884 
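/*!	Waits for the thread with the given ID to exit (resuming it first, so a
	suspended thread cannot be waited for forever) and returns its exit status
	in \a _returnCode. If the thread is already gone, the death entries of the
	caller's team are consulted instead. \a flags and \a timeout are passed on
	to acquire_sem_etc().
*/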
1885 status_t
1886 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1887 	status_t *_returnCode)
1888 {
1889 	sem_id exitSem = B_BAD_THREAD_ID;
1890 	struct death_entry death;
1891 	job_control_entry* freeDeath = NULL;
1892 	Thread *thread;
1893 	cpu_status state;
1894 	status_t status = B_OK;
1895 
1896 	if (id < B_OK)
1897 		return B_BAD_THREAD_ID;
1898 
1899 	// we need to resume the thread we're waiting for first
1900 
1901 	state = disable_interrupts();
1902 	GRAB_THREAD_LOCK();
1903 
1904 	thread = thread_get_thread_struct_locked(id);
1905 	if (thread != NULL) {
1906 		// remember the semaphore we have to wait on and place our death entry
1907 		exitSem = thread->exit.sem;
1908 		list_add_link_to_head(&thread->exit.waiters, &death);
1909 	}
1910 
1911 	death_entry* threadDeathEntry = NULL;
1912 
1913 	RELEASE_THREAD_LOCK();
1914 
1915 	if (thread == NULL) {
1916 		// we couldn't find this thread - maybe it's already gone, and we'll
1917 		// find its death entry in our team
1918 		GRAB_TEAM_LOCK();
1919 
1920 		Team* team = thread_get_current_thread()->team;
1921 
1922 		// check the child death entries first (i.e. main threads of child
1923 		// teams)
1924 		bool deleteEntry;
1925 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1926 		if (freeDeath != NULL) {
1927 			death.status = freeDeath->status;
1928 			if (!deleteEntry)
1929 				freeDeath = NULL;
1930 		} else {
1931 			// check the thread death entries of the team (non-main threads)
1932 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1933 					&team->dead_threads, threadDeathEntry)) != NULL) {
1934 				if (threadDeathEntry->thread == id) {
1935 					list_remove_item(&team->dead_threads, threadDeathEntry);
1936 					team->dead_threads_count--;
1937 					death.status = threadDeathEntry->status;
1938 					break;
1939 				}
1940 			}
1941 
1942 			if (threadDeathEntry == NULL)
1943 				status = B_BAD_THREAD_ID;
1944 		}
1945 
1946 		RELEASE_TEAM_LOCK();
1947 	}
1948 
1949 	restore_interrupts(state);
1950 
1951 	if (thread == NULL && status == B_OK) {
1952 		// we found the thread's death entry in our team
1953 		if (_returnCode)
1954 			*_returnCode = death.status;
1955 
1956 		delete freeDeath;
1957 		free(threadDeathEntry);
1958 		return B_OK;
1959 	}
1960 
1961 	// we need to wait for the death of the thread
1962 
1963 	if (exitSem < B_OK)
1964 		return B_BAD_THREAD_ID;
1965 
1966 	resume_thread(id);
1967 		// make sure we don't wait forever on a suspended thread
1968 
1969 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1970 
1971 	if (status == B_OK) {
1972 		// this should never happen as the thread deletes the semaphore on exit
1973 		panic("could acquire exit_sem for thread %ld\n", id);
1974 	} else if (status == B_BAD_SEM_ID) {
1975 		// this is the way the thread normally exits
1976 		status = B_OK;
1977 
1978 		if (_returnCode)
1979 			*_returnCode = death.status;
1980 	} else {
1981 		// We were probably interrupted; we need to remove our death entry now.
1982 		state = disable_interrupts();
1983 		GRAB_THREAD_LOCK();
1984 
1985 		thread = thread_get_thread_struct_locked(id);
1986 		if (thread != NULL)
1987 			list_remove_link(&death);
1988 
1989 		RELEASE_THREAD_LOCK();
1990 		restore_interrupts(state);
1991 
1992 		// If the thread is already gone, we need to wait for its exit semaphore
1993 		// to make sure our death entry stays valid - it won't take long
1994 		if (thread == NULL)
1995 			acquire_sem(exitSem);
1996 	}
1997 
1998 	return status;
1999 }
2000 
2001 
2002 status_t
2003 select_thread(int32 id, struct select_info* info, bool kernel)
2004 {
2005 	InterruptsSpinLocker locker(gThreadSpinlock);
2006 
2007 	// get thread
2008 	Thread* thread = thread_get_thread_struct_locked(id);
2009 	if (thread == NULL)
2010 		return B_BAD_THREAD_ID;
2011 
2012 	// We support only B_EVENT_INVALID at the moment.
2013 	info->selected_events &= B_EVENT_INVALID;
2014 
2015 	// add info to list
2016 	if (info->selected_events != 0) {
2017 		info->next = thread->select_infos;
2018 		thread->select_infos = info;
2019 
2020 		// we need a sync reference
2021 		atomic_add(&info->sync->ref_count, 1);
2022 	}
2023 
2024 	return B_OK;
2025 }
2026 
2027 
2028 status_t
2029 deselect_thread(int32 id, struct select_info* info, bool kernel)
2030 {
2031 	InterruptsSpinLocker locker(gThreadSpinlock);
2032 
2033 	// get thread
2034 	Thread* thread = thread_get_thread_struct_locked(id);
2035 	if (thread == NULL)
2036 		return B_BAD_THREAD_ID;
2037 
2038 	// remove info from list
2039 	select_info** infoLocation = &thread->select_infos;
2040 	while (*infoLocation != NULL && *infoLocation != info)
2041 		infoLocation = &(*infoLocation)->next;
2042 
2043 	if (*infoLocation != info)
2044 		return B_OK;
2045 
2046 	*infoLocation = info->next;
2047 
2048 	locker.Unlock();
2049 
2050 	// surrender sync reference
2051 	put_select_sync(info->sync);
2052 
2053 	return B_OK;
2054 }
2055 
2056 
2057 int32
2058 thread_max_threads(void)
2059 {
2060 	return sMaxThreads;
2061 }
2062 
2063 
2064 int32
2065 thread_used_threads(void)
2066 {
2067 	return sUsedThreads;
2068 }
2069 
2070 
2071 const char*
2072 thread_state_to_text(Thread* thread, int32 state)
2073 {
2074 	return state_to_text(thread, state);
2075 }
2076 
2077 
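/*!	Returns the I/O priority of the thread with the given ID. If the thread
	has no explicit I/O priority set (i.e. it is negative), the thread's
	regular priority is returned instead. Returns \c B_BAD_THREAD_ID if the
	thread could not be found.
*/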
2078 int32
2079 thread_get_io_priority(thread_id id)
2080 {
2081 	// take a shortcut, if it is the current thread
2082 	Thread* thread = thread_get_current_thread();
2083 	int32 priority;
2084 	if (id == thread->id) {
2085 		priority = thread->io_priority;
2086 		return priority < 0 ? thread->priority : priority;
2087 	}
2088 
2089 	// not the current thread -- get it
2090 	InterruptsSpinLocker locker(gThreadSpinlock);
2091 
2092 	thread = thread_get_thread_struct_locked(id);
2093 	if (thread == NULL)
2094 		return B_BAD_THREAD_ID;
2095 
2096 	priority = thread->io_priority;
2097 	return priority < 0 ? thread->priority : priority;
2098 }
2099 
2100 
2101 void
2102 thread_set_io_priority(int32 priority)
2103 {
2104 	Thread* thread = thread_get_current_thread();
2105 	thread->io_priority = priority;
2106 }
2107 
2108 
2109 status_t
2110 thread_init(kernel_args *args)
2111 {
2112 	uint32 i;
2113 
2114 	TRACE(("thread_init: entry\n"));
2115 
2116 	// create the thread hash table
2117 	sThreadHash = hash_init(15, offsetof(Thread, all_next),
2118 		&thread_struct_compare, &thread_struct_hash);
2119 
2120 	// create the thread structure object cache
2121 	sThreadCache = create_object_cache("threads", sizeof(Thread), 16, NULL,
2122 		NULL, NULL);
2123 		// Note: The x86 port requires 16 byte alignment of thread structures.
2124 	if (sThreadCache == NULL)
2125 		panic("thread_init(): failed to allocate thread object cache!");
2126 
2127 	if (arch_thread_init(args) < B_OK)
2128 		panic("arch_thread_init() failed!\n");
2129 
2130 	// skip all thread IDs up to and including B_SYSTEM_TEAM, which is reserved
2131 	sNextThreadID = B_SYSTEM_TEAM + 1;
2132 
2133 	// create an idle thread for each cpu
2134 
2135 	for (i = 0; i < args->num_cpus; i++) {
2136 		Thread *thread;
2137 		area_info info;
2138 		char name[64];
2139 
2140 		sprintf(name, "idle thread %lu", i + 1);
2141 		thread = create_thread_struct(&sIdleThreads[i], name,
2142 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
2143 		if (thread == NULL) {
2144 			panic("error creating idle thread struct\n");
2145 			return B_NO_MEMORY;
2146 		}
2147 
2148 		gCPU[i].running_thread = thread;
2149 
2150 		thread->team = team_get_kernel_team();
2151 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
2152 		thread->state = B_THREAD_RUNNING;
2153 		thread->next_state = B_THREAD_READY;
2154 		sprintf(name, "idle thread %lu kstack", i + 1);
2155 		thread->kernel_stack_area = find_area(name);
2156 		thread->entry = NULL;
2157 
2158 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2159 			panic("error finding idle kstack area\n");
2160 
2161 		thread->kernel_stack_base = (addr_t)info.address;
2162 		thread->kernel_stack_top = thread->kernel_stack_base + info.size;
2163 
2164 		hash_insert(sThreadHash, thread);
2165 		insert_thread_into_team(thread->team, thread);
2166 	}
2167 	sUsedThreads = args->num_cpus;
2168 
2169 	// init the notification service
2170 	new(&sNotificationService) ThreadNotificationService();
2171 
2172 	// start the undertaker thread
2173 	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2174 	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2175 
2176 	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2177 		B_DISPLAY_PRIORITY, NULL);
2178 	if (undertakerThread < 0)
2179 		panic("Failed to create undertaker thread!");
2180 	resume_thread(undertakerThread);
2181 
2182 	// set up some debugger commands
2183 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2184 		"[ <team> ]\n"
2185 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2186 		"all threads of the specified team.\n"
2187 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2188 	add_debugger_command_etc("ready", &dump_thread_list,
2189 		"List all ready threads",
2190 		"\n"
2191 		"Prints a list of all threads in ready state.\n", 0);
2192 	add_debugger_command_etc("running", &dump_thread_list,
2193 		"List all running threads",
2194 		"\n"
2195 		"Prints a list of all threads in running state.\n", 0);
2196 	add_debugger_command_etc("waiting", &dump_thread_list,
2197 		"List all waiting threads (optionally for a specific semaphore)",
2198 		"[ <sem> ]\n"
2199 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2200 		"specified, only the threads waiting on that semaphore are listed.\n"
2201 		"  <sem>  - ID of the semaphore.\n", 0);
2202 	add_debugger_command_etc("realtime", &dump_thread_list,
2203 		"List all realtime threads",
2204 		"\n"
2205 		"Prints a list of all threads with realtime priority.\n", 0);
2206 	add_debugger_command_etc("thread", &dump_thread_info,
2207 		"Dump info about a particular thread",
2208 		"[ -s ] ( <id> | <address> | <name> )*\n"
2209 		"Prints information about the specified thread. If no argument is\n"
2210 		"given the current thread is selected.\n"
2211 		"  -s         - Print info in compact table form (like \"threads\").\n"
2212 		"  <id>       - The ID of the thread.\n"
2213 		"  <address>  - The address of the thread structure.\n"
2214 		"  <name>     - The thread's name.\n", 0);
2215 	add_debugger_command_etc("calling", &dump_thread_list,
2216 		"Show all threads that have a specific address in their call chain",
2217 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2218 	add_debugger_command_etc("unreal", &make_thread_unreal,
2219 		"Set realtime priority threads to normal priority",
2220 		"[ <id> ]\n"
2221 		"Sets the priority of all realtime threads or, if given, the one\n"
2222 		"with the specified ID to \"normal\" priority.\n"
2223 		"  <id>  - The ID of the thread.\n", 0);
2224 	add_debugger_command_etc("suspend", &make_thread_suspended,
2225 		"Suspend a thread",
2226 		"[ <id> ]\n"
2227 		"Suspends the thread with the given ID. If no ID argument is given\n"
2228 		"the current thread is selected.\n"
2229 		"  <id>  - The ID of the thread.\n", 0);
2230 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2231 		"<id>\n"
2232 		"Resumes the specified thread, if it is currently suspended.\n"
2233 		"  <id>  - The ID of the thread.\n", 0);
2234 	add_debugger_command_etc("drop", &drop_into_debugger,
2235 		"Drop a thread into the userland debugger",
2236 		"<id>\n"
2237 		"Drops the specified (userland) thread into the userland debugger\n"
2238 		"after leaving the kernel debugger.\n"
2239 		"  <id>  - The ID of the thread.\n", 0);
2240 	add_debugger_command_etc("priority", &set_thread_prio,
2241 		"Set a thread's priority",
2242 		"<priority> [ <id> ]\n"
2243 		"Sets the priority of the thread with the specified ID to the given\n"
2244 		"priority. If no thread ID is given, the current thread is selected.\n"
2245 		"  <priority>  - The thread's new priority (0 - 120)\n"
2246 		"  <id>        - The ID of the thread.\n", 0);
2247 
2248 	return B_OK;
2249 }
2250 
2251 
2252 status_t
2253 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2254 {
2255 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2256 	// so that get_current_cpu and friends will work, which is crucial for
2257 	// a lot of low level routines
2258 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2259 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2260 	return B_OK;
2261 }
2262 
2263 
2264 //	#pragma mark - thread blocking API
2265 
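/*	Usage sketch for the blocking API (added documentation, not part of the
	original source): a waiter prepares the current thread for blocking and
	then blocks, both with the thread spinlock held; some other thread later
	wakes it via thread_unblock()/thread_unblock_locked(). \c waitObject is a
	placeholder for whatever object the caller waits on.

	\code
	InterruptsSpinLocker locker(gThreadSpinlock);
	Thread* thread = thread_get_current_thread();
	thread_prepare_to_block(thread, B_CAN_INTERRUPT,
		THREAD_BLOCK_TYPE_OTHER, waitObject);
	status_t status = thread_block_locked(thread);
	\endcode
*/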
2266 
2267 static status_t
2268 thread_block_timeout(timer* timer)
2269 {
2270 	// The timer has been installed with B_TIMER_ACQUIRE_THREAD_LOCK, so
2271 	// we're holding the thread lock already. This makes things comfortably
2272 	// easy.
2273 
2274 	Thread* thread = (Thread*)timer->user_data;
2275 	thread_unblock_locked(thread, B_TIMED_OUT);
2276 
2277 	return B_HANDLED_INTERRUPT;
2278 }
2279 
2280 
2281 status_t
2282 thread_block()
2283 {
2284 	InterruptsSpinLocker _(gThreadSpinlock);
2285 	return thread_block_locked(thread_get_current_thread());
2286 }
2287 
2288 
2289 void
2290 thread_unblock(thread_id threadID, status_t status)
2291 {
2292 	InterruptsSpinLocker _(gThreadSpinlock);
2293 
2294 	Thread* thread = thread_get_thread_struct_locked(threadID);
2295 	if (thread != NULL)
2296 		thread_unblock_locked(thread, status);
2297 }
2298 
2299 
2300 status_t
2301 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2302 {
2303 	InterruptsSpinLocker _(gThreadSpinlock);
2304 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
2305 }
2306 
2307 
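/*!	Blocks the current thread like thread_block_locked(), but with an
	optional timeout. \a timeoutFlags may contain \c B_RELATIVE_TIMEOUT or
	\c B_ABSOLUTE_TIMEOUT (optionally combined with
	\c B_TIMEOUT_REAL_TIME_BASE); without one of these, or with
	\c B_INFINITE_TIMEOUT, no timer is installed.
	The thread spinlock must be held when calling this function.
*/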
2308 status_t
2309 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
2310 {
2311 	Thread* thread = thread_get_current_thread();
2312 
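	// wait.status is 1 while the thread still counts as blocked; any other
	// value means it has already been unblocked with that status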
2313 	if (thread->wait.status != 1)
2314 		return thread->wait.status;
2315 
2316 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2317 		&& timeout != B_INFINITE_TIMEOUT;
2318 
2319 	if (useTimer) {
2320 		// Timer flags: absolute/relative + "acquire thread lock". The latter
2321 		// avoids nasty race conditions and deadlock problems that could
2322 		// otherwise occur between our cancel_timer() and a concurrently
2323 		// executing thread_block_timeout().
2324 		uint32 timerFlags;
2325 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2326 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2327 		} else {
2328 			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2329 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2330 				timeout -= rtc_boot_time();
2331 		}
2332 		timerFlags |= B_TIMER_ACQUIRE_THREAD_LOCK;
2333 
2334 		// install the timer
2335 		thread->wait.unblock_timer.user_data = thread;
2336 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2337 			timerFlags);
2338 	}
2339 
2340 	// block
2341 	status_t error = thread_block_locked(thread);
2342 
2343 	// cancel timer, if it didn't fire
2344 	if (error != B_TIMED_OUT && useTimer)
2345 		cancel_timer(&thread->wait.unblock_timer);
2346 
2347 	return error;
2348 }
2349 
2350 
2351 /*!	Thread spinlock must be held.
2352 */
2353 static status_t
2354 user_unblock_thread(thread_id threadID, status_t status)
2355 {
2356 	Thread* thread = thread_get_thread_struct_locked(threadID);
2357 	if (thread == NULL)
2358 		return B_BAD_THREAD_ID;
2359 	if (thread->user_thread == NULL)
2360 		return B_NOT_ALLOWED;
2361 
2362 	if (thread->user_thread->wait_status > 0) {
2363 		thread->user_thread->wait_status = status;
2364 		thread_unblock_locked(thread, status);
2365 	}
2366 
2367 	return B_OK;
2368 }
2369 
2370 
2371 //	#pragma mark - public kernel API
2372 
2373 
2374 void
2375 exit_thread(status_t returnValue)
2376 {
2377 	Thread *thread = thread_get_current_thread();
2378 
2379 	thread->exit.status = returnValue;
2380 	thread->exit.reason = THREAD_RETURN_EXIT;
2381 
2382 	// if called from a kernel thread, we don't deliver the signal,
2383 	// we just exit directly; the signal path is only needed to preserve
2384 	// the userland behaviour of this function
2385 	if (thread->team != team_get_kernel_team())
2386 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2387 	else
2388 		thread_exit();
2389 }
2390 
2391 
2392 status_t
2393 kill_thread(thread_id id)
2394 {
2395 	if (id <= 0)
2396 		return B_BAD_VALUE;
2397 
2398 	return send_signal(id, SIGKILLTHR);
2399 }
2400 
2401 
2402 status_t
2403 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2404 {
2405 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2406 }
2407 
2408 
2409 int32
2410 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2411 {
2412 	return receive_data_etc(sender, buffer, bufferSize, 0);
2413 }
2414 
2415 
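/*!	Returns whether there is unread message data pending.
	Note: the \a thread argument is currently not evaluated; only the calling
	thread's message queue is checked.
*/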
2416 bool
2417 has_data(thread_id thread)
2418 {
2419 	int32 count;
2420 
2421 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
2422 			&count) != B_OK)
2423 		return false;
2424 
2425 	return count != 0;
2426 }
2427 
2428 
2429 status_t
2430 _get_thread_info(thread_id id, thread_info *info, size_t size)
2431 {
2432 	status_t status = B_OK;
2433 	Thread *thread;
2434 	cpu_status state;
2435 
2436 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2437 		return B_BAD_VALUE;
2438 
2439 	state = disable_interrupts();
2440 	GRAB_THREAD_LOCK();
2441 
2442 	thread = thread_get_thread_struct_locked(id);
2443 	if (thread == NULL) {
2444 		status = B_BAD_VALUE;
2445 		goto err;
2446 	}
2447 
2448 	fill_thread_info(thread, info, size);
2449 
2450 err:
2451 	RELEASE_THREAD_LOCK();
2452 	restore_interrupts(state);
2453 
2454 	return status;
2455 }
2456 
2457 
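/*!	Iterates over the threads of the given team.
	\a *_cookie must be 0 on the first call, which returns the team's main
	thread; it is then updated to the ID of the returned thread and has to be
	passed back in for subsequent calls.
	\return \c B_OK on success, \c B_BAD_VALUE if the team is invalid or all
		threads have been enumerated.
*/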
2458 status_t
2459 _get_next_thread_info(team_id teamID, int32 *_cookie, thread_info *info,
2460 	size_t size)
2461 {
2462 	if (info == NULL || size != sizeof(thread_info) || teamID < 0)
2463 		return B_BAD_VALUE;
2464 
2465 	int32 lastID = *_cookie;
2466 
2467 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
2468 
2469 	Team* team;
2470 	if (teamID == B_CURRENT_TEAM)
2471 		team = thread_get_current_thread()->team;
2472 	else
2473 		team = team_get_team_struct_locked(teamID);
2474 
2475 	if (team == NULL)
2476 		return B_BAD_VALUE;
2477 
2478 	Thread* thread = NULL;
2479 
2480 	if (lastID == 0) {
2481 		// We start with the main thread
2482 		thread = team->main_thread;
2483 	} else {
2484 		// Find the thread with the lowest ID that is still higher than the
2485 		// previous one (as long as the IDs don't wrap around, the list is
2486 		// always sorted from highest to lowest ID).
2487 		for (Thread* next = team->thread_list; next != NULL;
2488 				next = next->team_next) {
2489 			if (next->id <= lastID)
2490 				break;
2491 
2492 			thread = next;
2493 		}
2494 	}
2495 
2496 	if (thread == NULL)
2497 		return B_BAD_VALUE;
2498 
2499 	lastID = thread->id;
2500 	*_cookie = lastID;
2501 
2502 	SpinLocker threadLocker(gThreadSpinlock);
2503 	fill_thread_info(thread, info, size);
2504 
2505 	return B_OK;
2506 }
2507 
2508 
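/*!	Returns the ID of the first thread with the given name, or
	\c B_NAME_NOT_FOUND if there is none. If \a name is \c NULL, the ID of
	the calling thread is returned.
*/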
2509 thread_id
2510 find_thread(const char *name)
2511 {
2512 	struct hash_iterator iterator;
2513 	Thread *thread;
2514 	cpu_status state;
2515 
2516 	if (name == NULL)
2517 		return thread_get_current_thread_id();
2518 
2519 	state = disable_interrupts();
2520 	GRAB_THREAD_LOCK();
2521 
2522 	// ToDo: this might not be in the same order as find_thread() in BeOS
2523 	//		which could be theoretically problematic.
2524 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2525 	//		cheap either - although this function is probably used very rarely.
2526 
2527 	hash_open(sThreadHash, &iterator);
2528 	while ((thread = (Thread*)hash_next(sThreadHash, &iterator))
2529 			!= NULL) {
2530 		// Search through hash
2531 		if (!strcmp(thread->name, name)) {
2532 			thread_id id = thread->id;
2533 
2534 			RELEASE_THREAD_LOCK();
2535 			restore_interrupts(state);
2536 			return id;
2537 		}
2538 	}
2539 
2540 	RELEASE_THREAD_LOCK();
2541 	restore_interrupts(state);
2542 
2543 	return B_NAME_NOT_FOUND;
2544 }
2545 
2546 
2547 status_t
2548 rename_thread(thread_id id, const char* name)
2549 {
2550 	Thread* thread = thread_get_current_thread();
2551 
2552 	if (name == NULL)
2553 		return B_BAD_VALUE;
2554 
2555 	InterruptsSpinLocker locker(gThreadSpinlock);
2556 
2557 	if (thread->id != id) {
2558 		thread = thread_get_thread_struct_locked(id);
2559 		if (thread == NULL)
2560 			return B_BAD_THREAD_ID;
2561 		if (thread->team != thread_get_current_thread()->team)
2562 			return B_NOT_ALLOWED;
2563 	}
2564 
2565 	strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2566 
2567 	team_id teamID = thread->team->id;
2568 
2569 	locker.Unlock();
2570 
2571 	// notify listeners
2572 	sNotificationService.Notify(THREAD_NAME_CHANGED, teamID, id);
2573 		// don't pass the thread structure, as it's unsafe, if it isn't ours
2574 
2575 	return B_OK;
2576 }
2577 
2578 
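/*!	Sets the priority of the thread with the given ID and returns its
	previous priority. The requested priority is clamped to the range
	[THREAD_MIN_SET_PRIORITY, THREAD_MAX_SET_PRIORITY]. Changing the priority
	of an idle thread fails with \c B_NOT_ALLOWED; an unknown ID yields
	\c B_BAD_THREAD_ID.
*/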
2579 status_t
2580 set_thread_priority(thread_id id, int32 priority)
2581 {
2582 	Thread *thread;
2583 	int32 oldPriority;
2584 
2585 	// make sure the passed in priority is within bounds
2586 	if (priority > THREAD_MAX_SET_PRIORITY)
2587 		priority = THREAD_MAX_SET_PRIORITY;
2588 	if (priority < THREAD_MIN_SET_PRIORITY)
2589 		priority = THREAD_MIN_SET_PRIORITY;
2590 
2591 	thread = thread_get_current_thread();
2592 	if (thread->id == id) {
2593 		if (thread_is_idle_thread(thread))
2594 			return B_NOT_ALLOWED;
2595 
2596 		// It's our own thread, so we know it isn't in the run queue, and we
2597 		// can manipulate its structure directly
2598 		oldPriority = thread->priority;
2599 			// Note that this might not return the correct value if we are
2600 			// preempted here, and another thread changes our priority before
2601 			// the next line is executed.
2602 		thread->priority = thread->next_priority = priority;
2603 	} else {
2604 		InterruptsSpinLocker _(gThreadSpinlock);
2605 
2606 		thread = thread_get_thread_struct_locked(id);
2607 		if (thread == NULL)
2608 			return B_BAD_THREAD_ID;
2609 
2610 		if (thread_is_idle_thread(thread))
2611 			return B_NOT_ALLOWED;
2612 
2613 		oldPriority = thread->priority;
2614 		scheduler_set_thread_priority(thread, priority);
2615 	}
2616 
2617 	return oldPriority;
2618 }
2619 
2620 
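/*!	Puts the calling thread to sleep for (or until, depending on \a flags)
	\a timeout. Only \c B_SYSTEM_TIMEBASE is supported as \a timebase.
	\return \c B_OK if the full time was slept or the timeout already lay in
		the past, otherwise the error from the wait (e.g. \c B_INTERRUPTED,
		if \a flags allow interruption).
*/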
2621 status_t
2622 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2623 {
2624 	status_t status;
2625 
2626 	if (timebase != B_SYSTEM_TIMEBASE)
2627 		return B_BAD_VALUE;
2628 
2629 	InterruptsSpinLocker _(gThreadSpinlock);
2630 	Thread* thread = thread_get_current_thread();
2631 
2632 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, NULL);
2633 	status = thread_block_with_timeout_locked(flags, timeout);
2634 
2635 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2636 		return B_OK;
2637 
2638 	return status;
2639 }
2640 
2641 
2642 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2643 status_t
2644 snooze(bigtime_t timeout)
2645 {
2646 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2647 }
2648 
2649 
2650 /*!	snooze_until() for internal kernel use only; doesn't interrupt on
2651 	signals.
2652 */
2653 status_t
2654 snooze_until(bigtime_t timeout, int timebase)
2655 {
2656 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2657 }
2658 
2659 
2660 status_t
2661 wait_for_thread(thread_id thread, status_t *_returnCode)
2662 {
2663 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2664 }
2665 
2666 
2667 status_t
2668 suspend_thread(thread_id id)
2669 {
2670 	if (id <= 0)
2671 		return B_BAD_VALUE;
2672 
2673 	return send_signal(id, SIGSTOP);
2674 }
2675 
2676 
2677 status_t
2678 resume_thread(thread_id id)
2679 {
2680 	if (id <= 0)
2681 		return B_BAD_VALUE;
2682 
2683 	return send_signal_etc(id, SIGCONT, SIGNAL_FLAG_DONT_RESTART_SYSCALL);
2684 		// This retains compatibility to BeOS which documents the
2685 		// combination of suspend_thread() and resume_thread() to
2686 		// interrupt threads waiting on semaphores.
2687 }
2688 
2689 
2690 thread_id
2691 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2692 	void *arg)
2693 {
2694 	thread_creation_attributes attributes;
2695 	attributes.entry = (thread_entry_func)function;
2696 	attributes.name = name;
2697 	attributes.priority = priority;
2698 	attributes.args1 = arg;
2699 	attributes.args2 = NULL;
2700 	attributes.stack_address = NULL;
2701 	attributes.stack_size = 0;
2702 	attributes.team = team_get_kernel_team()->id;
2703 	attributes.thread = -1;
2704 
2705 	return create_thread(attributes, true);
2706 }
2707 
2708 
2709 int
2710 getrlimit(int resource, struct rlimit * rlp)
2711 {
2712 	status_t error = common_getrlimit(resource, rlp);
2713 	if (error != B_OK) {
2714 		errno = error;
2715 		return -1;
2716 	}
2717 
2718 	return 0;
2719 }
2720 
2721 
2722 int
2723 setrlimit(int resource, const struct rlimit * rlp)
2724 {
2725 	status_t error = common_setrlimit(resource, rlp);
2726 	if (error != B_OK) {
2727 		errno = error;
2728 		return -1;
2729 	}
2730 
2731 	return 0;
2732 }
2733 
2734 
2735 //	#pragma mark - syscalls
2736 
2737 
2738 void
2739 _user_exit_thread(status_t returnValue)
2740 {
2741 	exit_thread(returnValue);
2742 }
2743 
2744 
2745 status_t
2746 _user_kill_thread(thread_id thread)
2747 {
2748 	return kill_thread(thread);
2749 }
2750 
2751 
2752 status_t
2753 _user_resume_thread(thread_id thread)
2754 {
2755 	return resume_thread(thread);
2756 }
2757 
2758 
2759 status_t
2760 _user_suspend_thread(thread_id thread)
2761 {
2762 	return suspend_thread(thread);
2763 }
2764 
2765 
2766 status_t
2767 _user_rename_thread(thread_id thread, const char *userName)
2768 {
2769 	char name[B_OS_NAME_LENGTH];
2770 
2771 	if (!IS_USER_ADDRESS(userName)
2772 		|| userName == NULL
2773 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2774 		return B_BAD_ADDRESS;
2775 
2776 	return rename_thread(thread, name);
2777 }
2778 
2779 
2780 int32
2781 _user_set_thread_priority(thread_id thread, int32 newPriority)
2782 {
2783 	return set_thread_priority(thread, newPriority);
2784 }
2785 
2786 
2787 thread_id
2788 _user_spawn_thread(thread_creation_attributes* userAttributes)
2789 {
2790 	thread_creation_attributes attributes;
2791 	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
2792 		|| user_memcpy(&attributes, userAttributes,
2793 				sizeof(attributes)) != B_OK) {
2794 		return B_BAD_ADDRESS;
2795 	}
2796 
2797 	if (attributes.stack_size != 0
2798 		&& (attributes.stack_size < MIN_USER_STACK_SIZE
2799 			|| attributes.stack_size > MAX_USER_STACK_SIZE)) {
2800 		return B_BAD_VALUE;
2801 	}
2802 
2803 	char name[B_OS_NAME_LENGTH];
2804 	thread_id threadID;
2805 
2806 	if (!IS_USER_ADDRESS(attributes.entry) || attributes.entry == NULL
2807 		|| (attributes.stack_address != NULL
2808 			&& !IS_USER_ADDRESS(attributes.stack_address))
2809 		|| (attributes.name != NULL && (!IS_USER_ADDRESS(attributes.name)
2810 			|| user_strlcpy(name, attributes.name, B_OS_NAME_LENGTH) < 0)))
2811 		return B_BAD_ADDRESS;
2812 
2813 	attributes.name = attributes.name != NULL ? name : "user thread";
2814 	attributes.team = thread_get_current_thread()->team->id;
2815 	attributes.thread = -1;
2816 
2817 	threadID = create_thread(attributes, false);
2818 
2819 	if (threadID >= 0)
2820 		user_debug_thread_created(threadID);
2821 
2822 	return threadID;
2823 }
2824 
2825 
2826 status_t
2827 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2828 {
2829 	// NOTE: We only know the system timebase at the moment.
2830 	syscall_restart_handle_timeout_pre(flags, timeout);
2831 
2832 	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2833 
2834 	return syscall_restart_handle_timeout_post(error, timeout);
2835 }
2836 
2837 
2838 void
2839 _user_thread_yield(void)
2840 {
2841 	thread_yield(true);
2842 }
2843 
2844 
2845 status_t
2846 _user_get_thread_info(thread_id id, thread_info *userInfo)
2847 {
2848 	thread_info info;
2849 	status_t status;
2850 
2851 	if (!IS_USER_ADDRESS(userInfo))
2852 		return B_BAD_ADDRESS;
2853 
2854 	status = _get_thread_info(id, &info, sizeof(thread_info));
2855 
2856 	if (status >= B_OK
2857 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2858 		return B_BAD_ADDRESS;
2859 
2860 	return status;
2861 }
2862 
2863 
2864 status_t
2865 _user_get_next_thread_info(team_id team, int32 *userCookie,
2866 	thread_info *userInfo)
2867 {
2868 	status_t status;
2869 	thread_info info;
2870 	int32 cookie;
2871 
2872 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2873 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2874 		return B_BAD_ADDRESS;
2875 
2876 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2877 	if (status < B_OK)
2878 		return status;
2879 
2880 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2881 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2882 		return B_BAD_ADDRESS;
2883 
2884 	return status;
2885 }
2886 
2887 
2888 thread_id
2889 _user_find_thread(const char *userName)
2890 {
2891 	char name[B_OS_NAME_LENGTH];
2892 
2893 	if (userName == NULL)
2894 		return find_thread(NULL);
2895 
2896 	if (!IS_USER_ADDRESS(userName)
2897 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2898 		return B_BAD_ADDRESS;
2899 
2900 	return find_thread(name);
2901 }
2902 
2903 
2904 status_t
2905 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2906 {
2907 	status_t returnCode;
2908 	status_t status;
2909 
2910 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2911 		return B_BAD_ADDRESS;
2912 
2913 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2914 
2915 	if (status == B_OK && userReturnCode != NULL
2916 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
2917 		return B_BAD_ADDRESS;
2918 	}
2919 
2920 	return syscall_restart_handle_post(status);
2921 }
2922 
2923 
2924 bool
2925 _user_has_data(thread_id thread)
2926 {
2927 	return has_data(thread);
2928 }
2929 
2930 
2931 status_t
2932 _user_send_data(thread_id thread, int32 code, const void *buffer,
2933 	size_t bufferSize)
2934 {
2935 	if (!IS_USER_ADDRESS(buffer))
2936 		return B_BAD_ADDRESS;
2937 
2938 	return send_data_etc(thread, code, buffer, bufferSize,
2939 		B_KILL_CAN_INTERRUPT);
2940 		// supports userland buffers
2941 }
2942 
2943 
2944 status_t
2945 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2946 {
2947 	thread_id sender;
2948 	status_t code;
2949 
2950 	if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
2951 		|| !IS_USER_ADDRESS(buffer))
2952 		return B_BAD_ADDRESS;
2953 
2954 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2955 		// supports userland buffers
2956 
2957 	if (_userSender != NULL)
2958 		if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
2959 			return B_BAD_ADDRESS;
2960 
2961 	return code;
2962 }
2963 
2964 
2965 status_t
2966 _user_block_thread(uint32 flags, bigtime_t timeout)
2967 {
2968 	syscall_restart_handle_timeout_pre(flags, timeout);
2969 	flags |= B_CAN_INTERRUPT;
2970 
2971 	Thread* thread = thread_get_current_thread();
2972 
2973 	InterruptsSpinLocker locker(gThreadSpinlock);
2974 
2975 	// check whether the wait has already been satisfied
2976 	if (thread->user_thread->wait_status <= 0)
2977 		return thread->user_thread->wait_status;
2978 
2979 	// nope, so wait
2980 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");
2981 	status_t status = thread_block_with_timeout_locked(flags, timeout);
2982 	thread->user_thread->wait_status = status;
2983 
2984 	return syscall_restart_handle_timeout_post(status, timeout);
2985 }
2986 
2987 
2988 status_t
2989 _user_unblock_thread(thread_id threadID, status_t status)
2990 {
2991 	InterruptsSpinLocker locker(gThreadSpinlock);
2992 	status_t error = user_unblock_thread(threadID, status);
2993 	scheduler_reschedule_if_necessary_locked();
2994 	return error;
2995 }
2996 
2997 
2998 status_t
2999 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
3000 {
3001 	enum {
3002 		MAX_USER_THREADS_TO_UNBLOCK	= 128
3003 	};
3004 
3005 	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
3006 		return B_BAD_ADDRESS;
3007 	if (count > MAX_USER_THREADS_TO_UNBLOCK)
3008 		return B_BAD_VALUE;
3009 
3010 	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
3011 	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
3012 		return B_BAD_ADDRESS;
3013 
3014 	InterruptsSpinLocker locker(gThreadSpinlock);
3015 	for (uint32 i = 0; i < count; i++)
3016 		user_unblock_thread(threads[i], status);
3017 
3018 	scheduler_reschedule_if_necessary_locked();
3019 
3020 	return B_OK;
3021 }
3022 
3023 
3024 // TODO: the following two functions don't belong here
3025 
3026 
3027 int
3028 _user_getrlimit(int resource, struct rlimit *urlp)
3029 {
3030 	struct rlimit rl;
3031 	int ret;
3032 
3033 	if (urlp == NULL)
3034 		return EINVAL;
3035 
3036 	if (!IS_USER_ADDRESS(urlp))
3037 		return B_BAD_ADDRESS;
3038 
3039 	ret = common_getrlimit(resource, &rl);
3040 
3041 	if (ret == 0) {
3042 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
3043 		if (ret < 0)
3044 			return ret;
3045 
3046 		return 0;
3047 	}
3048 
3049 	return ret;
3050 }
3051 
3052 
3053 int
3054 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
3055 {
3056 	struct rlimit resourceLimit;
3057 
3058 	if (userResourceLimit == NULL)
3059 		return EINVAL;
3060 
3061 	if (!IS_USER_ADDRESS(userResourceLimit)
3062 		|| user_memcpy(&resourceLimit, userResourceLimit,
3063 			sizeof(struct rlimit)) < B_OK)
3064 		return B_BAD_ADDRESS;
3065 
3066 	return common_setrlimit(resource, &resourceLimit);
3067 }
3068