xref: /haiku/src/system/kernel/thread.cpp (revision 425b1199b0cb2116ac84cd286d29569e62d86774)
1 /*
2  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*! Threading routines */
10 
11 
12 #include <thread.h>
13 
14 #include <errno.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <sys/resource.h>
19 
20 #include <OS.h>
21 
22 #include <util/AutoLock.h>
23 #include <util/khash.h>
24 
25 #include <arch/debug.h>
26 #include <boot/kernel_args.h>
27 #include <condition_variable.h>
28 #include <cpu.h>
29 #include <int.h>
30 #include <kimage.h>
31 #include <kscheduler.h>
32 #include <ksignal.h>
33 #include <smp.h>
34 #include <syscalls.h>
35 #include <syscall_restart.h>
36 #include <team.h>
37 #include <tls.h>
38 #include <user_runtime.h>
39 #include <vfs.h>
40 #include <vm.h>
41 #include <vm_address_space.h>
42 #include <wait_for_objects.h>
43 
44 
45 //#define TRACE_THREAD
46 #ifdef TRACE_THREAD
47 #	define TRACE(x) dprintf x
48 #else
49 #	define TRACE(x) ;
50 #endif
51 
52 
53 #define THREAD_MAX_MESSAGE_SIZE		65536
54 
55 // used to pass messages between thread_exit and thread_exit2
56 
57 struct thread_exit_args {
58 	struct thread	*thread;
59 	area_id			old_kernel_stack;
60 	uint32			death_stack;
61 	sem_id			death_sem;
62 	team_id			original_team_id;
63 };
64 
65 struct thread_key {
66 	thread_id id;
67 };
68 
69 // global
70 spinlock thread_spinlock = 0;
71 
72 // thread list
73 static struct thread sIdleThreads[B_MAX_CPU_COUNT];
74 static hash_table *sThreadHash = NULL;
75 static thread_id sNextThreadID = 1;
76 
77 // some arbitrarily chosen limits - should probably depend on the available
78 // memory (the limit is not yet enforced)
79 static int32 sMaxThreads = 4096;
80 static int32 sUsedThreads = 0;
81 
82 // death stacks - used temporarily as a thread cleans itself up
83 struct death_stack {
84 	area_id	area;
85 	addr_t	address;
86 	bool	in_use;
87 };
88 static struct death_stack *sDeathStacks;
89 static unsigned int sNumDeathStacks;
90 static unsigned int volatile sDeathStackBitmap;
91 static sem_id sDeathStackSem;
92 static spinlock sDeathStackLock = 0;
93 
94 // The dead queue is used as a pool from which to retrieve and reuse previously
95 // allocated thread structs when creating a new thread. It should be gone once
96 // the slab allocator is in.
97 struct thread_queue dead_q;
98 
99 static void thread_kthread_entry(void);
100 static void thread_kthread_exit(void);
101 
102 
103 /*!
104 	Inserts a thread into a team.
105 	You must hold the team lock when you call this function.
106 */
107 static void
108 insert_thread_into_team(struct team *team, struct thread *thread)
109 {
110 	thread->team_next = team->thread_list;
111 	team->thread_list = thread;
112 	team->num_threads++;
113 
114 	if (team->num_threads == 1) {
115 		// this was the first thread
116 		team->main_thread = thread;
117 	}
118 	thread->team = team;
119 }
120 
121 
122 /*!
123 	Removes a thread from a team.
124 	You must hold the team lock when you call this function.
125 */
126 static void
127 remove_thread_from_team(struct team *team, struct thread *thread)
128 {
129 	struct thread *temp, *last = NULL;
130 
131 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
132 		if (temp == thread) {
133 			if (last == NULL)
134 				team->thread_list = temp->team_next;
135 			else
136 				last->team_next = temp->team_next;
137 
138 			team->num_threads--;
139 			break;
140 		}
141 		last = temp;
142 	}
143 }
144 
145 
146 static int
147 thread_struct_compare(void *_t, const void *_key)
148 {
149 	struct thread *thread = (struct thread*)_t;
150 	const struct thread_key *key = (const struct thread_key*)_key;
151 
152 	if (thread->id == key->id)
153 		return 0;
154 
155 	return 1;
156 }
157 
158 
159 static uint32
160 thread_struct_hash(void *_t, const void *_key, uint32 range)
161 {
162 	struct thread *thread = (struct thread*)_t;
163 	const struct thread_key *key = (const struct thread_key*)_key;
164 
165 	if (thread != NULL)
166 		return thread->id % range;
167 
168 	return (uint32)key->id % range;
169 }
170 
171 
172 static void
173 reset_signals(struct thread *thread)
174 {
175 	thread->sig_pending = 0;
176 	thread->sig_block_mask = 0;
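	// clear the sigaction entries for all 32 signals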
177 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
178 	thread->signal_stack_base = 0;
179 	thread->signal_stack_size = 0;
180 	thread->signal_stack_enabled = false;
181 }
182 
183 
184 /*!
185 	Allocates and fills in a thread structure (or reuses one from the
186 	dead queue).
187 
188 	\param threadID The ID to be assigned to the new thread. If
189 		  \code < 0 \endcode a fresh one is allocated.
190 	\param inthread The thread struct to initialize; if \c NULL, a new one is allocated.
191 */
192 
193 static struct thread *
194 create_thread_struct(struct thread *inthread, const char *name,
195 	thread_id threadID, struct cpu_ent *cpu)
196 {
197 	struct thread *thread;
198 	cpu_status state;
199 	char temp[64];
200 
201 	if (inthread == NULL) {
202 		// try to recycle one from the dead queue first
203 		state = disable_interrupts();
204 		GRAB_THREAD_LOCK();
205 		thread = thread_dequeue(&dead_q);
206 		RELEASE_THREAD_LOCK();
207 		restore_interrupts(state);
208 
209 		// if not, create a new one
210 		if (thread == NULL) {
211 			thread = (struct thread *)malloc(sizeof(struct thread));
212 			if (thread == NULL)
213 				return NULL;
214 		}
215 	} else {
216 		thread = inthread;
217 	}
218 
219 	if (name != NULL)
220 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
221 	else
222 		strcpy(thread->name, "unnamed thread");
223 
224 	thread->flags = 0;
225 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
226 	thread->team = NULL;
227 	thread->cpu = cpu;
228 	thread->fault_handler = 0;
229 	thread->page_faults_allowed = 1;
230 	thread->kernel_stack_area = -1;
231 	thread->kernel_stack_base = 0;
232 	thread->user_stack_area = -1;
233 	thread->user_stack_base = 0;
234 	thread->user_local_storage = 0;
235 	thread->kernel_errno = 0;
236 	thread->team_next = NULL;
237 	thread->queue_next = NULL;
238 	thread->priority = thread->next_priority = -1;
239 	thread->args1 = NULL;  thread->args2 = NULL;
240 	thread->alarm.period = 0;
241 	reset_signals(thread);
242 	thread->in_kernel = true;
243 	thread->was_yielded = false;
244 	thread->user_time = 0;
245 	thread->kernel_time = 0;
246 	thread->last_time = 0;
247 	thread->exit.status = 0;
248 	thread->exit.reason = 0;
249 	thread->exit.signal = 0;
250 	list_init(&thread->exit.waiters);
251 	thread->select_infos = NULL;
252 
253 	sprintf(temp, "thread_%ld_retcode_sem", thread->id);
254 	thread->exit.sem = create_sem(0, temp);
255 	if (thread->exit.sem < B_OK)
256 		goto err1;
257 
258 	sprintf(temp, "%s send", thread->name);
259 	thread->msg.write_sem = create_sem(1, temp);
260 	if (thread->msg.write_sem < B_OK)
261 		goto err2;
262 
263 	sprintf(temp, "%s receive", thread->name);
264 	thread->msg.read_sem = create_sem(0, temp);
265 	if (thread->msg.read_sem < B_OK)
266 		goto err3;
267 
268 	if (arch_thread_init_thread_struct(thread) < B_OK)
269 		goto err4;
270 
271 	return thread;
272 
273 err4:
274 	delete_sem(thread->msg.read_sem);
275 err3:
276 	delete_sem(thread->msg.write_sem);
277 err2:
278 	delete_sem(thread->exit.sem);
279 err1:
280 	// ToDo: put them in the dead queue instead?
281 	if (inthread == NULL)
282 		free(thread);
283 	return NULL;
284 }
285 
286 
287 static void
288 delete_thread_struct(struct thread *thread)
289 {
290 	delete_sem(thread->exit.sem);
291 	delete_sem(thread->msg.write_sem);
292 	delete_sem(thread->msg.read_sem);
293 
294 	// ToDo: put them in the dead queue instead?
295 	free(thread);
296 }
297 
298 
299 /*! This function gets run by a new thread before anything else */
300 static void
301 thread_kthread_entry(void)
302 {
303 	struct thread *thread = thread_get_current_thread();
304 
305 	// simulates the thread spinlock release that would occur if the thread had been
306 	// rescheduled from. The reschedule didn't happen because the thread is new.
307 	RELEASE_THREAD_LOCK();
308 
309 	// start tracking time
310 	thread->last_time = system_time();
311 
312 	enable_interrupts(); // this essentially simulates a return-from-interrupt
313 }
314 
315 
316 static void
317 thread_kthread_exit(void)
318 {
319 	struct thread *thread = thread_get_current_thread();
320 
321 	thread->exit.reason = THREAD_RETURN_EXIT;
322 	thread_exit();
323 }
324 
325 
326 /*!
327 	Initializes the thread and jumps to its userspace entry point.
328 	This function is called at creation time of every user thread,
329 	but not for a team's main thread.
330 */
331 static int
332 _create_user_thread_kentry(void)
333 {
334 	struct thread *thread = thread_get_current_thread();
335 
336 	// a signal may have been delivered here
337 	thread_at_kernel_exit();
338 
339 	// jump to the entry point in user space
340 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
341 		thread->args1, thread->args2);
342 
343 	// only get here if the above call fails
344 	return 0;
345 }
346 
347 
348 /*! Initializes the thread and calls its kernel space entry point. */
349 static int
350 _create_kernel_thread_kentry(void)
351 {
352 	struct thread *thread = thread_get_current_thread();
353 	int (*func)(void *args) = (int (*)(void *))thread->entry;
354 
355 	// call the entry function with the appropriate args
356 	return func(thread->args1);
357 }
358 
359 
360 /*!
361 	Creates a new thread in the team with the specified team ID.
362 
363 	\param threadID The ID to be assigned to the new thread. If
364 		  \code < 0 \endcode a fresh one is allocated.
365 */
366 static thread_id
367 create_thread(const char *name, team_id teamID, thread_entry_func entry,
368 	void *args1, void *args2, int32 priority, bool kernel, thread_id threadID)
369 {
370 	struct thread *thread, *currentThread;
371 	struct team *team;
372 	cpu_status state;
373 	char stack_name[B_OS_NAME_LENGTH];
374 	status_t status;
375 	bool abort = false;
376 	bool debugNewThread = false;
377 
378 	TRACE(("create_thread(%s, id = %ld, %s)\n", name, threadID,
379 		kernel ? "kernel" : "user"));
380 
381 	thread = create_thread_struct(NULL, name, threadID, NULL);
382 	if (thread == NULL)
383 		return B_NO_MEMORY;
384 
385 	thread->priority = priority == -1 ? B_NORMAL_PRIORITY : priority;
386 	thread->next_priority = thread->priority;
387 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
388 	thread->state = B_THREAD_SUSPENDED;
389 	thread->next_state = B_THREAD_SUSPENDED;
390 
391 	// init debug structure
392 	clear_thread_debug_info(&thread->debug_info, false);
393 
394 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", name, thread->id);
395 	thread->kernel_stack_area = create_area(stack_name,
396 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
397 		KERNEL_STACK_SIZE, B_FULL_LOCK,
398 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
399 
400 	if (thread->kernel_stack_area < 0) {
401 		// we're not yet part of a team, so we can just bail out
402 		status = thread->kernel_stack_area;
403 
404 		dprintf("create_thread: error creating kernel stack: %s!\n",
405 			strerror(status));
406 
407 		delete_thread_struct(thread);
408 		return status;
409 	}
410 
411 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE;
412 
413 	state = disable_interrupts();
414 	GRAB_THREAD_LOCK();
415 
416 	// If the new thread belongs to the same team as the current thread,
417 	// it may inherit some of the thread debug flags.
418 	currentThread = thread_get_current_thread();
419 	if (currentThread && currentThread->team->id == teamID) {
420 		// inherit all user flags...
421 		int32 debugFlags = currentThread->debug_info.flags
422 			& B_THREAD_DEBUG_USER_FLAG_MASK;
423 
424 		// ... save the syscall tracing flags, unless explicitly specified
425 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
426 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
427 				| B_THREAD_DEBUG_POST_SYSCALL);
428 		}
429 
430 		thread->debug_info.flags = debugFlags;
431 
432 		// stop the new thread, if desired
433 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
434 	}
435 
436 	// insert into global list
437 	hash_insert(sThreadHash, thread);
438 	sUsedThreads++;
439 	RELEASE_THREAD_LOCK();
440 
441 	GRAB_TEAM_LOCK();
442 	// look at the team, make sure it's not being deleted
443 	team = team_get_team_struct_locked(teamID);
444 	if (team != NULL && team->state != TEAM_STATE_DEATH) {
445 		// Debug the new thread, if the parent thread required that (see above),
446 		// or the respective global team debug flag is set. But only if a
447 		// debugger is installed for the team.
448 		debugNewThread |= (atomic_get(&team->debug_info.flags)
449 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
450 		if (debugNewThread
451 			&& (atomic_get(&team->debug_info.flags)
452 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
453 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
454 		}
455 
456 		insert_thread_into_team(team, thread);
457 	} else
458 		abort = true;
459 
460 	RELEASE_TEAM_LOCK();
461 	if (abort) {
462 		GRAB_THREAD_LOCK();
463 		hash_remove(sThreadHash, thread);
464 		RELEASE_THREAD_LOCK();
465 	}
466 	restore_interrupts(state);
467 	if (abort) {
468 		delete_area(thread->kernel_stack_area);
469 		delete_thread_struct(thread);
470 		return B_BAD_TEAM_ID;
471 	}
472 
473 	thread->args1 = args1;
474 	thread->args2 = args2;
475 	thread->entry = entry;
476 	status = thread->id;
477 
478 	if (kernel) {
479 		// this sets up an initial kthread stack that runs the entry
480 
481 		// Note: whatever function wants to set up a user stack later for this
482 		// thread must initialize the TLS for it
483 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
484 			&thread_kthread_entry, &thread_kthread_exit);
485 	} else {
486 		// create user stack
487 
488 		// the stack will be between USER_STACK_REGION and the main thread stack area
489 		// (the user stack of the main thread is created in team_create_team())
490 		thread->user_stack_base = USER_STACK_REGION;
491 		thread->user_stack_size = USER_STACK_SIZE;
492 
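		// TLS_SIZE extra bytes are reserved in the stack area for the thread
		// local storage (set up by arch_thread_init_tls() below)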
493 		snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack", name, thread->id);
494 		thread->user_stack_area = create_area_etc(team, stack_name,
495 				(void **)&thread->user_stack_base, B_BASE_ADDRESS,
496 				thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
497 				B_READ_AREA | B_WRITE_AREA | B_STACK_AREA);
498 		if (thread->user_stack_area < B_OK
499 			|| arch_thread_init_tls(thread) < B_OK) {
500 			// great, we have a fully running thread without a (usable) stack
501 			dprintf("create_thread: unable to create proper user stack!\n");
502 			status = thread->user_stack_area;
503 			kill_thread(thread->id);
504 		}
505 
506 		user_debug_update_new_thread_flags(thread->id);
507 
508 		// copy the user entry over to the args field in the thread struct
509 		// the function this will call will immediately switch the thread into
510 		// user space.
511 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
512 			&thread_kthread_entry, &thread_kthread_exit);
513 	}
514 
515 	return status;
516 }
517 
518 
519 /*!
520 	Finds a free death stack for us and allocates it.
521 	Must be called with interrupts enabled.
522 */
523 static uint32
524 get_death_stack(void)
525 {
526 	cpu_status state;
527 	uint32 bit;
528 	int32 i;
529 
530 	acquire_sem(sDeathStackSem);
531 
532 	// grab the death stack and thread locks, find a free spot and release
533 
534 	state = disable_interrupts();
535 
536 	acquire_spinlock(&sDeathStackLock);
537 	GRAB_THREAD_LOCK();
538 
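	// isolate the lowest clear bit in the bitmap -- that is a free death
	// stack -- and set it to claim the stack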
539 	bit = sDeathStackBitmap;
540 	bit = (~bit) & ~((~bit) - 1);
541 	sDeathStackBitmap |= bit;
542 
543 	RELEASE_THREAD_LOCK();
544 	release_spinlock(&sDeathStackLock);
545 
546 	restore_interrupts(state);
547 
548 	// sanity checks
549 	if (!bit)
550 		panic("get_death_stack: couldn't find free stack!\n");
551 
552 	if (bit & (bit - 1))
553 		panic("get_death_stack: impossible bitmap result!\n");
554 
555 	// bit to number
556 	for (i = -1; bit; i++) {
557 		bit >>= 1;
558 	}
559 
560 	TRACE(("get_death_stack: returning %#lx\n", sDeathStacks[i].address));
561 
562 	return (uint32)i;
563 }
564 
565 
566 /*!	Returns the thread's death stack to the pool.
567 	Interrupts must be disabled and the sDeathStackLock must be held.
568 */
569 static void
570 put_death_stack(uint32 index)
571 {
572 	TRACE(("put_death_stack...: passed %lu\n", index));
573 
574 	if (index >= sNumDeathStacks)
575 		panic("put_death_stack: passed invalid stack index %ld\n", index);
576 
577 	if (!(sDeathStackBitmap & (1 << index)))
578 		panic("put_death_stack: passed invalid stack index %ld\n", index);
579 
580 	GRAB_THREAD_LOCK();
581 	sDeathStackBitmap &= ~(1 << index);
582 	RELEASE_THREAD_LOCK();
583 
584 	release_sem_etc(sDeathStackSem, 1, B_DO_NOT_RESCHEDULE);
585 		// we must not hold the thread lock when releasing a semaphore
586 }
587 
588 
589 static void
590 thread_exit2(void *_args)
591 {
592 	struct thread_exit_args args;
593 
594 	// copy the arguments over, since the source is probably on the kernel
595 	// stack we're about to delete
596 	memcpy(&args, _args, sizeof(struct thread_exit_args));
597 
598 	// we can't leave interrupts disabled at this point
599 	enable_interrupts();
600 
601 	TRACE(("thread_exit2, running on death stack %#lx\n", args.death_stack));
602 
603 	// delete the old kernel stack area
604 	TRACE(("thread_exit2: deleting old kernel stack id %ld for thread %ld\n",
605 		args.old_kernel_stack, args.thread->id));
606 
607 	delete_area(args.old_kernel_stack);
608 
609 	// remove this thread from all of the global lists
610 	TRACE(("thread_exit2: removing thread %ld from global lists\n",
611 		args.thread->id));
612 
613 	disable_interrupts();
614 	GRAB_TEAM_LOCK();
615 
616 	remove_thread_from_team(team_get_kernel_team(), args.thread);
617 
618 	RELEASE_TEAM_LOCK();
619 	enable_interrupts();
620 		// needed for the debugger notification below
621 
622 	TRACE(("thread_exit2: done removing thread from lists\n"));
623 
624 	if (args.death_sem >= 0)
625 		release_sem_etc(args.death_sem, 1, B_DO_NOT_RESCHEDULE);
626 
627 	// notify the debugger
628 	if (args.original_team_id >= 0
629 		&& args.original_team_id != team_get_kernel_team_id()) {
630 		user_debug_thread_deleted(args.original_team_id, args.thread->id);
631 	}
632 
633 	disable_interrupts();
634 
635 	// Set the next state to be gone: this will cause the thread structure
636 	// to be returned to the dead thread pool upon reschedule.
637 	// Note: we need to have disabled interrupts at this point, or else
638 	// we could get rescheduled too early.
639 	args.thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
640 
641 	// return the death stack and reschedule one last time
642 
643 	// Note that we need to hold sDeathStackLock until we've got the thread
644 	// lock. Otherwise someone else might grab our stack in the meantime.
645 	acquire_spinlock(&sDeathStackLock);
646 	put_death_stack(args.death_stack);
647 
648 	GRAB_THREAD_LOCK();
649 	release_spinlock(&sDeathStackLock);
650 
651 	scheduler_reschedule();
652 		// requires thread lock to be held
653 
654 	// never get to here
655 	panic("thread_exit2: made it where it shouldn't have!\n");
656 }
657 
658 
659 static sem_id
660 get_thread_wait_sem(struct thread* thread)
661 {
662 	if (thread->state == B_THREAD_WAITING
663 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
664 		return (sem_id)(addr_t)thread->wait.object;
665 	}
666 	return -1;
667 }
668 
669 
670 static ConditionVariable*
671 get_thread_wait_cvar(struct thread* thread)
672 {
673 	if (thread->state == B_THREAD_WAITING
674 		&& thread->wait.type == THREAD_BLOCK_TYPE_CONDITION_VARIABLE) {
675 		return (ConditionVariable*)thread->wait.object;
676 	}
677 	return NULL;
678 }
679 
680 
681 /*!
682 	Fills the thread_info structure with information from the specified
683 	thread.
684 	The thread lock must be held when called.
685 */
686 static void
687 fill_thread_info(struct thread *thread, thread_info *info, size_t size)
688 {
689 	info->thread = thread->id;
690 	info->team = thread->team->id;
691 
692 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
693 
694 	if (thread->state == B_THREAD_WAITING) {
695 		info->state = B_THREAD_WAITING;
696 
697 		switch (thread->wait.type) {
698 			case THREAD_BLOCK_TYPE_SNOOZE:
699 				info->state = B_THREAD_ASLEEP;
700 				break;
701 
702 			case THREAD_BLOCK_TYPE_SEMAPHORE:
703 			{
704 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
705 				if (sem == thread->msg.read_sem)
706 					info->state = B_THREAD_RECEIVING;
707 				break;
708 			}
709 
710 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
711 			default:
712 				break;
713 		}
714 	} else
715 		info->state = (thread_state)thread->state;
716 
717 	info->priority = thread->priority;
718 	info->user_time = thread->user_time;
719 	info->kernel_time = thread->kernel_time;
720 	info->stack_base = (void *)thread->user_stack_base;
721 	info->stack_end = (void *)(thread->user_stack_base
722 		+ thread->user_stack_size);
723 	info->sem = get_thread_wait_sem(thread);
724 }
725 
726 static status_t
727 send_data_etc(thread_id id, int32 code, const void *buffer,
728 	size_t bufferSize, int32 flags)
729 {
730 	struct thread *target;
731 	sem_id cachedSem;
732 	cpu_status state;
733 	status_t status;
734 	cbuf *data;
735 
736 	state = disable_interrupts();
737 	GRAB_THREAD_LOCK();
738 	target = thread_get_thread_struct_locked(id);
739 	if (!target) {
740 		RELEASE_THREAD_LOCK();
741 		restore_interrupts(state);
742 		return B_BAD_THREAD_ID;
743 	}
744 	cachedSem = target->msg.write_sem;
745 	RELEASE_THREAD_LOCK();
746 	restore_interrupts(state);
747 
748 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
749 		return B_NO_MEMORY;
750 
751 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
752 	if (status == B_INTERRUPTED) {
753 		// We got interrupted by a signal
754 		return status;
755 	}
756 	if (status != B_OK) {
757 		// Any other acquisition problems may be due to thread deletion
758 		return B_BAD_THREAD_ID;
759 	}
760 
761 	if (bufferSize > 0) {
762 		data = cbuf_get_chain(bufferSize);
763 		if (data == NULL)
764 			return B_NO_MEMORY;
765 		status = cbuf_user_memcpy_to_chain(data, 0, buffer, bufferSize);
766 		if (status < B_OK) {
767 			cbuf_free_chain(data);
768 			return B_NO_MEMORY;
769 		}
770 	} else
771 		data = NULL;
772 
773 	state = disable_interrupts();
774 	GRAB_THREAD_LOCK();
775 
776 	// The target thread could have been deleted at this point
777 	target = thread_get_thread_struct_locked(id);
778 	if (target == NULL) {
779 		RELEASE_THREAD_LOCK();
780 		restore_interrupts(state);
781 		cbuf_free_chain(data);
782 		return B_BAD_THREAD_ID;
783 	}
784 
785 	// Save the message information
786 	target->msg.sender = thread_get_current_thread()->id;
787 	target->msg.code = code;
788 	target->msg.size = bufferSize;
789 	target->msg.buffer = data;
790 	cachedSem = target->msg.read_sem;
791 
792 	RELEASE_THREAD_LOCK();
793 	restore_interrupts(state);
794 
795 	release_sem(cachedSem);
796 	return B_OK;
797 }
798 
799 
800 static int32
801 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
802 	int32 flags)
803 {
804 	struct thread *thread = thread_get_current_thread();
805 	status_t status;
806 	size_t size;
807 	int32 code;
808 
809 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
810 	if (status < B_OK) {
811 		// Actually, we're not supposed to return error codes
812 		// but since the only reason this can fail is that we
813 		// were killed, it's probably okay to do so (but also
814 		// meaningless).
815 		return status;
816 	}
817 
818 	if (buffer != NULL && bufferSize != 0) {
819 		size = min_c(bufferSize, thread->msg.size);
820 		status = cbuf_user_memcpy_from_chain(buffer, thread->msg.buffer,
821 			0, size);
822 		if (status < B_OK) {
823 			cbuf_free_chain(thread->msg.buffer);
824 			release_sem(thread->msg.write_sem);
825 			return status;
826 		}
827 	}
828 
829 	*_sender = thread->msg.sender;
830 	code = thread->msg.code;
831 
832 	cbuf_free_chain(thread->msg.buffer);
833 	release_sem(thread->msg.write_sem);
834 
835 	return code;
836 }
837 
838 
839 static status_t
840 common_getrlimit(int resource, struct rlimit * rlp)
841 {
842 	if (!rlp)
843 		return B_BAD_ADDRESS;
844 
845 	switch (resource) {
846 		case RLIMIT_NOFILE:
847 		case RLIMIT_NOVMON:
848 			return vfs_getrlimit(resource, rlp);
849 
850 		case RLIMIT_CORE:
851 			rlp->rlim_cur = 0;
852 			rlp->rlim_max = 0;
853 			return B_OK;
854 
855 		case RLIMIT_STACK:
856 		{
857 			struct thread *thread = thread_get_current_thread();
858 			if (!thread)
859 				return B_ERROR;
860 			rlp->rlim_cur = thread->user_stack_size;
861 			rlp->rlim_max = thread->user_stack_size;
862 			return B_OK;
863 		}
864 
865 		default:
866 			return EINVAL;
867 	}
868 
869 	return B_OK;
870 }
871 
872 
873 static status_t
874 common_setrlimit(int resource, const struct rlimit * rlp)
875 {
876 	if (!rlp)
877 		return B_BAD_ADDRESS;
878 
879 	switch (resource) {
880 		case RLIMIT_NOFILE:
881 		case RLIMIT_NOVMON:
882 			return vfs_setrlimit(resource, rlp);
883 
884 		case RLIMIT_CORE:
885 			// We don't support core files, so only allow setting the limits to 0/0.
886 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
887 				return EINVAL;
888 			return B_OK;
889 
890 		default:
891 			return EINVAL;
892 	}
893 
894 	return B_OK;
895 }
896 
897 
898 //	#pragma mark - debugger calls
899 
900 
901 static int
902 make_thread_unreal(int argc, char **argv)
903 {
904 	struct thread *thread;
905 	struct hash_iterator i;
906 	int32 id = -1;
907 
908 	if (argc > 2) {
909 		print_debugger_command_usage(argv[0]);
910 		return 0;
911 	}
912 
913 	if (argc > 1)
914 		id = strtoul(argv[1], NULL, 0);
915 
916 	hash_open(sThreadHash, &i);
917 
918 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
919 		if (id != -1 && thread->id != id)
920 			continue;
921 
922 		if (thread->priority > B_DISPLAY_PRIORITY) {
923 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
924 			kprintf("thread %ld made unreal\n", thread->id);
925 		}
926 	}
927 
928 	hash_close(sThreadHash, &i, false);
929 	return 0;
930 }
931 
932 
933 static int
934 set_thread_prio(int argc, char **argv)
935 {
936 	struct thread *thread;
937 	struct hash_iterator i;
938 	int32 id;
939 	int32 prio;
940 
941 	if (argc > 3 || argc < 2) {
942 		print_debugger_command_usage(argv[0]);
943 		return 0;
944 	}
945 
946 	prio = strtoul(argv[1], NULL, 0);
947 	if (prio > B_MAX_PRIORITY)
948 		prio = B_MAX_PRIORITY;
949 	if (prio < B_MIN_PRIORITY)
950 		prio = B_MIN_PRIORITY;
951 
952 	if (argc > 2)
953 		id = strtoul(argv[2], NULL, 0);
954 	else
955 		id = thread_get_current_thread()->id;
956 
957 	hash_open(sThreadHash, &i);
958 
959 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
960 		if (thread->id != id)
961 			continue;
962 		thread->priority = thread->next_priority = prio;
963 		kprintf("thread %ld set to priority %ld\n", id, prio);
964 		break;
965 	}
966 	if (!thread)
967 		kprintf("thread %ld (%#lx) not found\n", id, id);
968 
969 	hash_close(sThreadHash, &i, false);
970 	return 0;
971 }
972 
973 
974 static int
975 make_thread_suspended(int argc, char **argv)
976 {
977 	struct thread *thread;
978 	struct hash_iterator i;
979 	int32 id;
980 
981 	if (argc > 2) {
982 		print_debugger_command_usage(argv[0]);
983 		return 0;
984 	}
985 
986 	if (argc == 1)
987 		id = thread_get_current_thread()->id;
988 	else
989 		id = strtoul(argv[1], NULL, 0);
990 
991 	hash_open(sThreadHash, &i);
992 
993 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
994 		if (thread->id != id)
995 			continue;
996 
997 		thread->next_state = B_THREAD_SUSPENDED;
998 		kprintf("thread %ld suspended\n", id);
999 		break;
1000 	}
1001 	if (!thread)
1002 		kprintf("thread %ld (%#lx) not found\n", id, id);
1003 
1004 	hash_close(sThreadHash, &i, false);
1005 	return 0;
1006 }
1007 
1008 
1009 static int
1010 make_thread_resumed(int argc, char **argv)
1011 {
1012 	struct thread *thread;
1013 	struct hash_iterator i;
1014 	int32 id;
1015 
1016 	if (argc != 2) {
1017 		print_debugger_command_usage(argv[0]);
1018 		return 0;
1019 	}
1020 
1021 	// force user to enter a thread id, as using
1022 	// the current thread is usually not intended
1023 	id = strtoul(argv[1], NULL, 0);
1024 
1025 	hash_open(sThreadHash, &i);
1026 
1027 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1028 		if (thread->id != id)
1029 			continue;
1030 
1031 		if (thread->state == B_THREAD_SUSPENDED) {
1032 			scheduler_enqueue_in_run_queue(thread);
1033 			kprintf("thread %ld resumed\n", thread->id);
1034 		}
1035 		break;
1036 	}
1037 	if (!thread)
1038 		kprintf("thread %ld (%#lx) not found\n", id, id);
1039 
1040 	hash_close(sThreadHash, &i, false);
1041 	return 0;
1042 }
1043 
1044 
1045 static int
1046 drop_into_debugger(int argc, char **argv)
1047 {
1048 	status_t err;
1049 	int32 id;
1050 
1051 	if (argc > 2) {
1052 		print_debugger_command_usage(argv[0]);
1053 		return 0;
1054 	}
1055 
1056 	if (argc == 1)
1057 		id = thread_get_current_thread()->id;
1058 	else
1059 		id = strtoul(argv[1], NULL, 0);
1060 
1061 	err = _user_debug_thread(id);
1062 	if (err)
1063 		kprintf("drop failed\n");
1064 	else
1065 		kprintf("thread %ld dropped into user debugger\n", id);
1066 
1067 	return 0;
1068 }
1069 
1070 
1071 static const char *
1072 state_to_text(struct thread *thread, int32 state)
1073 {
1074 	switch (state) {
1075 		case B_THREAD_READY:
1076 			return "ready";
1077 
1078 		case B_THREAD_RUNNING:
1079 			return "running";
1080 
1081 		case B_THREAD_WAITING:
1082 		{
1083 			switch (thread->wait.type) {
1084 				case THREAD_BLOCK_TYPE_SNOOZE:
1085 					return "zzz";
1086 
1087 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1088 				{
1089 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1090 					if (sem == thread->msg.read_sem)
1091 						return "receive";
1092 					break;
1093 				}
1094 
1095 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1096 				default:
1097 					break;
1098 			}
1099 
1100 			return "waiting";
1101 		}
1102 
1103 		case B_THREAD_SUSPENDED:
1104 			return "suspended";
1105 
1106 		case THREAD_STATE_FREE_ON_RESCHED:
1107 			return "death";
1108 
1109 		default:
1110 			return "UNKNOWN";
1111 	}
1112 }
1113 
1114 
1115 static void
1116 _dump_thread_info(struct thread *thread)
1117 {
1118 	struct death_entry *death = NULL;
1119 
1120 	kprintf("THREAD: %p\n", thread);
1121 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1122 	kprintf("name:               \"%s\"\n", thread->name);
1123 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1124 		thread->all_next, thread->team_next, thread->queue_next);
1125 	kprintf("priority:           %ld (next %ld)\n", thread->priority, thread->next_priority);
1126 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1127 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1128 	kprintf("cpu:                %p ", thread->cpu);
1129 	if (thread->cpu)
1130 		kprintf("(%d)\n", thread->cpu->cpu_num);
1131 	else
1132 		kprintf("\n");
1133 	kprintf("sig_pending:        %#lx (blocked: %#lx)\n", thread->sig_pending,
1134 		thread->sig_block_mask);
1135 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1136 	kprintf("sem blocking:       %ld\n", get_thread_wait_sem(thread));
1137 	kprintf("condition variable: %p\n", get_thread_wait_cvar(thread));
1138 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1139 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1140 	kprintf("entry:              %p\n", (void *)thread->entry);
1141 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1142 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1143 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1144 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1145 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1146 	kprintf("  exit.waiters:\n");
1147 	while ((death = (struct death_entry*)list_get_next_item(
1148 			&thread->exit.waiters, death)) != NULL) {
1149 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1150 	}
1151 
1152 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1153 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1154 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1155 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1156 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1157 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1158 		strerror(thread->kernel_errno));
1159 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1160 	kprintf("user_time:          %Ld\n", thread->user_time);
1161 	kprintf("flags:              0x%lx\n", thread->flags);
1162 	kprintf("architecture dependent section:\n");
1163 	arch_thread_dump_info(&thread->arch_info);
1164 }
1165 
1166 
1167 static int
1168 dump_thread_info(int argc, char **argv)
1169 {
1170 	const char *name = NULL;
1171 	struct thread *thread;
1172 	int32 id = -1;
1173 	struct hash_iterator i;
1174 	bool found = false;
1175 
1176 	if (argc > 2) {
1177 		print_debugger_command_usage(argv[0]);
1178 		return 0;
1179 	}
1180 
1181 	if (argc == 1) {
1182 		_dump_thread_info(thread_get_current_thread());
1183 		return 0;
1184 	} else {
1185 		name = argv[1];
1186 		id = strtoul(argv[1], NULL, 0);
1187 
1188 		if (IS_KERNEL_ADDRESS(id)) {
1189 			// semi-hack
1190 			_dump_thread_info((struct thread *)id);
1191 			return 0;
1192 		}
1193 	}
1194 
1195 	// walk through the thread list, trying to match name or id
1196 	hash_open(sThreadHash, &i);
1197 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1198 		if ((name != NULL && !strcmp(name, thread->name)) || thread->id == id) {
1199 			_dump_thread_info(thread);
1200 			found = true;
1201 			break;
1202 		}
1203 	}
1204 	hash_close(sThreadHash, &i, false);
1205 
1206 	if (!found)
1207 		kprintf("thread \"%s\" (%ld) doesn't exist!\n", argv[1], id);
1208 	return 0;
1209 }
1210 
1211 
1212 static int
1213 dump_thread_list(int argc, char **argv)
1214 {
1215 	struct thread *thread;
1216 	struct hash_iterator i;
1217 	bool realTimeOnly = false;
1218 	bool calling = false;
1219 	const char *callSymbol = NULL;
1220 	addr_t callStart = 0;
1221 	addr_t callEnd = 0;
1222 	int32 requiredState = 0;
1223 	team_id team = -1;
1224 	sem_id sem = -1;
1225 
1226 	if (!strcmp(argv[0], "realtime"))
1227 		realTimeOnly = true;
1228 	else if (!strcmp(argv[0], "ready"))
1229 		requiredState = B_THREAD_READY;
1230 	else if (!strcmp(argv[0], "running"))
1231 		requiredState = B_THREAD_RUNNING;
1232 	else if (!strcmp(argv[0], "waiting")) {
1233 		requiredState = B_THREAD_WAITING;
1234 
1235 		if (argc > 1) {
1236 			sem = strtoul(argv[1], NULL, 0);
1237 			if (sem == 0)
1238 				kprintf("ignoring invalid semaphore argument.\n");
1239 		}
1240 	} else if (!strcmp(argv[0], "calling")) {
1241 		if (argc < 2) {
1242 			kprintf("Need to give a symbol name or start and end arguments.\n");
1243 			return 0;
1244 		} else if (argc == 3) {
1245 			callStart = parse_expression(argv[1]);
1246 			callEnd = parse_expression(argv[2]);
1247 		} else
1248 			callSymbol = argv[1];
1249 
1250 		calling = true;
1251 	} else if (argc > 1) {
1252 		team = strtoul(argv[1], NULL, 0);
1253 		if (team == 0)
1254 			kprintf("ignoring invalid team argument.\n");
1255 	}
1256 
1257 	kprintf("thread         id  state        sem/cv cpu pri  stack      team  "
1258 		"name\n");
1259 
1260 	hash_open(sThreadHash, &i);
1261 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1262 		// filter out threads not matching the search criteria
1263 		if ((requiredState && thread->state != requiredState)
1264 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1265 					callStart, callEnd))
1266 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1267 			|| (team > 0 && thread->team->id != team)
1268 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1269 			continue;
1270 
1271 		kprintf("%p %6ld  %-9s", thread, thread->id, state_to_text(thread,
1272 			thread->state));
1273 
1274 		// does it block on a semaphore or a condition variable?
1275 		if (thread->state == B_THREAD_WAITING) {
1276 			if (get_thread_wait_cvar(thread))
1277 				kprintf("%p  ", get_thread_wait_cvar(thread));
1278 			else
1279 				kprintf("%10ld  ", get_thread_wait_sem(thread));
1280 		} else
1281 			kprintf("      -     ");
1282 
1283 		// on which CPU does it run?
1284 		if (thread->cpu)
1285 			kprintf("%2d", thread->cpu->cpu_num);
1286 		else
1287 			kprintf(" -");
1288 
1289 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1290 			(void *)thread->kernel_stack_base, thread->team->id,
1291 			thread->name != NULL ? thread->name : "<NULL>");
1292 	}
1293 	hash_close(sThreadHash, &i, false);
1294 	return 0;
1295 }
1296 
1297 
1298 //	#pragma mark - private kernel API
1299 
1300 
1301 void
1302 thread_exit(void)
1303 {
1304 	cpu_status state;
1305 	struct thread *thread = thread_get_current_thread();
1306 	struct team *team = thread->team;
1307 	thread_id parentID = -1;
1308 	bool deleteTeam = false;
1309 	sem_id cachedDeathSem = -1;
1310 	status_t status;
1311 	struct thread_debug_info debugInfo;
1312 	team_id teamID = team->id;
1313 
1314 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1315 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1316 			? "due to signal" : "normally", thread->exit.status));
1317 
1318 	if (!are_interrupts_enabled())
1319 		panic("thread_exit() called with interrupts disabled!\n");
1320 
1321 	// boost our priority to get this over with
1322 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1323 
1324 	// Cancel previously installed alarm timer, if any
1325 	cancel_timer(&thread->alarm);
1326 
1327 	// delete the user stack area first; we won't need it anymore
1328 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1329 		area_id area = thread->user_stack_area;
1330 		thread->user_stack_area = -1;
1331 		delete_area_etc(team, area);
1332 	}
1333 
1334 	struct job_control_entry *death = NULL;
1335 	struct death_entry* threadDeathEntry = NULL;
1336 
1337 	if (team != team_get_kernel_team()) {
1338 		if (team->main_thread == thread) {
1339 			// this was the main thread in this team, so we will delete the team as well
1340 			deleteTeam = true;
1341 		} else
1342 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1343 
1344 		// remove this thread from the current team and put it into the kernel
1345 		// team, where it stays until it dies
1346 		state = disable_interrupts();
1347 		GRAB_TEAM_LOCK();
1348 		GRAB_THREAD_LOCK();
1349 			// removing the thread and putting its death entry into the parent
1350 			// team needs to be an atomic operation
1351 
1352 		// remember how long this thread lasted
1353 		team->dead_threads_kernel_time += thread->kernel_time;
1354 		team->dead_threads_user_time += thread->user_time;
1355 
1356 		remove_thread_from_team(team, thread);
1357 		insert_thread_into_team(team_get_kernel_team(), thread);
1358 
1359 		cachedDeathSem = team->death_sem;
1360 
1361 		if (deleteTeam) {
1362 			struct team *parent = team->parent;
1363 
1364 			// remember who our parent was so we can send a signal
1365 			parentID = parent->id;
1366 
1367 			// Set the team job control state to "dead" and detach the job
1368 			// control entry from our team struct.
1369 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1370 			death = team->job_control_entry;
1371 			team->job_control_entry = NULL;
1372 
1373 			if (death != NULL) {
1374 				death->InitDeadState();
1375 
1376 				// team_set_job_control_state() already moved our entry
1377 				// into the parent's list. We just check the soft limit of
1378 				// death entries.
1379 				if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
1380 					death = parent->dead_children->entries.RemoveHead();
1381 					parent->dead_children->count--;
1382 				} else
1383 					death = NULL;
1384 
1385 				RELEASE_THREAD_LOCK();
1386 			} else
1387 				RELEASE_THREAD_LOCK();
1388 
1389 			team_remove_team(team);
1390 
1391 			send_signal_etc(parentID, SIGCHLD,
1392 				SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
1393 		} else {
1394 			// The thread is not the main thread. We store a thread death
1395 			// entry for it, unless someone is already waiting for it.
1396 			if (threadDeathEntry != NULL
1397 				&& list_is_empty(&thread->exit.waiters)) {
1398 				threadDeathEntry->thread = thread->id;
1399 				threadDeathEntry->status = thread->exit.status;
1400 				threadDeathEntry->reason = thread->exit.reason;
1401 				threadDeathEntry->signal = thread->exit.signal;
1402 
1403 				// add the entry -- remove an old one, if we hit the limit
1404 				list_add_item(&team->dead_threads, threadDeathEntry);
1405 				team->dead_threads_count++;
1406 				threadDeathEntry = NULL;
1407 
1408 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1409 					threadDeathEntry = (death_entry*)list_remove_head_item(
1410 						&team->dead_threads);
1411 					team->dead_threads_count--;
1412 				}
1413 			}
1414 
1415 			RELEASE_THREAD_LOCK();
1416 		}
1417 
1418 		RELEASE_TEAM_LOCK();
1419 
1420 		// swap address spaces, to make sure we're running on the kernel's pgdir
1421 		vm_swap_address_space(vm_kernel_address_space());
1422 		restore_interrupts(state);
1423 
1424 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1425 	}
1426 
1427 	if (threadDeathEntry != NULL)
1428 		free(threadDeathEntry);
1429 
1430 	// delete the team if we're its main thread
1431 	if (deleteTeam) {
1432 		team_delete_team(team);
1433 
1434 		// we need to delete any death entry that made it to here
1435 		if (death != NULL)
1436 			delete death;
1437 
1438 		cachedDeathSem = -1;
1439 	}
1440 
1441 	state = disable_interrupts();
1442 	GRAB_THREAD_LOCK();
1443 
1444 	// remove thread from hash, so it's no longer accessible
1445 	hash_remove(sThreadHash, thread);
1446 	sUsedThreads--;
1447 
1448 	// Stop debugging for this thread
1449 	debugInfo = thread->debug_info;
1450 	clear_thread_debug_info(&thread->debug_info, true);
1451 
1452 	// Remove the select infos. We notify them a little later.
1453 	select_info* selectInfos = thread->select_infos;
1454 	thread->select_infos = NULL;
1455 
1456 	RELEASE_THREAD_LOCK();
1457 	restore_interrupts(state);
1458 
1459 	destroy_thread_debug_info(&debugInfo);
1460 
1461 	// notify select infos
1462 	select_info* info = selectInfos;
1463 	while (info != NULL) {
1464 		select_sync* sync = info->sync;
1465 
1466 		notify_select_events(info, B_EVENT_INVALID);
1467 		info = info->next;
1468 		put_select_sync(sync);
1469 	}
1470 
1471 	// shut down the thread messaging
1472 
1473 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1474 	if (status == B_WOULD_BLOCK) {
1475 		// there is data waiting for us, so let us eat it
1476 		thread_id sender;
1477 
1478 		delete_sem(thread->msg.write_sem);
1479 			// first, let's remove all possibly waiting writers
1480 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1481 	} else {
1482 		// we probably own the semaphore here, and we're the last to do so
1483 		delete_sem(thread->msg.write_sem);
1484 	}
1485 	// now we can safely remove the msg.read_sem
1486 	delete_sem(thread->msg.read_sem);
1487 
1488 	// fill all death entries and delete the sem that others will use to wait on us
1489 	{
1490 		sem_id cachedExitSem = thread->exit.sem;
1491 		cpu_status state;
1492 
1493 		state = disable_interrupts();
1494 		GRAB_THREAD_LOCK();
1495 
1496 		// make sure no one will grab this semaphore again
1497 		thread->exit.sem = -1;
1498 
1499 		// fill all death entries
1500 		death_entry* entry = NULL;
1501 		while ((entry = (struct death_entry*)list_get_next_item(
1502 				&thread->exit.waiters, entry)) != NULL) {
1503 			entry->status = thread->exit.status;
1504 			entry->reason = thread->exit.reason;
1505 			entry->signal = thread->exit.signal;
1506 		}
1507 
1508 		RELEASE_THREAD_LOCK();
1509 		restore_interrupts(state);
1510 
1511 		delete_sem(cachedExitSem);
1512 	}
1513 
1514 	{
1515 		struct thread_exit_args args;
1516 
1517 		args.thread = thread;
1518 		args.old_kernel_stack = thread->kernel_stack_area;
1519 		args.death_stack = get_death_stack();
1520 		args.death_sem = cachedDeathSem;
1521 		args.original_team_id = teamID;
1522 
1523 
1524 		disable_interrupts();
1525 
1526 		// set the new kernel stack officially to the death stack; it won't be
1527 		// switched until the next function is called. This must be done now
1528 		// before a context switch, or we'll stay on the old stack
1529 		thread->kernel_stack_area = sDeathStacks[args.death_stack].area;
1530 		thread->kernel_stack_base = sDeathStacks[args.death_stack].address;
1531 
1532 		// we will continue in thread_exit2(), on the new stack
1533 		arch_thread_switch_kstack_and_call(thread, thread->kernel_stack_base
1534 			 + KERNEL_STACK_SIZE, thread_exit2, &args);
1535 	}
1536 
1537 	panic("never can get here\n");
1538 }
1539 
1540 
1541 struct thread *
1542 thread_get_thread_struct(thread_id id)
1543 {
1544 	struct thread *thread;
1545 	cpu_status state;
1546 
1547 	state = disable_interrupts();
1548 	GRAB_THREAD_LOCK();
1549 
1550 	thread = thread_get_thread_struct_locked(id);
1551 
1552 	RELEASE_THREAD_LOCK();
1553 	restore_interrupts(state);
1554 
1555 	return thread;
1556 }
1557 
1558 
1559 struct thread *
1560 thread_get_thread_struct_locked(thread_id id)
1561 {
1562 	struct thread_key key;
1563 
1564 	key.id = id;
1565 
1566 	return (struct thread*)hash_lookup(sThreadHash, &key);
1567 }
1568 
1569 
1570 /*!
1571 	Called in the interrupt handler code when a thread enters
1572 	the kernel for any reason.
1573 	Only tracks time for now.
1574 	Interrupts are disabled.
1575 */
1576 void
1577 thread_at_kernel_entry(bigtime_t now)
1578 {
1579 	struct thread *thread = thread_get_current_thread();
1580 
1581 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1582 
1583 	// track user time
1584 	thread->user_time += now - thread->last_time;
1585 	thread->last_time = now;
1586 
1587 	thread->in_kernel = true;
1588 }
1589 
1590 
1591 /*!
1592 	Called whenever a thread exits kernel space to user space.
1593 	Tracks time, handles signals, ...
1594 */
1595 void
1596 thread_at_kernel_exit(void)
1597 {
1598 	struct thread *thread = thread_get_current_thread();
1599 
1600 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1601 
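	// handle all pending signals; handle_signals() returns true whenever a
	// reschedule is requested, in which case we reschedule and check again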
1602 	while (handle_signals(thread)) {
1603 		InterruptsSpinLocker _(thread_spinlock);
1604 		scheduler_reschedule();
1605 	}
1606 
1607 	cpu_status state = disable_interrupts();
1608 
1609 	thread->in_kernel = false;
1610 
1611 	// track kernel time
1612 	bigtime_t now = system_time();
1613 	thread->kernel_time += now - thread->last_time;
1614 	thread->last_time = now;
1615 
1616 	restore_interrupts(state);
1617 }
1618 
1619 
1620 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1621 	and no debugging shall be done.
1622 	Interrupts are disabled in this case.
1623 */
1624 void
1625 thread_at_kernel_exit_no_signals(void)
1626 {
1627 	struct thread *thread = thread_get_current_thread();
1628 
1629 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1630 
1631 	thread->in_kernel = false;
1632 
1633 	// track kernel time
1634 	bigtime_t now = system_time();
1635 	thread->kernel_time += now - thread->last_time;
1636 	thread->last_time = now;
1637 }
1638 
1639 
1640 void
1641 thread_reset_for_exec(void)
1642 {
1643 	struct thread *thread = thread_get_current_thread();
1644 
1645 	cancel_timer(&thread->alarm);
1646 	reset_signals(thread);
1647 }
1648 
1649 
1650 /*! Inserts a thread at the tail of a queue */
1651 void
1652 thread_enqueue(struct thread *thread, struct thread_queue *queue)
1653 {
1654 	thread->queue_next = NULL;
1655 	if (queue->head == NULL) {
1656 		queue->head = thread;
1657 		queue->tail = thread;
1658 	} else {
1659 		queue->tail->queue_next = thread;
1660 		queue->tail = thread;
1661 	}
1662 }
1663 
1664 
1665 struct thread *
1666 thread_lookat_queue(struct thread_queue *queue)
1667 {
1668 	return queue->head;
1669 }
1670 
1671 
1672 struct thread *
1673 thread_dequeue(struct thread_queue *queue)
1674 {
1675 	struct thread *thread = queue->head;
1676 
1677 	if (thread != NULL) {
1678 		queue->head = thread->queue_next;
1679 		if (queue->tail == thread)
1680 			queue->tail = NULL;
1681 	}
1682 	return thread;
1683 }
1684 
1685 
1686 struct thread *
1687 thread_dequeue_id(struct thread_queue *q, thread_id id)
1688 {
1689 	struct thread *thread;
1690 	struct thread *last = NULL;
1691 
1692 	thread = q->head;
1693 	while (thread != NULL) {
1694 		if (thread->id == id) {
1695 			if (last == NULL)
1696 				q->head = thread->queue_next;
1697 			else
1698 				last->queue_next = thread->queue_next;
1699 
1700 			if (q->tail == thread)
1701 				q->tail = last;
1702 			break;
1703 		}
1704 		last = thread;
1705 		thread = thread->queue_next;
1706 	}
1707 	return thread;
1708 }
1709 
1710 
1711 thread_id
1712 allocate_thread_id(void)
1713 {
1714 	return atomic_add(&sNextThreadID, 1);
1715 }
1716 
1717 
1718 thread_id
1719 peek_next_thread_id(void)
1720 {
1721 	return atomic_get(&sNextThreadID);
1722 }
1723 
1724 
1725 /*!	Yield the CPU to other threads.
1726 	If \a force is \c true, the thread is almost guaranteed to be unscheduled.
1727 	If \c false, the thread is merely rescheduled; if there is no other thread
1728 	in ready state, or if it has a higher priority than the other ready threads,
1729 	it still has a good chance to continue running.
1730 */
1731 void
1732 thread_yield(bool force)
1733 {
1734 	if (force) {
1735 		// snooze for roughly 3 thread quantums
1736 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1737 #if 0
1738 		cpu_status state;
1739 
1740 		struct thread *thread = thread_get_current_thread();
1741 		if (thread == NULL)
1742 			return;
1743 
1744 		state = disable_interrupts();
1745 		GRAB_THREAD_LOCK();
1746 
1747 		// mark the thread as yielded, so it will not be scheduled next
1748 		//thread->was_yielded = true;
1749 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1750 		scheduler_reschedule();
1751 
1752 		RELEASE_THREAD_LOCK();
1753 		restore_interrupts(state);
1754 #endif
1755 	} else {
1756 		struct thread *thread = thread_get_current_thread();
1757 		if (thread == NULL)
1758 			return;
1759 
1760 		// Don't force the thread off the CPU, just reschedule.
1761 		InterruptsSpinLocker _(thread_spinlock);
1762 		scheduler_reschedule();
1763 	}
1764 }
1765 
1766 
1767 /*!
1768 	Kernel private thread creation function.
1769 
1770 	\param threadID The ID to be assigned to the new thread. If
1771 		  \code < 0 \endcode a fresh one is allocated.
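
	A hypothetical usage sketch (worker() stands in for any kernel thread
	function; it is not part of this file):
	\code
	thread_id id = spawn_kernel_thread_etc(&worker, "worker",
		B_NORMAL_PRIORITY, NULL, team_get_kernel_team_id(), -1);
	if (id >= B_OK)
		resume_thread(id);
	\endcode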
1772 */
1773 thread_id
1774 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1775 	void *arg, team_id team, thread_id threadID)
1776 {
1777 	return create_thread(name, team, (thread_entry_func)function, arg, NULL,
1778 		priority, true, threadID);
1779 }
1780 
1781 
1782 status_t
1783 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1784 	status_t *_returnCode)
1785 {
1786 	sem_id exitSem = B_BAD_THREAD_ID;
1787 	struct death_entry death;
1788 	job_control_entry* freeDeath = NULL;
1789 	struct thread *thread;
1790 	cpu_status state;
1791 	status_t status = B_OK;
1792 
1793 	if (id < B_OK)
1794 		return B_BAD_THREAD_ID;
1795 
1796 	// we need to resume the thread we're waiting for first
1797 
1798 	state = disable_interrupts();
1799 	GRAB_THREAD_LOCK();
1800 
1801 	thread = thread_get_thread_struct_locked(id);
1802 	if (thread != NULL) {
1803 		// remember the semaphore we have to wait on and place our death entry
1804 		exitSem = thread->exit.sem;
1805 		list_add_link_to_head(&thread->exit.waiters, &death);
1806 	}
1807 
1808 	death_entry* threadDeathEntry = NULL;
1809 
1810 	RELEASE_THREAD_LOCK();
1811 
1812 	if (thread == NULL) {
1813 		// we couldn't find this thread - maybe it's already gone, and we'll
1814 		// find its death entry in our team
1815 		GRAB_TEAM_LOCK();
1816 
1817 		struct team* team = thread_get_current_thread()->team;
1818 
1819 		// check the child death entries first (i.e. main threads of child
1820 		// teams)
1821 		bool deleteEntry;
1822 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1823 		if (freeDeath != NULL) {
1824 			death.status = freeDeath->status;
1825 			if (!deleteEntry)
1826 				freeDeath = NULL;
1827 		} else {
1828 			// check the thread death entries of the team (non-main threads)
1829 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1830 					&team->dead_threads, threadDeathEntry)) != NULL) {
1831 				if (threadDeathEntry->thread == id) {
1832 					list_remove_item(&team->dead_threads, threadDeathEntry);
1833 					team->dead_threads_count--;
1834 					death.status = threadDeathEntry->status;
1835 					break;
1836 				}
1837 			}
1838 
1839 			if (threadDeathEntry == NULL)
1840 				status = B_BAD_THREAD_ID;
1841 		}
1842 
1843 		RELEASE_TEAM_LOCK();
1844 	}
1845 
1846 	restore_interrupts(state);
1847 
1848 	if (thread == NULL && status == B_OK) {
1849 		// we found the thread's death entry in our team
1850 		if (_returnCode)
1851 			*_returnCode = death.status;
1852 
1853 		delete freeDeath;
1854 		free(threadDeathEntry);
1855 		return B_OK;
1856 	}
1857 
1858 	// we need to wait for the death of the thread
1859 
1860 	if (exitSem < B_OK)
1861 		return B_BAD_THREAD_ID;
1862 
1863 	resume_thread(id);
1864 		// make sure we don't wait forever on a suspended thread
1865 
1866 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1867 
1868 	if (status == B_OK) {
1869 		// this should never happen as the thread deletes the semaphore on exit
1870 		panic("could acquire exit_sem for thread %ld\n", id);
1871 	} else if (status == B_BAD_SEM_ID) {
1872 		// this is the way the thread normally exits
1873 		status = B_OK;
1874 
1875 		if (_returnCode)
1876 			*_returnCode = death.status;
1877 	} else {
1878 		// We were probably interrupted; we need to remove our death entry now.
1879 		state = disable_interrupts();
1880 		GRAB_THREAD_LOCK();
1881 
1882 		thread = thread_get_thread_struct_locked(id);
1883 		if (thread != NULL)
1884 			list_remove_link(&death);
1885 
1886 		RELEASE_THREAD_LOCK();
1887 		restore_interrupts(state);
1888 
1889 		// If the thread is already gone, we need to wait for its exit semaphore
1890 		// to make sure our death entry stays valid - it won't take long
1891 		if (thread == NULL)
1892 			acquire_sem(exitSem);
1893 	}
1894 
1895 	return status;
1896 }
1897 
1898 
1899 status_t
1900 select_thread(int32 id, struct select_info* info, bool kernel)
1901 {
1902 	InterruptsSpinLocker locker(thread_spinlock);
1903 
1904 	// get thread
1905 	struct thread* thread = thread_get_thread_struct_locked(id);
1906 	if (thread == NULL)
1907 		return B_BAD_THREAD_ID;
1908 
1909 	// We support only B_EVENT_INVALID at the moment.
1910 	info->selected_events &= B_EVENT_INVALID;
1911 
1912 	// add info to list
1913 	if (info->selected_events != 0) {
1914 		info->next = thread->select_infos;
1915 		thread->select_infos = info;
1916 
1917 		// we need a sync reference
1918 		atomic_add(&info->sync->ref_count, 1);
1919 	}
1920 
1921 	return B_OK;
1922 }
1923 
1924 
1925 status_t
1926 deselect_thread(int32 id, struct select_info* info, bool kernel)
1927 {
1928 	InterruptsSpinLocker locker(thread_spinlock);
1929 
1930 	// get thread
1931 	struct thread* thread = thread_get_thread_struct_locked(id);
1932 	if (thread == NULL)
1933 		return B_BAD_THREAD_ID;
1934 
1935 	// remove info from list
1936 	select_info** infoLocation = &thread->select_infos;
1937 	while (*infoLocation != NULL && *infoLocation != info)
1938 		infoLocation = &(*infoLocation)->next;
1939 
1940 	if (*infoLocation != info)
1941 		return B_OK;
1942 
1943 	*infoLocation = info->next;
1944 
1945 	locker.Unlock();
1946 
1947 	// surrender sync reference
1948 	put_select_sync(info->sync);
1949 
1950 	return B_OK;
1951 }
1952 
1953 
1954 int32
1955 thread_max_threads(void)
1956 {
1957 	return sMaxThreads;
1958 }
1959 
1960 
1961 int32
1962 thread_used_threads(void)
1963 {
1964 	return sUsedThreads;
1965 }
1966 
1967 
1968 status_t
1969 thread_init(kernel_args *args)
1970 {
1971 	uint32 i;
1972 
1973 	TRACE(("thread_init: entry\n"));
1974 
1975 	// create the thread hash table
1976 	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
1977 		&thread_struct_compare, &thread_struct_hash);
1978 
1979 	// zero out the dead thread structure queue (dead_q)
1980 	memset(&dead_q, 0, sizeof(dead_q));
1981 
1982 	if (arch_thread_init(args) < B_OK)
1983 		panic("arch_thread_init() failed!\n");
1984 
1985 	// skip all thread IDs up to and including B_SYSTEM_TEAM, which is reserved
1986 	sNextThreadID = B_SYSTEM_TEAM + 1;
1987 
1988 	// create an idle thread for each cpu
1989 
1990 	for (i = 0; i < args->num_cpus; i++) {
1991 		struct thread *thread;
1992 		area_info info;
1993 		char name[64];
1994 
1995 		sprintf(name, "idle thread %lu", i + 1);
1996 		thread = create_thread_struct(&sIdleThreads[i], name,
1997 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
1998 		if (thread == NULL) {
1999 			panic("error creating idle thread struct\n");
2000 			return B_NO_MEMORY;
2001 		}
2002 
2003 		thread->team = team_get_kernel_team();
2004 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
2005 		thread->state = B_THREAD_RUNNING;
2006 		thread->next_state = B_THREAD_READY;
2007 		sprintf(name, "idle thread %lu kstack", i + 1);
2008 		thread->kernel_stack_area = find_area(name);
2009 		thread->entry = NULL;
2010 
2011 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2012 			panic("error finding idle kstack area\n");
2013 
2014 		thread->kernel_stack_base = (addr_t)info.address;
2015 
2016 		hash_insert(sThreadHash, thread);
2017 		insert_thread_into_team(thread->team, thread);
2018 	}
2019 	sUsedThreads = args->num_cpus;
2020 
2021 	// create a set of death stacks
2022 
2023 	sNumDeathStacks = smp_get_num_cpus();
2024 	if (sNumDeathStacks > 8 * sizeof(sDeathStackBitmap)) {
2025 		// clamp to the bitmap's bit count for really beefy machines
2026 		sNumDeathStacks = 8 * sizeof(sDeathStackBitmap);
2027 	}
2028 	sDeathStackBitmap = 0;
2029 	sDeathStacks = (struct death_stack *)malloc(sNumDeathStacks
2030 		* sizeof(struct death_stack));
2031 	if (sDeathStacks == NULL) {
2032 		panic("error creating death stacks\n");
2033 		return B_NO_MEMORY;
2034 	}
2035 	{
2036 		char temp[64];
2037 
2038 		for (i = 0; i < sNumDeathStacks; i++) {
2039 			sprintf(temp, "death stack %lu", i);
2040 			sDeathStacks[i].area = create_area(temp,
2041 				(void **)&sDeathStacks[i].address, B_ANY_KERNEL_ADDRESS,
2042 				KERNEL_STACK_SIZE, B_FULL_LOCK,
2043 				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
2044 			if (sDeathStacks[i].area < 0) {
2045 				panic("error creating death stacks\n");
2046 				return sDeathStacks[i].area;
2047 			}
2048 			sDeathStacks[i].in_use = false;
2049 		}
2050 	}
2051 	sDeathStackSem = create_sem(sNumDeathStacks, "death stack availability");
2052 
2053 	// set up some debugger commands
2054 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2055 		"[ <team> ]\n"
2056 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2057 		"all threads of the specified team.\n"
2058 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2059 	add_debugger_command_etc("ready", &dump_thread_list,
2060 		"List all ready threads",
2061 		"\n"
2062 		"Prints a list of all threads in ready state.\n", 0);
2063 	add_debugger_command_etc("running", &dump_thread_list,
2064 		"List all running threads",
2065 		"\n"
2066 		"Prints a list of all threads in running state.\n", 0);
2067 	add_debugger_command_etc("waiting", &dump_thread_list,
2068 		"List all waiting threads (optionally for a specific semaphore)",
2069 		"[ <sem> ]\n"
2070 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2071 		"specified, only the threads waiting on that semaphore are listed.\n"
2072 		"  <sem>  - ID of the semaphore.\n", 0);
2073 	add_debugger_command_etc("realtime", &dump_thread_list,
2074 		"List all realtime threads",
2075 		"\n"
2076 		"Prints a list of all threads with realtime priority.\n", 0);
2077 	add_debugger_command_etc("thread", &dump_thread_info,
2078 		"Dump info about a particular thread",
2079 		"[ <id> | <address> | <name> ]\n"
2080 		"Prints information about the specified thread. If no argument is\n"
2081 		"given the current thread is selected.\n"
2082 		"  <id>       - The ID of the thread.\n"
2083 		"  <address>  - The address of the thread structure.\n"
2084 		"  <name>     - The thread's name.\n", 0);
2085 	add_debugger_command_etc("calling", &dump_thread_list,
2086 		"Show all threads that have a specific address in their call chain",
2087 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2088 	add_debugger_command_etc("unreal", &make_thread_unreal,
2089 		"Set realtime priority threads to normal priority",
2090 		"[ <id> ]\n"
2091 		"Sets the priority of all realtime threads or, if given, the one\n"
2092 		"with the specified ID to \"normal\" priority.\n"
2093 		"  <id>  - The ID of the thread.\n", 0);
2094 	add_debugger_command_etc("suspend", &make_thread_suspended,
2095 		"Suspend a thread",
2096 		"[ <id> ]\n"
2097 		"Suspends the thread with the given ID. If no ID argument is given\n"
2098 		"the current thread is selected.\n"
2099 		"  <id>  - The ID of the thread.\n", 0);
2100 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2101 		"<id>\n"
2102 		"Resumes the specified thread, if it is currently suspended.\n"
2103 		"  <id>  - The ID of the thread.\n", 0);
2104 	add_debugger_command_etc("drop", &drop_into_debugger,
2105 		"Drop a thread into the userland debugger",
2106 		"<id>\n"
2107 		"Drops the specified (userland) thread into the userland debugger\n"
2108 		"after leaving the kernel debugger.\n"
2109 		"  <id>  - The ID of the thread.\n", 0);
2110 	add_debugger_command_etc("priority", &set_thread_prio,
2111 		"Set a thread's priority",
2112 		"<priority> [ <id> ]\n"
2113 		"Sets the priority of the thread with the specified ID to the given\n"
2114 		"priority. If no thread ID is given, the current thread is selected.\n"
2115 		"  <priority>  - The thread's new priority (0 - 120)\n"
2116 		"  <id>        - The ID of the thread.\n", 0);
2117 
2118 	return B_OK;
2119 }
2120 
2121 
2122 status_t
2123 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2124 {
2125 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2126 	// so that get_current_cpu and friends will work, which is crucial for
2127 	// a lot of low level routines
2128 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2129 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2130 	return B_OK;
2131 }
2132 
2133 
2134 //	#pragma mark - thread blocking API
2135 
2136 
2137 static status_t
2138 thread_block_timeout(timer* timer)
2139 {
2140 	// The timer has been installed with B_TIMER_ACQUIRE_THREAD_LOCK, so
2141 	// we're holding the thread lock already. This makes things comfortably
2142 	// easy.
2143 
2144 	struct thread* thread = (struct thread*)timer->user_data;
2145 	if (thread_unblock_locked(thread, B_TIMED_OUT))
2146 		return B_INVOKE_SCHEDULER;
2147 
2148 	return B_HANDLED_INTERRUPT;
2149 }
2150 
2151 
2152 status_t
2153 thread_block()
2154 {
2155 	InterruptsSpinLocker _(thread_spinlock);
2156 	return thread_block_locked(thread_get_current_thread());
2157 }
2158 
2159 
2160 bool
2161 thread_unblock(thread_id threadID, status_t status)
2162 {
2163 	InterruptsSpinLocker _(thread_spinlock);
2164 
2165 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2166 	if (thread == NULL)
2167 		return false;
2168 	return thread_unblock_locked(thread, status);
2169 }
2170 
2171 
2172 status_t
2173 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2174 {
2175 	InterruptsSpinLocker _(thread_spinlock);
2176 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
2177 }
2178 
2179 
2180 status_t
2181 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
2182 {
2183 	struct thread* thread = thread_get_current_thread();
2184 
2185 	if (thread->wait.status != 1)
2186 		return thread->wait.status;
2187 
2188 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2189 		&& timeout != B_INFINITE_TIMEOUT;
2190 
2191 	if (useTimer) {
2192 		// Timer flags: absolute/relative + "acquire thread lock". The latter
2193 		// avoids nasty race conditions and deadlock problems that could
2194 		// otherwise occur between our cancel_timer() and a concurrently
2195 		// executing thread_block_timeout().
2196 		uint32 timerFlags = (timeoutFlags & B_RELATIVE_TIMEOUT)
2197 			? B_ONE_SHOT_RELATIVE_TIMER : B_ONE_SHOT_ABSOLUTE_TIMER;
2198 		timerFlags |= B_TIMER_ACQUIRE_THREAD_LOCK;
2199 
2200 		// install the timer
2201 		thread->wait.unblock_timer.user_data = thread;
2202 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2203 			timerFlags);
2204 	}
2205 
2206 	// block
2207 	status_t error = thread_block_locked(thread);
2208 
2209 	// cancel timer, if it didn't fire
2210 	if (error != B_TIMED_OUT && useTimer)
2211 		cancel_timer(&thread->wait.unblock_timer);
2212 
2213 	return error;
2214 }
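
/*	Illustrative usage sketch for this blocking API, mirroring the pattern
	snooze_etc() uses further below; "waiterID" is a hypothetical thread ID
	and THREAD_BLOCK_TYPE_SNOOZE is just one possible block type.

		// waiter
		InterruptsSpinLocker locker(thread_spinlock);
		struct thread* thread = thread_get_current_thread();
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SNOOZE, NULL);
		status_t status = thread_block_with_timeout_locked(B_RELATIVE_TIMEOUT,
			1000000);

		// waker, from another thread or a timer/interrupt hook
		thread_unblock(waiterID, B_OK);
*/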
2215 
2216 
2217 //	#pragma mark - public kernel API
2218 
2219 
2220 void
2221 exit_thread(status_t returnValue)
2222 {
2223 	struct thread *thread = thread_get_current_thread();
2224 
2225 	thread->exit.status = returnValue;
2226 	thread->exit.reason = THREAD_RETURN_EXIT;
2227 
2228 	// if called from a kernel thread, we don't deliver the signal,
2229 	// we just exit directly to keep the user space behaviour of
2230 	// this function
2231 	if (thread->team != team_get_kernel_team())
2232 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2233 	else
2234 		thread_exit();
2235 }
2236 
2237 
2238 status_t
2239 kill_thread(thread_id id)
2240 {
2241 	if (id <= 0)
2242 		return B_BAD_VALUE;
2243 
2244 	return send_signal(id, SIGKILLTHR);
2245 }
2246 
2247 
2248 status_t
2249 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2250 {
2251 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2252 }
2253 
2254 
2255 int32
2256 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2257 {
2258 	return receive_data_etc(sender, buffer, bufferSize, 0);
2259 }
2260 
2261 
2262 bool
2263 has_data(thread_id thread)
2264 {
2265 	int32 count;
2266 
2267 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
2268 			&count) != B_OK)
2269 		return false;
2270 
2271 	return count != 0;
2272 }
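
/*	Illustrative usage sketch of the per-thread data port: send_data() may
	block if the target still has an undelivered message, receive_data()
	blocks until data arrives, and has_data() can be used to poll first;
	"targetThread" and the 'wrk_' code are placeholders.

		// sender
		int32 value = 42;
		send_data(targetThread, 'wrk_', &value, sizeof(value));

		// receiver
		thread_id sender;
		int32 received;
		int32 code = receive_data(&sender, &received, sizeof(received));
*/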
2273 
2274 
2275 status_t
2276 _get_thread_info(thread_id id, thread_info *info, size_t size)
2277 {
2278 	status_t status = B_OK;
2279 	struct thread *thread;
2280 	cpu_status state;
2281 
2282 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2283 		return B_BAD_VALUE;
2284 
2285 	state = disable_interrupts();
2286 	GRAB_THREAD_LOCK();
2287 
2288 	thread = thread_get_thread_struct_locked(id);
2289 	if (thread == NULL) {
2290 		status = B_BAD_VALUE;
2291 		goto err;
2292 	}
2293 
2294 	fill_thread_info(thread, info, size);
2295 
2296 err:
2297 	RELEASE_THREAD_LOCK();
2298 	restore_interrupts(state);
2299 
2300 	return status;
2301 }
2302 
2303 
2304 status_t
2305 _get_next_thread_info(team_id team, int32 *_cookie, thread_info *info,
2306 	size_t size)
2307 {
2308 	status_t status = B_BAD_VALUE;
2309 	struct thread *thread = NULL;
2310 	cpu_status state;
2311 	int slot;
2312 	thread_id lastThreadID;
2313 
2314 	if (info == NULL || size != sizeof(thread_info) || team < B_OK)
2315 		return B_BAD_VALUE;
2316 
2317 	if (team == B_CURRENT_TEAM)
2318 		team = team_get_current_team_id();
2319 	else if (!team_is_valid(team))
2320 		return B_BAD_VALUE;
2321 
2322 	slot = *_cookie;
2323 
2324 	state = disable_interrupts();
2325 	GRAB_THREAD_LOCK();
2326 
2327 	lastThreadID = peek_next_thread_id();
2328 	if (slot >= lastThreadID)
2329 		goto err;
2330 
2331 	while (slot < lastThreadID
2332 		&& (!(thread = thread_get_thread_struct_locked(slot))
2333 			|| thread->team->id != team))
2334 		slot++;
2335 
2336 	if (thread != NULL && thread->team->id == team) {
2337 		fill_thread_info(thread, info, size);
2338 
2339 		*_cookie = slot + 1;
2340 		status = B_OK;
2341 	}
2342 
2343 err:
2344 	RELEASE_THREAD_LOCK();
2345 	restore_interrupts(state);
2346 
2347 	return status;
2348 }
2349 
2350 
2351 thread_id
2352 find_thread(const char *name)
2353 {
2354 	struct hash_iterator iterator;
2355 	struct thread *thread;
2356 	cpu_status state;
2357 
2358 	if (name == NULL)
2359 		return thread_get_current_thread_id();
2360 
2361 	state = disable_interrupts();
2362 	GRAB_THREAD_LOCK();
2363 
2364 	// ToDo: this might not be in the same order as find_thread() in BeOS
2365 	//		which could be theoretically problematic.
2366 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2367 	//		cheap either - although this function is probably used very rarely.
2368 
2369 	hash_open(sThreadHash, &iterator);
2370 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
2371 			!= NULL) {
2372 		// Search through hash
2373 		if (thread->name != NULL && !strcmp(thread->name, name)) {
2374 			thread_id id = thread->id;
2375 
2376 			RELEASE_THREAD_LOCK();
2377 			restore_interrupts(state);
2378 			return id;
2379 		}
2380 	}
2381 
2382 	RELEASE_THREAD_LOCK();
2383 	restore_interrupts(state);
2384 
2385 	return B_NAME_NOT_FOUND;
2386 }
2387 
2388 
2389 status_t
2390 rename_thread(thread_id id, const char *name)
2391 {
2392 	struct thread *thread = thread_get_current_thread();
2393 	status_t status = B_BAD_THREAD_ID;
2394 	cpu_status state;
2395 
2396 	if (name == NULL)
2397 		return B_BAD_VALUE;
2398 
2399 	state = disable_interrupts();
2400 	GRAB_THREAD_LOCK();
2401 
2402 	if (thread->id != id)
2403 		thread = thread_get_thread_struct_locked(id);
2404 
2405 	if (thread != NULL) {
2406 		if (thread->team == thread_get_current_thread()->team) {
2407 			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2408 			status = B_OK;
2409 		} else
2410 			status = B_NOT_ALLOWED;
2411 	}
2412 
2413 	RELEASE_THREAD_LOCK();
2414 	restore_interrupts(state);
2415 
2416 	return status;
2417 }
2418 
2419 
2420 status_t
2421 set_thread_priority(thread_id id, int32 priority)
2422 {
2423 	struct thread *thread;
2424 	int32 oldPriority;
2425 
2426 	// make sure the passed in priority is within bounds
2427 	if (priority > B_MAX_PRIORITY)
2428 		priority = B_MAX_PRIORITY;
2429 	if (priority < B_MIN_PRIORITY)
2430 		priority = B_MIN_PRIORITY;
2431 
2432 	thread = thread_get_current_thread();
2433 	if (thread->id == id) {
2434 		// It's our own thread, so we know it isn't in the run queue, and we
2435 		// can manipulate its structure directly.
2436 		oldPriority = thread->priority;
2437 			// note that this might not return the correct value if we are preempted
2438 			// here, and another thread changes our priority before the next line is
2439 			// executed
2440 		thread->priority = thread->next_priority = priority;
2441 	} else {
2442 		cpu_status state = disable_interrupts();
2443 		GRAB_THREAD_LOCK();
2444 
2445 		thread = thread_get_thread_struct_locked(id);
2446 		if (thread) {
2447 			oldPriority = thread->priority;
2448 			thread->next_priority = priority;
2449 			if (thread->state == B_THREAD_READY && thread->priority != priority) {
2450 				// if the thread is in the run queue, we reinsert it at a new position
2451 				scheduler_remove_from_run_queue(thread);
2452 				thread->priority = priority;
2453 				scheduler_enqueue_in_run_queue(thread);
2454 			} else
2455 				thread->priority = priority;
2456 		} else
2457 			oldPriority = B_BAD_THREAD_ID;
2458 
2459 		RELEASE_THREAD_LOCK();
2460 		restore_interrupts(state);
2461 	}
2462 
2463 	return oldPriority;
2464 }
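
/*	Illustrative usage sketch: on success the previous priority is returned,
	otherwise B_BAD_THREAD_ID.

		int32 oldPriority = set_thread_priority(find_thread(NULL),
			B_DISPLAY_PRIORITY);
		if (oldPriority < B_OK)
			dprintf("no such thread\n");
*/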
2465 
2466 
2467 status_t
2468 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2469 {
2470 	status_t status;
2471 
2472 	if (timebase != B_SYSTEM_TIMEBASE)
2473 		return B_BAD_VALUE;
2474 
2475 	InterruptsSpinLocker _(thread_spinlock);
2476 	struct thread* thread = thread_get_current_thread();
2477 
2478 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, NULL);
2479 	status = thread_block_with_timeout_locked(flags, timeout);
2480 
2481 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2482 		return B_OK;
2483 
2484 	return status;
2485 }
2486 
2487 
2488 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2489 status_t
2490 snooze(bigtime_t timeout)
2491 {
2492 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2493 }
2494 
2495 
2496 /*!
2497 	snooze_until() for internal kernel use only; doesn't interrupt on
2498 	signals.
2499 */
2500 status_t
2501 snooze_until(bigtime_t timeout, int timebase)
2502 {
2503 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2504 }
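
/*	Illustrative usage sketch: an absolute deadline for snooze_until() is
	derived from system_time(), so the two calls below wait for roughly the
	same 50 ms.

		snooze(50000);
		snooze_until(system_time() + 50000, B_SYSTEM_TIMEBASE);
*/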
2505 
2506 
2507 status_t
2508 wait_for_thread(thread_id thread, status_t *_returnCode)
2509 {
2510 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2511 }
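
/*	Illustrative usage sketch: spawning a kernel thread and collecting its
	exit status; worker_entry() is a hypothetical thread_func.

		static int32
		worker_entry(void* data)
		{
			return B_OK;
		}

		thread_id worker = spawn_kernel_thread(&worker_entry, "worker",
			B_NORMAL_PRIORITY, NULL);
		if (worker >= B_OK) {
			resume_thread(worker);
			status_t returnCode;
			wait_for_thread(worker, &returnCode);
		}
*/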
2512 
2513 
2514 status_t
2515 suspend_thread(thread_id id)
2516 {
2517 	if (id <= 0)
2518 		return B_BAD_VALUE;
2519 
2520 	return send_signal(id, SIGSTOP);
2521 }
2522 
2523 
2524 status_t
2525 resume_thread(thread_id id)
2526 {
2527 	if (id <= 0)
2528 		return B_BAD_VALUE;
2529 
2530 	return send_signal_etc(id, SIGCONT, SIGNAL_FLAG_DONT_RESTART_SYSCALL);
2531 		// This retains compatibility with BeOS, which documents the
2532 		// combination of suspend_thread() and resume_thread() to
2533 		// interrupt threads waiting on semaphores.
2534 }
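
/*	Illustrative sketch of the BeOS-documented behaviour mentioned above:
	a thread blocked on a semaphore with B_CAN_INTERRUPT is woken by a
	suspend/resume pair and sees B_INTERRUPTED; "waiter" and "sem" are
	placeholders.

		// in the waiting thread
		status_t status = acquire_sem_etc(sem, 1, B_CAN_INTERRUPT, 0);
		if (status == B_INTERRUPTED)
			dprintf("semaphore wait was interrupted\n");

		// in another thread
		suspend_thread(waiter);
		resume_thread(waiter);
*/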
2535 
2536 
2537 thread_id
2538 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2539 	void *arg)
2540 {
2541 	return create_thread(name, team_get_kernel_team()->id,
2542 		(thread_entry_func)function, arg, NULL, priority, true, -1);
2543 }
2544 
2545 
2546 int
2547 getrlimit(int resource, struct rlimit * rlp)
2548 {
2549 	status_t error = common_getrlimit(resource, rlp);
2550 	if (error != B_OK) {
2551 		errno = error;
2552 		return -1;
2553 	}
2554 
2555 	return 0;
2556 }
2557 
2558 
2559 int
2560 setrlimit(int resource, const struct rlimit * rlp)
2561 {
2562 	status_t error = common_setrlimit(resource, rlp);
2563 	if (error != B_OK) {
2564 		errno = error;
2565 		return -1;
2566 	}
2567 
2568 	return 0;
2569 }
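
/*	Illustrative usage sketch for these POSIX wrappers, assuming
	RLIMIT_NOFILE is among the resources handled by common_getrlimit()
	and common_setrlimit():

		struct rlimit rl;
		if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
			rl.rlim_cur = rl.rlim_max;
			if (setrlimit(RLIMIT_NOFILE, &rl) != 0)
				dprintf("setrlimit failed\n");
		}
*/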
2570 
2571 
2572 //	#pragma mark - syscalls
2573 
2574 
2575 void
2576 _user_exit_thread(status_t returnValue)
2577 {
2578 	exit_thread(returnValue);
2579 }
2580 
2581 
2582 status_t
2583 _user_kill_thread(thread_id thread)
2584 {
2585 	return kill_thread(thread);
2586 }
2587 
2588 
2589 status_t
2590 _user_resume_thread(thread_id thread)
2591 {
2592 	return resume_thread(thread);
2593 }
2594 
2595 
2596 status_t
2597 _user_suspend_thread(thread_id thread)
2598 {
2599 	return suspend_thread(thread);
2600 }
2601 
2602 
2603 status_t
2604 _user_rename_thread(thread_id thread, const char *userName)
2605 {
2606 	char name[B_OS_NAME_LENGTH];
2607 
2608 	if (!IS_USER_ADDRESS(userName)
2609 		|| userName == NULL
2610 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2611 		return B_BAD_ADDRESS;
2612 
2613 	return rename_thread(thread, name);
2614 }
2615 
2616 
2617 int32
2618 _user_set_thread_priority(thread_id thread, int32 newPriority)
2619 {
2620 	return set_thread_priority(thread, newPriority);
2621 }
2622 
2623 
2624 thread_id
2625 _user_spawn_thread(int32 (*entry)(thread_func, void *), const char *userName,
2626 	int32 priority, void *data1, void *data2)
2627 {
2628 	char name[B_OS_NAME_LENGTH];
2629 	thread_id threadID;
2630 
2631 	if (!IS_USER_ADDRESS(entry) || entry == NULL
2632 		|| (userName != NULL && (!IS_USER_ADDRESS(userName)
2633 			|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)))
2634 		return B_BAD_ADDRESS;
2635 
2636 	threadID = create_thread(userName != NULL ? name : "user thread",
2637 		thread_get_current_thread()->team->id, entry,
2638 		data1, data2, priority, false, -1);
2639 
2640 	user_debug_thread_created(threadID);
2641 
2642 	return threadID;
2643 }
2644 
2645 
2646 status_t
2647 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2648 {
2649 	// NOTE: We only know the system timebase at the moment.
2650 	syscall_restart_handle_timeout_pre(flags, timeout);
2651 
2652 	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2653 
2654 	return syscall_restart_handle_timeout_post(error, timeout);
2655 }
2656 
2657 
2658 void
2659 _user_thread_yield(void)
2660 {
2661 	thread_yield(true);
2662 }
2663 
2664 
2665 status_t
2666 _user_get_thread_info(thread_id id, thread_info *userInfo)
2667 {
2668 	thread_info info;
2669 	status_t status;
2670 
2671 	if (!IS_USER_ADDRESS(userInfo))
2672 		return B_BAD_ADDRESS;
2673 
2674 	status = _get_thread_info(id, &info, sizeof(thread_info));
2675 
2676 	if (status >= B_OK
2677 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2678 		return B_BAD_ADDRESS;
2679 
2680 	return status;
2681 }
2682 
2683 
2684 status_t
2685 _user_get_next_thread_info(team_id team, int32 *userCookie,
2686 	thread_info *userInfo)
2687 {
2688 	status_t status;
2689 	thread_info info;
2690 	int32 cookie;
2691 
2692 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2693 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2694 		return B_BAD_ADDRESS;
2695 
2696 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2697 	if (status < B_OK)
2698 		return status;
2699 
2700 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2701 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2702 		return B_BAD_ADDRESS;
2703 
2704 	return status;
2705 }
2706 
2707 
2708 thread_id
2709 _user_find_thread(const char *userName)
2710 {
2711 	char name[B_OS_NAME_LENGTH];
2712 
2713 	if (userName == NULL)
2714 		return find_thread(NULL);
2715 
2716 	if (!IS_USER_ADDRESS(userName)
2717 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2718 		return B_BAD_ADDRESS;
2719 
2720 	return find_thread(name);
2721 }
2722 
2723 
2724 status_t
2725 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2726 {
2727 	status_t returnCode;
2728 	status_t status;
2729 
2730 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2731 		return B_BAD_ADDRESS;
2732 
2733 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2734 
2735 	if (status == B_OK && userReturnCode != NULL
2736 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
2737 		return B_BAD_ADDRESS;
2738 	}
2739 
2740 	return syscall_restart_handle_post(status);
2741 }
2742 
2743 
2744 bool
2745 _user_has_data(thread_id thread)
2746 {
2747 	return has_data(thread);
2748 }
2749 
2750 
2751 status_t
2752 _user_send_data(thread_id thread, int32 code, const void *buffer,
2753 	size_t bufferSize)
2754 {
2755 	if (!IS_USER_ADDRESS(buffer))
2756 		return B_BAD_ADDRESS;
2757 
2758 	return send_data_etc(thread, code, buffer, bufferSize,
2759 		B_KILL_CAN_INTERRUPT);
2760 		// supports userland buffers
2761 }
2762 
2763 
2764 status_t
2765 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2766 {
2767 	thread_id sender;
2768 	status_t code;
2769 
2770 	if (!IS_USER_ADDRESS(_userSender)
2771 		|| !IS_USER_ADDRESS(buffer))
2772 		return B_BAD_ADDRESS;
2773 
2774 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2775 		// supports userland buffers
2776 
2777 	if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
2778 		return B_BAD_ADDRESS;
2779 
2780 	return code;
2781 }
2782 
2783 
2784 // ToDo: the following two functions don't belong here
2785 
2786 
2787 int
2788 _user_getrlimit(int resource, struct rlimit *urlp)
2789 {
2790 	struct rlimit rl;
2791 	int ret;
2792 
2793 	if (urlp == NULL)
2794 		return EINVAL;
2795 
2796 	if (!IS_USER_ADDRESS(urlp))
2797 		return B_BAD_ADDRESS;
2798 
2799 	ret = common_getrlimit(resource, &rl);
2800 
2801 	if (ret == 0) {
2802 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
2803 		if (ret < 0)
2804 			return ret;
2805 
2806 		return 0;
2807 	}
2808 
2809 	return ret;
2810 }
2811 
2812 
2813 int
2814 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
2815 {
2816 	struct rlimit resourceLimit;
2817 
2818 	if (userResourceLimit == NULL)
2819 		return EINVAL;
2820 
2821 	if (!IS_USER_ADDRESS(userResourceLimit)
2822 		|| user_memcpy(&resourceLimit, userResourceLimit,
2823 			sizeof(struct rlimit)) < B_OK)
2824 		return B_BAD_ADDRESS;
2825 
2826 	return common_setrlimit(resource, &resourceLimit);
2827 }
2828 
2829