xref: /haiku/src/system/kernel/thread.cpp (revision 020cbad9d40235a2c50a81a42d69912a5ff8fbc4)
1 /*
2  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*! Threading routines */
10 
11 
12 #include <thread.h>
13 
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <sys/resource.h>
18 
19 #include <OS.h>
20 
21 #include <util/AutoLock.h>
22 #include <util/khash.h>
23 
24 #include <boot/kernel_args.h>
25 #include <condition_variable.h>
26 #include <cpu.h>
27 #include <int.h>
28 #include <kimage.h>
29 #include <kscheduler.h>
30 #include <ksignal.h>
31 #include <smp.h>
32 #include <syscalls.h>
33 #include <team.h>
34 #include <tls.h>
35 #include <user_runtime.h>
36 #include <vfs.h>
37 #include <vm.h>
38 #include <vm_address_space.h>
39 #include <wait_for_objects.h>
40 
41 
42 //#define TRACE_THREAD
43 #ifdef TRACE_THREAD
44 #	define TRACE(x) dprintf x
45 #else
46 #	define TRACE(x) ;
47 #endif
48 
49 
50 #define THREAD_MAX_MESSAGE_SIZE		65536
51 
52 // used to pass messages between thread_exit and thread_exit2
53 
54 struct thread_exit_args {
55 	struct thread	*thread;
56 	area_id			old_kernel_stack;
57 	uint32			death_stack;
58 	sem_id			death_sem;
59 	team_id			original_team_id;
60 };
61 
62 struct thread_key {
63 	thread_id id;
64 };
65 
66 // global
67 spinlock thread_spinlock = 0;
68 
69 // thread list
70 static struct thread sIdleThreads[B_MAX_CPU_COUNT];
71 static hash_table *sThreadHash = NULL;
72 static thread_id sNextThreadID = 1;
73 
74 // some arbitrarily chosen limits - should probably depend on the available
75 // memory (the limit is not yet enforced)
76 static int32 sMaxThreads = 4096;
77 static int32 sUsedThreads = 0;
78 
79 static sem_id sSnoozeSem = -1;
80 
81 // death stacks - used temporarily as a thread cleans itself up
82 struct death_stack {
83 	area_id	area;
84 	addr_t	address;
85 	bool	in_use;
86 };
87 static struct death_stack *sDeathStacks;
88 static unsigned int sNumDeathStacks;
89 static unsigned int volatile sDeathStackBitmap;
90 static sem_id sDeathStackSem;
91 static spinlock sDeathStackLock = 0;
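// A thread cannot delete the kernel stack it is currently running on. On
// exit it therefore borrows one of these preallocated death stacks:
// get_death_stack() (called from thread_exit()) claims a free slot,
// arch_thread_switch_kstack_and_call() switches onto it, thread_exit2()
// deletes the old kernel stack, and put_death_stack() returns the slot to
// the pool right before the final reschedule.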
92 
93 // The dead queue is used as a pool from which to retrieve and reuse previously
94 // allocated thread structs when creating a new thread. It should be gone once
95 // the slab allocator is in.
96 struct thread_queue dead_q;
97 
98 static void thread_kthread_entry(void);
99 static void thread_kthread_exit(void);
100 
101 
102 /*!
103 	Inserts a thread into a team.
104 	You must hold the team lock when you call this function.
105 */
106 static void
107 insert_thread_into_team(struct team *team, struct thread *thread)
108 {
109 	thread->team_next = team->thread_list;
110 	team->thread_list = thread;
111 	team->num_threads++;
112 
113 	if (team->num_threads == 1) {
114 		// this was the first thread
115 		team->main_thread = thread;
116 	}
117 	thread->team = team;
118 }
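// A minimal calling sketch (illustration only; "team" and "newThread" are
// hypothetical locals). Callers follow the usual pattern of disabling
// interrupts and taking the team lock around the list update, as
// create_thread() below does:
//
//	cpu_status state = disable_interrupts();
//	GRAB_TEAM_LOCK();
//
//	insert_thread_into_team(team, newThread);
//
//	RELEASE_TEAM_LOCK();
//	restore_interrupts(state);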
119 
120 
121 /*!
122 	Removes a thread from a team.
123 	You must hold the team lock when you call this function.
124 */
125 static void
126 remove_thread_from_team(struct team *team, struct thread *thread)
127 {
128 	struct thread *temp, *last = NULL;
129 
130 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
131 		if (temp == thread) {
132 			if (last == NULL)
133 				team->thread_list = temp->team_next;
134 			else
135 				last->team_next = temp->team_next;
136 
137 			team->num_threads--;
138 			break;
139 		}
140 		last = temp;
141 	}
142 }
143 
144 
145 static int
146 thread_struct_compare(void *_t, const void *_key)
147 {
148 	struct thread *thread = (struct thread*)_t;
149 	const struct thread_key *key = (const struct thread_key*)_key;
150 
151 	if (thread->id == key->id)
152 		return 0;
153 
154 	return 1;
155 }
156 
157 
158 static uint32
159 thread_struct_hash(void *_t, const void *_key, uint32 range)
160 {
161 	struct thread *thread = (struct thread*)_t;
162 	const struct thread_key *key = (const struct thread_key*)_key;
163 
164 	if (thread != NULL)
165 		return thread->id % range;
166 
167 	return (uint32)key->id % range;
168 }
169 
170 
171 static void
172 reset_signals(struct thread *thread)
173 {
174 	thread->sig_pending = 0;
175 	thread->sig_block_mask = 0;
176 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
177 	thread->signal_stack_base = 0;
178 	thread->signal_stack_size = 0;
179 	thread->signal_stack_enabled = false;
180 }
181 
182 
183 /*!
184 	Allocates and fills in thread structure (or reuses one from the
185 	dead queue).
186 
187 	\param threadID The ID to be assigned to the new thread. If
188 		  \code < 0 \endcode a fresh one is allocated.
189 	\param inthread If not \c NULL, the preallocated thread structure to initialize; if \c NULL, a new one is allocated (or recycled from the dead queue).
190 */
191 
192 static struct thread *
193 create_thread_struct(struct thread *inthread, const char *name,
194 	thread_id threadID, struct cpu_ent *cpu)
195 {
196 	struct thread *thread;
197 	cpu_status state;
198 	char temp[64];
199 
200 	if (inthread == NULL) {
201 		// try to recycle one from the dead queue first
202 		state = disable_interrupts();
203 		GRAB_THREAD_LOCK();
204 		thread = thread_dequeue(&dead_q);
205 		RELEASE_THREAD_LOCK();
206 		restore_interrupts(state);
207 
208 		// if not, create a new one
209 		if (thread == NULL) {
210 			thread = (struct thread *)malloc(sizeof(struct thread));
211 			if (thread == NULL)
212 				return NULL;
213 		}
214 	} else {
215 		thread = inthread;
216 	}
217 
218 	if (name != NULL)
219 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
220 	else
221 		strcpy(thread->name, "unnamed thread");
222 
223 	thread->flags = 0;
224 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
225 	thread->team = NULL;
226 	thread->cpu = cpu;
227 	thread->sem.blocking = -1;
228 	thread->condition_variable_entry = NULL;
229 	thread->fault_handler = 0;
230 	thread->page_faults_allowed = 1;
231 	thread->kernel_stack_area = -1;
232 	thread->kernel_stack_base = 0;
233 	thread->user_stack_area = -1;
234 	thread->user_stack_base = 0;
235 	thread->user_local_storage = 0;
236 	thread->kernel_errno = 0;
237 	thread->team_next = NULL;
238 	thread->queue_next = NULL;
239 	thread->priority = thread->next_priority = -1;
240 	thread->args1 = NULL;  thread->args2 = NULL;
241 	thread->alarm.period = 0;
242 	reset_signals(thread);
243 	thread->in_kernel = true;
244 	thread->was_yielded = false;
245 	thread->user_time = 0;
246 	thread->kernel_time = 0;
247 	thread->last_time = 0;
248 	thread->exit.status = 0;
249 	thread->exit.reason = 0;
250 	thread->exit.signal = 0;
251 	list_init(&thread->exit.waiters);
252 	thread->select_infos = NULL;
253 
254 	sprintf(temp, "thread_%lx_retcode_sem", thread->id);
255 	thread->exit.sem = create_sem(0, temp);
256 	if (thread->exit.sem < B_OK)
257 		goto err1;
258 
259 	sprintf(temp, "%s send", thread->name);
260 	thread->msg.write_sem = create_sem(1, temp);
261 	if (thread->msg.write_sem < B_OK)
262 		goto err2;
263 
264 	sprintf(temp, "%s receive", thread->name);
265 	thread->msg.read_sem = create_sem(0, temp);
266 	if (thread->msg.read_sem < B_OK)
267 		goto err3;
268 
269 	if (arch_thread_init_thread_struct(thread) < B_OK)
270 		goto err4;
271 
272 	return thread;
273 
274 err4:
275 	delete_sem(thread->msg.read_sem);
276 err3:
277 	delete_sem(thread->msg.write_sem);
278 err2:
279 	delete_sem(thread->exit.sem);
280 err1:
281 	// ToDo: put them in the dead queue instead?
282 	if (inthread == NULL)
283 		free(thread);
284 	return NULL;
285 }
286 
287 
288 static void
289 delete_thread_struct(struct thread *thread)
290 {
291 	delete_sem(thread->exit.sem);
292 	delete_sem(thread->msg.write_sem);
293 	delete_sem(thread->msg.read_sem);
294 
295 	// ToDo: put them in the dead queue instead?
296 	free(thread);
297 }
298 
299 
300 /*! This function gets run by a new thread before anything else */
301 static void
302 thread_kthread_entry(void)
303 {
304 	struct thread *thread = thread_get_current_thread();
305 
306 	// simulates the thread spinlock release that would occur if the thread had been
307 	// rescheduled from. The reschedule didn't happen because the thread is new.
308 	RELEASE_THREAD_LOCK();
309 
310 	// start tracking time
311 	thread->last_time = system_time();
312 
313 	enable_interrupts(); // this essentially simulates a return-from-interrupt
314 }
315 
316 
317 static void
318 thread_kthread_exit(void)
319 {
320 	struct thread *thread = thread_get_current_thread();
321 
322 	thread->exit.reason = THREAD_RETURN_EXIT;
323 	thread_exit();
324 }
325 
326 
327 /*!
328 	Initializes the thread and jumps to its userspace entry point.
329 	This function is called at creation time of every user thread,
330 	but not for a team's main thread.
331 */
332 static int
333 _create_user_thread_kentry(void)
334 {
335 	struct thread *thread = thread_get_current_thread();
336 
337 	// a signal may have been delivered here
338 	thread_at_kernel_exit();
339 
340 	// jump to the entry point in user space
341 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
342 		thread->args1, thread->args2);
343 
344 	// only get here if the above call fails
345 	return 0;
346 }
347 
348 
349 /*! Initializes the thread and calls its kernel space entry point. */
350 static int
351 _create_kernel_thread_kentry(void)
352 {
353 	struct thread *thread = thread_get_current_thread();
354 	int (*func)(void *args) = (int (*)(void *))thread->entry;
355 
356 	// call the entry function with the appropriate args
357 	return func(thread->args1);
358 }
359 
360 
361 /*!
362 	Creates a new thread in the team with the specified team ID.
363 
364 	\param threadID The ID to be assigned to the new thread. If
365 		  \code < 0 \endcode a fresh one is allocated.
366 */
367 static thread_id
368 create_thread(const char *name, team_id teamID, thread_entry_func entry,
369 	void *args1, void *args2, int32 priority, bool kernel, thread_id threadID)
370 {
371 	struct thread *thread, *currentThread;
372 	struct team *team;
373 	cpu_status state;
374 	char stack_name[B_OS_NAME_LENGTH];
375 	status_t status;
376 	bool abort = false;
377 	bool debugNewThread = false;
378 
379 	TRACE(("create_thread(%s, id = %ld, %s)\n", name, threadID,
380 		kernel ? "kernel" : "user"));
381 
382 	thread = create_thread_struct(NULL, name, threadID, NULL);
383 	if (thread == NULL)
384 		return B_NO_MEMORY;
385 
386 	thread->priority = priority == -1 ? B_NORMAL_PRIORITY : priority;
387 	thread->next_priority = thread->priority;
388 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
389 	thread->state = B_THREAD_SUSPENDED;
390 	thread->next_state = B_THREAD_SUSPENDED;
391 
392 	// init debug structure
393 	clear_thread_debug_info(&thread->debug_info, false);
394 
395 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%lx_kstack", name, thread->id);
396 	thread->kernel_stack_area = create_area(stack_name,
397 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
398 		KERNEL_STACK_SIZE, B_FULL_LOCK,
399 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
400 
401 	if (thread->kernel_stack_area < 0) {
402 		// we're not yet part of a team, so we can just bail out
403 		status = thread->kernel_stack_area;
404 
405 		dprintf("create_thread: error creating kernel stack: %s!\n",
406 			strerror(status));
407 
408 		delete_thread_struct(thread);
409 		return status;
410 	}
411 
412 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE;
413 
414 	state = disable_interrupts();
415 	GRAB_THREAD_LOCK();
416 
417 	// If the new thread belongs to the same team as the current thread,
418 	// it may inherit some of the thread debug flags.
419 	currentThread = thread_get_current_thread();
420 	if (currentThread && currentThread->team->id == teamID) {
421 		// inherit all user flags...
422 		int32 debugFlags = currentThread->debug_info.flags
423 			& B_THREAD_DEBUG_USER_FLAG_MASK;
424 
425 		// ... but don't inherit the syscall tracing flags, unless explicitly requested
426 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
427 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
428 				| B_THREAD_DEBUG_POST_SYSCALL);
429 		}
430 
431 		thread->debug_info.flags = debugFlags;
432 
433 		// stop the new thread, if desired
434 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
435 	}
436 
437 	// insert into global list
438 	hash_insert(sThreadHash, thread);
439 	sUsedThreads++;
440 	RELEASE_THREAD_LOCK();
441 
442 	GRAB_TEAM_LOCK();
443 	// look at the team, make sure it's not being deleted
444 	team = team_get_team_struct_locked(teamID);
445 	if (team != NULL && team->state != TEAM_STATE_DEATH) {
446 		// Debug the new thread, if the parent thread required that (see above),
447 		// or the respective global team debug flag is set. But only if a
448 		// debugger is installed for the team.
449 		debugNewThread |= (atomic_get(&team->debug_info.flags)
450 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
451 		if (debugNewThread
452 			&& (atomic_get(&team->debug_info.flags)
453 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
454 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
455 		}
456 
457 		insert_thread_into_team(team, thread);
458 	} else
459 		abort = true;
460 
461 	RELEASE_TEAM_LOCK();
462 	if (abort) {
463 		GRAB_THREAD_LOCK();
464 		hash_remove(sThreadHash, thread);
465 		RELEASE_THREAD_LOCK();
466 	}
467 	restore_interrupts(state);
468 	if (abort) {
469 		delete_area(thread->kernel_stack_area);
470 		delete_thread_struct(thread);
471 		return B_BAD_TEAM_ID;
472 	}
473 
474 	thread->args1 = args1;
475 	thread->args2 = args2;
476 	thread->entry = entry;
477 	status = thread->id;
478 
479 	if (kernel) {
480 		// this sets up an initial kthread stack that runs the entry
481 
482 		// Note: whatever function wants to set up a user stack later for this
483 		// thread must initialize the TLS for it
484 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
485 			&thread_kthread_entry, &thread_kthread_exit);
486 	} else {
487 		// create user stack
488 
489 		// the stack will be between USER_STACK_REGION and the main thread stack area
490 		// (the user stack of the main thread is created in team_create_team())
491 		thread->user_stack_base = USER_STACK_REGION;
492 		thread->user_stack_size = USER_STACK_SIZE;
493 
494 		snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%lx_stack", name, thread->id);
495 		thread->user_stack_area = create_area_etc(team, stack_name,
496 				(void **)&thread->user_stack_base, B_BASE_ADDRESS,
497 				thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
498 				B_READ_AREA | B_WRITE_AREA | B_STACK_AREA);
499 		if (thread->user_stack_area < B_OK
500 			|| arch_thread_init_tls(thread) < B_OK) {
501 			// great, we have a fully running thread without a (usable) stack
502 			dprintf("create_thread: unable to create proper user stack!\n");
503 			status = thread->user_stack_area;
504 			kill_thread(thread->id);
505 		}
506 
507 		user_debug_update_new_thread_flags(thread->id);
508 
509 		// copy the user entry over to the args field in the thread struct
510 		// the kernel entry function set up below will immediately switch the thread into
511 		// user space.
512 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
513 			&thread_kthread_entry, &thread_kthread_exit);
514 	}
515 
516 	return status;
517 }
518 
519 
520 /*!
521 	Finds a free death stack for us and allocates it.
522 	Must be called with interrupts enabled.
523 */
524 static uint32
525 get_death_stack(void)
526 {
527 	cpu_status state;
528 	uint32 bit;
529 	int32 i;
530 
531 	acquire_sem(sDeathStackSem);
532 
533 	// grab the death stack and thread locks, find a free spot and release
534 
535 	state = disable_interrupts();
536 
537 	acquire_spinlock(&sDeathStackLock);
538 	GRAB_THREAD_LOCK();
539 
540 	bit = sDeathStackBitmap;
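	// The next line isolates the lowest *clear* bit of the bitmap, i.e. the
	// first free death stack slot: ~bit has a 1 exactly where a slot is free,
	// and for any nonzero x, x & ~(x - 1) keeps only its lowest set bit.
	// Example: with a bitmap of 0b0011 this yields bit == 0b0100 (slot 2).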
541 	bit = (~bit) & ~((~bit) - 1);
542 	sDeathStackBitmap |= bit;
543 
544 	RELEASE_THREAD_LOCK();
545 	release_spinlock(&sDeathStackLock);
546 
547 	restore_interrupts(state);
548 
549 	// sanity checks
550 	if (!bit)
551 		panic("get_death_stack: couldn't find free stack!\n");
552 
553 	if (bit & (bit - 1))
554 		panic("get_death_stack: impossible bitmap result!\n");
555 
556 	// bit to number
557 	for (i = -1; bit; i++) {
558 		bit >>= 1;
559 	}
560 
561 	TRACE(("get_death_stack: returning %#lx\n", sDeathStacks[i].address));
562 
563 	return (uint32)i;
564 }
565 
566 
567 /*!	Returns the thread's death stack to the pool.
568 	Interrupts must be disabled and sDeathStackLock must be held.
569 */
570 static void
571 put_death_stack(uint32 index)
572 {
573 	TRACE(("put_death_stack...: passed %lu\n", index));
574 
575 	if (index >= sNumDeathStacks)
576 		panic("put_death_stack: passed invalid stack index %ld\n", index);
577 
578 	if (!(sDeathStackBitmap & (1 << index)))
579 		panic("put_death_stack: passed invalid stack index %ld\n", index);
580 
581 	GRAB_THREAD_LOCK();
582 	sDeathStackBitmap &= ~(1 << index);
583 	RELEASE_THREAD_LOCK();
584 
585 	release_sem_etc(sDeathStackSem, 1, B_DO_NOT_RESCHEDULE);
586 		// we must not hold the thread lock when releasing a semaphore
587 }
588 
589 
590 static void
591 thread_exit2(void *_args)
592 {
593 	struct thread_exit_args args;
594 
595 	// copy the arguments over, since the source is probably on the kernel
596 	// stack we're about to delete
597 	memcpy(&args, _args, sizeof(struct thread_exit_args));
598 
599 	// we must not leave interrupts disabled at this point
600 	enable_interrupts();
601 
602 	TRACE(("thread_exit2, running on death stack %#lx\n", args.death_stack));
603 
604 	// delete the old kernel stack area
605 	TRACE(("thread_exit2: deleting old kernel stack id %ld for thread %ld\n",
606 		args.old_kernel_stack, args.thread->id));
607 
608 	delete_area(args.old_kernel_stack);
609 
610 	// remove this thread from all of the global lists
611 	TRACE(("thread_exit2: removing thread %ld from global lists\n",
612 		args.thread->id));
613 
614 	disable_interrupts();
615 	GRAB_TEAM_LOCK();
616 
617 	remove_thread_from_team(team_get_kernel_team(), args.thread);
618 
619 	RELEASE_TEAM_LOCK();
620 	enable_interrupts();
621 		// needed for the debugger notification below
622 
623 	TRACE(("thread_exit2: done removing thread from lists\n"));
624 
625 	if (args.death_sem >= 0)
626 		release_sem_etc(args.death_sem, 1, B_DO_NOT_RESCHEDULE);
627 
628 	// notify the debugger
629 	if (args.original_team_id >= 0
630 		&& args.original_team_id != team_get_kernel_team_id()) {
631 		user_debug_thread_deleted(args.original_team_id, args.thread->id);
632 	}
633 
634 	disable_interrupts();
635 
636 	// Set the next state to be gone: this will cause the thread structure
637 	// to be returned to the dead queue for reuse upon the next reschedule.
638 	// Note that we need to have interrupts disabled at this point, or else
639 	// we could get rescheduled too early.
640 	args.thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
641 
642 	// return the death stack and reschedule one last time
643 
644 	// Note that we need to hold sDeathStackLock until we've got the thread
645 	// lock. Otherwise someone else might grab our stack in the meantime.
646 	acquire_spinlock(&sDeathStackLock);
647 	put_death_stack(args.death_stack);
648 
649 	GRAB_THREAD_LOCK();
650 	release_spinlock(&sDeathStackLock);
651 
652 	scheduler_reschedule();
653 		// requires thread lock to be held
654 
655 	// never get to here
656 	panic("thread_exit2: made it where it shouldn't have!\n");
657 }
658 
659 
660 /*!
661 	Fills the thread_info structure with information from the specified
662 	thread.
663 	The thread lock must be held when called.
664 */
665 static void
666 fill_thread_info(struct thread *thread, thread_info *info, size_t size)
667 {
668 	info->thread = thread->id;
669 	info->team = thread->team->id;
670 
671 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
672 
673 	if (thread->state == B_THREAD_WAITING) {
674 		if (thread->sem.blocking == sSnoozeSem)
675 			info->state = B_THREAD_ASLEEP;
676 		else if (thread->sem.blocking == thread->msg.read_sem)
677 			info->state = B_THREAD_RECEIVING;
678 		else
679 			info->state = B_THREAD_WAITING;
680 	} else
681 		info->state = (thread_state)thread->state;
682 
683 	info->priority = thread->priority;
684 	info->sem = thread->sem.blocking;
685 	info->user_time = thread->user_time;
686 	info->kernel_time = thread->kernel_time;
687 	info->stack_base = (void *)thread->user_stack_base;
688 	info->stack_end = (void *)(thread->user_stack_base
689 		+ thread->user_stack_size);
690 }
691 
692 
693 static status_t
694 send_data_etc(thread_id id, int32 code, const void *buffer,
695 	size_t bufferSize, int32 flags)
696 {
697 	struct thread *target;
698 	sem_id cachedSem;
699 	cpu_status state;
700 	status_t status;
701 	cbuf *data;
702 
703 	state = disable_interrupts();
704 	GRAB_THREAD_LOCK();
705 	target = thread_get_thread_struct_locked(id);
706 	if (!target) {
707 		RELEASE_THREAD_LOCK();
708 		restore_interrupts(state);
709 		return B_BAD_THREAD_ID;
710 	}
711 	cachedSem = target->msg.write_sem;
712 	RELEASE_THREAD_LOCK();
713 	restore_interrupts(state);
714 
715 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
716 		return B_NO_MEMORY;
717 
718 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
719 	if (status == B_INTERRUPTED) {
720 		// We got interrupted by a signal
721 		return status;
722 	}
723 	if (status != B_OK) {
724 		// Any other acquisition problems may be due to thread deletion
725 		return B_BAD_THREAD_ID;
726 	}
727 
728 	if (bufferSize > 0) {
729 		data = cbuf_get_chain(bufferSize);
730 		if (data == NULL)
731 			return B_NO_MEMORY;
732 		status = cbuf_user_memcpy_to_chain(data, 0, buffer, bufferSize);
733 		if (status < B_OK) {
734 			cbuf_free_chain(data);
735 			return B_NO_MEMORY;
736 		}
737 	} else
738 		data = NULL;
739 
740 	state = disable_interrupts();
741 	GRAB_THREAD_LOCK();
742 
743 	// The target thread could have been deleted at this point
744 	target = thread_get_thread_struct_locked(id);
745 	if (target == NULL) {
746 		RELEASE_THREAD_LOCK();
747 		restore_interrupts(state);
748 		cbuf_free_chain(data);
749 		return B_BAD_THREAD_ID;
750 	}
751 
752 	// Save the message information
753 	target->msg.sender = thread_get_current_thread()->id;
754 	target->msg.code = code;
755 	target->msg.size = bufferSize;
756 	target->msg.buffer = data;
757 	cachedSem = target->msg.read_sem;
758 
759 	RELEASE_THREAD_LOCK();
760 	restore_interrupts(state);
761 
762 	release_sem(cachedSem);
763 	return B_OK;
764 }
765 
766 
767 static int32
768 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
769 	int32 flags)
770 {
771 	struct thread *thread = thread_get_current_thread();
772 	status_t status;
773 	size_t size;
774 	int32 code;
775 
776 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
777 	if (status < B_OK) {
778 		// Actually, we're not supposed to return error codes
779 		// but since the only reason this can fail is that we
780 		// were killed, it's probably okay to do so (but also
781 		// meaningless).
782 		return status;
783 	}
784 
785 	if (buffer != NULL && bufferSize != 0) {
786 		size = min_c(bufferSize, thread->msg.size);
787 		status = cbuf_user_memcpy_from_chain(buffer, thread->msg.buffer,
788 			0, size);
789 		if (status < B_OK) {
790 			cbuf_free_chain(thread->msg.buffer);
791 			release_sem(thread->msg.write_sem);
792 			return status;
793 		}
794 	}
795 
796 	*_sender = thread->msg.sender;
797 	code = thread->msg.code;
798 
799 	cbuf_free_chain(thread->msg.buffer);
800 	release_sem(thread->msg.write_sem);
801 
802 	return code;
803 }
804 
805 
806 //	#pragma mark - debugger calls
807 
808 
809 static int
810 make_thread_unreal(int argc, char **argv)
811 {
812 	struct thread *thread;
813 	struct hash_iterator i;
814 	int32 id = -1;
815 
816 	if (argc > 2) {
817 		kprintf("usage: unreal [id]\n");
818 		return 0;
819 	}
820 
821 	if (argc > 1)
822 		id = strtoul(argv[1], NULL, 0);
823 
824 	hash_open(sThreadHash, &i);
825 
826 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
827 		if (id != -1 && thread->id != id)
828 			continue;
829 
830 		if (thread->priority > B_DISPLAY_PRIORITY) {
831 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
832 			kprintf("thread %ld made unreal\n", thread->id);
833 		}
834 	}
835 
836 	hash_close(sThreadHash, &i, false);
837 	return 0;
838 }
839 
840 
841 static int
842 set_thread_prio(int argc, char **argv)
843 {
844 	struct thread *thread;
845 	struct hash_iterator i;
846 	int32 id;
847 	int32 prio;
848 
849 	if (argc > 3 || argc < 2) {
850 		kprintf("usage: priority <priority> [thread-id]\n");
851 		return 0;
852 	}
853 
854 	prio = strtoul(argv[1], NULL, 0);
855 	if (prio > B_MAX_PRIORITY)
856 		prio = B_MAX_PRIORITY;
857 	if (prio < B_MIN_PRIORITY)
858 		prio = B_MIN_PRIORITY;
859 
860 	if (argc > 2)
861 		id = strtoul(argv[2], NULL, 0);
862 	else
863 		id = thread_get_current_thread()->id;
864 
865 	hash_open(sThreadHash, &i);
866 
867 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
868 		if (thread->id != id)
869 			continue;
870 		thread->priority = thread->next_priority = prio;
871 		kprintf("thread %ld set to priority %ld\n", id, prio);
872 		break;
873 	}
874 	if (!thread)
875 		kprintf("thread %ld (%#lx) not found\n", id, id);
876 
877 	hash_close(sThreadHash, &i, false);
878 	return 0;
879 }
880 
881 
882 static int
883 make_thread_suspended(int argc, char **argv)
884 {
885 	struct thread *thread;
886 	struct hash_iterator i;
887 	int32 id;
888 
889 	if (argc > 2) {
890 		kprintf("usage: suspend [thread-id]\n");
891 		return 0;
892 	}
893 
894 	if (argc == 1)
895 		id = thread_get_current_thread()->id;
896 	else
897 		id = strtoul(argv[1], NULL, 0);
898 
899 	hash_open(sThreadHash, &i);
900 
901 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
902 		if (thread->id != id)
903 			continue;
904 
905 		thread->state = thread->next_state = B_THREAD_SUSPENDED;
906 		kprintf("thread %ld suspended\n", id);
907 		break;
908 	}
909 	if (!thread)
910 		kprintf("thread %ld (%#lx) not found\n", id, id);
911 
912 	hash_close(sThreadHash, &i, false);
913 	return 0;
914 }
915 
916 
917 static int
918 make_thread_resumed(int argc, char **argv)
919 {
920 	struct thread *thread;
921 	struct hash_iterator i;
922 	int32 id;
923 
924 	if (argc != 2) {
925 		kprintf("usage: resume <thread-id>\n");
926 		return 0;
927 	}
928 
929 	// force user to enter a thread id, as using
930 	// the current thread is usually not intended
931 	id = strtoul(argv[1], NULL, 0);
932 
933 	hash_open(sThreadHash, &i);
934 
935 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
936 		if (thread->id != id)
937 			continue;
938 
939 		if (thread->state == B_THREAD_SUSPENDED) {
940 			thread->state = thread->next_state = B_THREAD_READY;
941 			scheduler_enqueue_in_run_queue(thread);
942 			kprintf("thread %ld resumed\n", thread->id);
943 		}
944 		break;
945 	}
946 	if (!thread)
947 		kprintf("thread %ld (%#lx) not found\n", id, id);
948 
949 	hash_close(sThreadHash, &i, false);
950 	return 0;
951 }
952 
953 
954 static int
955 drop_into_debugger(int argc, char **argv)
956 {
957 	status_t err;
958 	int32 id;
959 
960 	if (argc > 2) {
961 		kprintf("usage: drop [thread-id]\n");
962 		return 0;
963 	}
964 
965 	if (argc == 1)
966 		id = thread_get_current_thread()->id;
967 	else
968 		id = strtoul(argv[1], NULL, 0);
969 
970 	err = _user_debug_thread(id);
971 	if (err)
972 		kprintf("drop failed\n");
973 	else
974 		kprintf("thread %ld dropped into user debugger\n", id);
975 
976 	return 0;
977 }
978 
979 
980 static const char *
981 state_to_text(struct thread *thread, int32 state)
982 {
983 	switch (state) {
984 		case B_THREAD_READY:
985 			return "ready";
986 
987 		case B_THREAD_RUNNING:
988 			return "running";
989 
990 		case B_THREAD_WAITING:
991 			if (thread->sem.blocking == sSnoozeSem)
992 				return "zzz";
993 			if (thread->sem.blocking == thread->msg.read_sem)
994 				return "receive";
995 
996 			return "waiting";
997 
998 		case B_THREAD_SUSPENDED:
999 			return "suspended";
1000 
1001 		case THREAD_STATE_FREE_ON_RESCHED:
1002 			return "death";
1003 
1004 		default:
1005 			return "UNKNOWN";
1006 	}
1007 }
1008 
1009 
1010 static void
1011 _dump_thread_info(struct thread *thread)
1012 {
1013 	struct death_entry *death = NULL;
1014 
1015 	kprintf("THREAD: %p\n", thread);
1016 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1017 	kprintf("name:               \"%s\"\n", thread->name);
1018 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1019 		thread->all_next, thread->team_next, thread->queue_next);
1020 	kprintf("priority:           %ld (next %ld)\n", thread->priority, thread->next_priority);
1021 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1022 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1023 	kprintf("cpu:                %p ", thread->cpu);
1024 	if (thread->cpu)
1025 		kprintf("(%d)\n", thread->cpu->cpu_num);
1026 	else
1027 		kprintf("\n");
1028 	kprintf("sig_pending:        %#lx\n", thread->sig_pending);
1029 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1030 	kprintf("  sem.blocking:     %ld\n", thread->sem.blocking);
1031 	kprintf("  sem.count:        %ld\n", thread->sem.count);
1032 	kprintf("  sem.acquire_status: %#lx\n", thread->sem.acquire_status);
1033 	kprintf("  sem.flags:        %#lx\n", thread->sem.flags);
1034 
1035 	kprintf("condition variables:");
1036 	PrivateConditionVariableEntry* entry = thread->condition_variable_entry;
1037 	while (entry != NULL) {
1038 		kprintf(" %p", entry->Variable());
1039 		entry = entry->ThreadNext();
1040 	}
1041 	kprintf("\n");
1042 
1043 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1044 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1045 	kprintf("entry:              %p\n", (void *)thread->entry);
1046 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1047 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1048 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1049 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1050 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1051 	kprintf("  exit.waiters:\n");
1052 	while ((death = (struct death_entry*)list_get_next_item(
1053 			&thread->exit.waiters, death)) != NULL) {
1054 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1055 	}
1056 
1057 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1058 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1059 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1060 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1061 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1062 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1063 		strerror(thread->kernel_errno));
1064 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1065 	kprintf("user_time:          %Ld\n", thread->user_time);
1066 	kprintf("flags:              0x%lx\n", thread->flags);
1067 	kprintf("architecture dependent section:\n");
1068 	arch_thread_dump_info(&thread->arch_info);
1069 }
1070 
1071 
1072 static int
1073 dump_thread_info(int argc, char **argv)
1074 {
1075 	const char *name = NULL;
1076 	struct thread *thread;
1077 	int32 id = -1;
1078 	struct hash_iterator i;
1079 	bool found = false;
1080 
1081 	if (argc > 2) {
1082 		kprintf("usage: thread [id/address/name]\n");
1083 		return 0;
1084 	}
1085 
1086 	if (argc == 1) {
1087 		_dump_thread_info(thread_get_current_thread());
1088 		return 0;
1089 	} else {
1090 		name = argv[1];
1091 		id = strtoul(argv[1], NULL, 0);
1092 
1093 		if (IS_KERNEL_ADDRESS(id)) {
1094 			// semi-hack
1095 			_dump_thread_info((struct thread *)id);
1096 			return 0;
1097 		}
1098 	}
1099 
1100 	// walk through the thread list, trying to match name or id
1101 	hash_open(sThreadHash, &i);
1102 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1103 		if ((name != NULL && !strcmp(name, thread->name)) || thread->id == id) {
1104 			_dump_thread_info(thread);
1105 			found = true;
1106 			break;
1107 		}
1108 	}
1109 	hash_close(sThreadHash, &i, false);
1110 
1111 	if (!found)
1112 		kprintf("thread \"%s\" (%ld) doesn't exist!\n", argv[1], id);
1113 	return 0;
1114 }
1115 
1116 
1117 static int
1118 dump_thread_list(int argc, char **argv)
1119 {
1120 	struct thread *thread;
1121 	struct hash_iterator i;
1122 	bool realTimeOnly = false;
1123 	int32 requiredState = 0;
1124 	team_id team = -1;
1125 	sem_id sem = -1;
1126 
1127 	if (!strcmp(argv[0], "realtime"))
1128 		realTimeOnly = true;
1129 	else if (!strcmp(argv[0], "ready"))
1130 		requiredState = B_THREAD_READY;
1131 	else if (!strcmp(argv[0], "running"))
1132 		requiredState = B_THREAD_RUNNING;
1133 	else if (!strcmp(argv[0], "waiting")) {
1134 		requiredState = B_THREAD_WAITING;
1135 
1136 		if (argc > 1) {
1137 			sem = strtoul(argv[1], NULL, 0);
1138 			if (sem == 0)
1139 				kprintf("ignoring invalid semaphore argument.\n");
1140 		}
1141 	} else if (argc > 1) {
1142 		team = strtoul(argv[1], NULL, 0);
1143 		if (team == 0)
1144 			kprintf("ignoring invalid team argument.\n");
1145 	}
1146 
1147 	kprintf("thread         id  state        sem/cv cpu pri  stack      team  "
1148 		"name\n");
1149 
1150 	hash_open(sThreadHash, &i);
1151 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1152 		// filter out threads not matching the search criteria
1153 		if ((requiredState && thread->state != requiredState)
1154 			|| (sem > 0 && thread->sem.blocking != sem)
1155 			|| (team > 0 && thread->team->id != team)
1156 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1157 			continue;
1158 
1159 		kprintf("%p %6ld  %-9s", thread, thread->id, state_to_text(thread,
1160 			thread->state));
1161 
1162 		// does it block on a semaphore or a condition variable?
1163 		if (thread->state == B_THREAD_WAITING) {
1164 			if (thread->condition_variable_entry)
1165 				kprintf("%p  ", thread->condition_variable_entry->Variable());
1166 			else
1167 				kprintf("%10ld  ", thread->sem.blocking);
1168 		} else
1169 			kprintf("      -     ");
1170 
1171 		// on which CPU does it run?
1172 		if (thread->cpu)
1173 			kprintf("%2d", thread->cpu->cpu_num);
1174 		else
1175 			kprintf(" -");
1176 
1177 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1178 			(void *)thread->kernel_stack_base, thread->team->id,
1179 			thread->name != NULL ? thread->name : "<NULL>");
1180 	}
1181 	hash_close(sThreadHash, &i, false);
1182 	return 0;
1183 }
1184 
1185 
1186 //	#pragma mark - private kernel API
1187 
1188 
1189 void
1190 thread_exit(void)
1191 {
1192 	cpu_status state;
1193 	struct thread *thread = thread_get_current_thread();
1194 	struct process_group *freeGroup = NULL;
1195 	struct team *team = thread->team;
1196 	thread_id parentID = -1;
1197 	bool deleteTeam = false;
1198 	sem_id cachedDeathSem = -1;
1199 	status_t status;
1200 	struct thread_debug_info debugInfo;
1201 	team_id teamID = team->id;
1202 
1203 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1204 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1205 			? "due to signal" : "normally", thread->exit.status));
1206 
1207 	if (!are_interrupts_enabled())
1208 		panic("thread_exit() called with interrupts disabled!\n");
1209 
1210 	// boost our priority to get this over with
1211 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1212 
1213 	// Cancel previously installed alarm timer, if any
1214 	cancel_timer(&thread->alarm);
1215 
1216 	// delete the user stack area first, we won't need it anymore
1217 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1218 		area_id area = thread->user_stack_area;
1219 		thread->user_stack_area = -1;
1220 		delete_area_etc(team, area);
1221 	}
1222 
1223 	struct job_control_entry *death = NULL;
1224 	struct death_entry* threadDeathEntry = NULL;
1225 
1226 	if (team != team_get_kernel_team()) {
1227 		if (team->main_thread == thread) {
1228 			// this was the main thread in this team, so we will delete the team as well
1229 			deleteTeam = true;
1230 		} else
1231 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1232 
1233 		// remove this thread from the current team and put it into the
1234 		// kernel team until it dies
1235 		state = disable_interrupts();
1236 		GRAB_TEAM_LOCK();
1237 		GRAB_THREAD_LOCK();
1238 			// removing the thread and putting its death entry into the parent
1239 			// team needs to be an atomic operation
1240 
1241 		// remember how long this thread lasted
1242 		team->dead_threads_kernel_time += thread->kernel_time;
1243 		team->dead_threads_user_time += thread->user_time;
1244 
1245 		remove_thread_from_team(team, thread);
1246 		insert_thread_into_team(team_get_kernel_team(), thread);
1247 
1248 		cachedDeathSem = team->death_sem;
1249 
1250 		if (deleteTeam) {
1251 			struct team *parent = team->parent;
1252 
1253 			// remember who our parent was so we can send a signal
1254 			parentID = parent->id;
1255 
1256 			// Set the team job control state to "dead" and detach the job
1257 			// control entry from our team struct.
1258 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1259 			death = team->job_control_entry;
1260 			team->job_control_entry = NULL;
1261 
1262 			if (death != NULL) {
1263 				death->team = NULL;
1264 				death->group_id = team->group_id;
1265 				death->thread = thread->id;
1266 				death->status = thread->exit.status;
1267 				death->reason = thread->exit.reason;
1268 				death->signal = thread->exit.signal;
1269 
1270 				// team_set_job_control_state() already moved our entry
1271 				// into the parent's list. We just check the soft limit of
1272 				// death entries.
1273 				if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
1274 					death = parent->dead_children->entries.RemoveHead();
1275 					parent->dead_children->count--;
1276 				} else
1277 					death = NULL;
1278 
1279 				RELEASE_THREAD_LOCK();
1280 			} else
1281 				RELEASE_THREAD_LOCK();
1282 
1283 			team_remove_team(team, &freeGroup);
1284 
1285 			send_signal_etc(parentID, SIGCHLD,
1286 				SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
1287 		} else {
1288 			// The thread is not the main thread. We store a thread death
1289 			// entry for it, unless someone is already waiting for it.
1290 			if (threadDeathEntry != NULL
1291 				&& list_is_empty(&thread->exit.waiters)) {
1292 				threadDeathEntry->thread = thread->id;
1293 				threadDeathEntry->status = thread->exit.status;
1294 				threadDeathEntry->reason = thread->exit.reason;
1295 				threadDeathEntry->signal = thread->exit.signal;
1296 
1297 				// add entry -- remove an old one, if we hit the limit
1298 				list_add_item(&team->dead_threads, threadDeathEntry);
1299 				team->dead_threads_count++;
1300 				threadDeathEntry = NULL;
1301 
1302 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1303 					threadDeathEntry = (death_entry*)list_remove_head_item(
1304 						&team->dead_threads);
1305 					team->dead_threads_count--;
1306 				}
1307 			}
1308 
1309 			RELEASE_THREAD_LOCK();
1310 		}
1311 
1312 		RELEASE_TEAM_LOCK();
1313 
1314 		// swap address spaces, to make sure we're running on the kernel's pgdir
1315 		vm_swap_address_space(vm_kernel_address_space());
1316 		restore_interrupts(state);
1317 
1318 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1319 	}
1320 
1321 	if (threadDeathEntry != NULL)
1322 		free(threadDeathEntry);
1323 
1324 	// delete the team if we're its main thread
1325 	if (deleteTeam) {
1326 		team_delete_process_group(freeGroup);
1327 		team_delete_team(team);
1328 
1329 		// we need to delete any death entry that made it to here
1330 		if (death != NULL)
1331 			delete death;
1332 
1333 		cachedDeathSem = -1;
1334 	}
1335 
1336 	state = disable_interrupts();
1337 	GRAB_THREAD_LOCK();
1338 
1339 	// remove thread from hash, so it's no longer accessible
1340 	hash_remove(sThreadHash, thread);
1341 	sUsedThreads--;
1342 
1343 	// Stop debugging for this thread
1344 	debugInfo = thread->debug_info;
1345 	clear_thread_debug_info(&thread->debug_info, true);
1346 
1347 	// Remove the select infos. We notify them a little later.
1348 	select_info* selectInfos = thread->select_infos;
1349 	thread->select_infos = NULL;
1350 
1351 	RELEASE_THREAD_LOCK();
1352 	restore_interrupts(state);
1353 
1354 	destroy_thread_debug_info(&debugInfo);
1355 
1356 	// notify select infos
1357 	select_info* info = selectInfos;
1358 	while (info != NULL) {
1359 		select_sync* sync = info->sync;
1360 
1361 		notify_select_events(info, B_EVENT_INVALID);
1362 		info = info->next;
1363 		put_select_sync(sync);
1364 	}
1365 
1366 	// shutdown the thread messaging
1367 
1368 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1369 	if (status == B_WOULD_BLOCK) {
1370 		// there is data waiting for us, so let us eat it
1371 		thread_id sender;
1372 
1373 		delete_sem(thread->msg.write_sem);
1374 			// first, let's remove all possibly waiting writers
1375 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1376 	} else {
1377 		// we probably own the semaphore here, and we're the last to do so
1378 		delete_sem(thread->msg.write_sem);
1379 	}
1380 	// now we can safely remove the msg.read_sem
1381 	delete_sem(thread->msg.read_sem);
1382 
1383 	// fill all death entries and delete the sem that others will use to wait on us
1384 	{
1385 		sem_id cachedExitSem = thread->exit.sem;
1386 		cpu_status state;
1387 
1388 		state = disable_interrupts();
1389 		GRAB_THREAD_LOCK();
1390 
1391 		// make sure no one will grab this semaphore again
1392 		thread->exit.sem = -1;
1393 
1394 		// fill all death entries
1395 		death_entry* entry = NULL;
1396 		while ((entry = (struct death_entry*)list_get_next_item(
1397 				&thread->exit.waiters, entry)) != NULL) {
1398 			entry->status = thread->exit.status;
1399 			entry->reason = thread->exit.reason;
1400 			entry->signal = thread->exit.signal;
1401 		}
1402 
1403 		RELEASE_THREAD_LOCK();
1404 		restore_interrupts(state);
1405 
1406 		delete_sem(cachedExitSem);
1407 	}
1408 
1409 	{
1410 		struct thread_exit_args args;
1411 
1412 		args.thread = thread;
1413 		args.old_kernel_stack = thread->kernel_stack_area;
1414 		args.death_stack = get_death_stack();
1415 		args.death_sem = cachedDeathSem;
1416 		args.original_team_id = teamID;
1417 
1418 
1419 		disable_interrupts();
1420 
1421 		// set the new kernel stack officially to the death stack; it won't be
1422 		// switched until the next function is called. This must be done now
1423 		// before a context switch, or we'll stay on the old stack
1424 		thread->kernel_stack_area = sDeathStacks[args.death_stack].area;
1425 		thread->kernel_stack_base = sDeathStacks[args.death_stack].address;
1426 
1427 		// we will continue in thread_exit2(), on the new stack
1428 		arch_thread_switch_kstack_and_call(thread, thread->kernel_stack_base
1429 			 + KERNEL_STACK_SIZE, thread_exit2, &args);
1430 	}
1431 
1432 	panic("never can get here\n");
1433 }
1434 
1435 
1436 struct thread *
1437 thread_get_thread_struct(thread_id id)
1438 {
1439 	struct thread *thread;
1440 	cpu_status state;
1441 
1442 	state = disable_interrupts();
1443 	GRAB_THREAD_LOCK();
1444 
1445 	thread = thread_get_thread_struct_locked(id);
1446 
1447 	RELEASE_THREAD_LOCK();
1448 	restore_interrupts(state);
1449 
1450 	return thread;
1451 }
1452 
1453 
1454 struct thread *
1455 thread_get_thread_struct_locked(thread_id id)
1456 {
1457 	struct thread_key key;
1458 
1459 	key.id = id;
1460 
1461 	return (struct thread*)hash_lookup(sThreadHash, &key);
1462 }
1463 
1464 
1465 /*!
1466 	Called in the interrupt handler code when a thread enters
1467 	the kernel for any reason.
1468 	Only tracks time for now.
1469 	Interrupts are disabled.
1470 */
1471 void
1472 thread_at_kernel_entry(bigtime_t now)
1473 {
1474 	struct thread *thread = thread_get_current_thread();
1475 
1476 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1477 
1478 	// track user time
1479 	thread->user_time += now - thread->last_time;
1480 	thread->last_time = now;
1481 
1482 	thread->in_kernel = true;
1483 }
1484 
1485 
1486 /*!
1487 	Called whenever a thread exits kernel space to user space.
1488 	Tracks time, handles signals, ...
1489 */
1490 void
1491 thread_at_kernel_exit(void)
1492 {
1493 	struct thread *thread = thread_get_current_thread();
1494 	cpu_status state;
1495 	bigtime_t now;
1496 
1497 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1498 
1499 	if (handle_signals(thread)) {
1500 		state = disable_interrupts();
1501 		GRAB_THREAD_LOCK();
1502 
1503 		// was: smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
1504 		scheduler_reschedule();
1505 
1506 		RELEASE_THREAD_LOCK();
1507 	} else
1508 		state = disable_interrupts();
1509 
1510 	thread->in_kernel = false;
1511 
1512 	// track kernel time
1513 	now = system_time();
1514 	thread->kernel_time += now - thread->last_time;
1515 	thread->last_time = now;
1516 
1517 	restore_interrupts(state);
1518 }
1519 
1520 
1521 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1522 	and no debugging shall be done.
1523 	Interrupts are disabled in this case.
1524 */
1525 void
1526 thread_at_kernel_exit_no_signals(void)
1527 {
1528 	struct thread *thread = thread_get_current_thread();
1529 
1530 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1531 
1532 	thread->in_kernel = false;
1533 
1534 	// track kernel time
1535 	bigtime_t now = system_time();
1536 	thread->kernel_time += now - thread->last_time;
1537 	thread->last_time = now;
1538 }
1539 
1540 
1541 void
1542 thread_reset_for_exec(void)
1543 {
1544 	struct thread *thread = thread_get_current_thread();
1545 
1546 	cancel_timer(&thread->alarm);
1547 	reset_signals(thread);
1548 }
1549 
1550 
1551 /*! Inserts a thread at the tail of a queue */
1552 void
1553 thread_enqueue(struct thread *thread, struct thread_queue *queue)
1554 {
1555 	thread->queue_next = NULL;
1556 	if (queue->head == NULL) {
1557 		queue->head = thread;
1558 		queue->tail = thread;
1559 	} else {
1560 		queue->tail->queue_next = thread;
1561 		queue->tail = thread;
1562 	}
1563 }
1564 
1565 
1566 struct thread *
1567 thread_lookat_queue(struct thread_queue *queue)
1568 {
1569 	return queue->head;
1570 }
1571 
1572 
1573 struct thread *
1574 thread_dequeue(struct thread_queue *queue)
1575 {
1576 	struct thread *thread = queue->head;
1577 
1578 	if (thread != NULL) {
1579 		queue->head = thread->queue_next;
1580 		if (queue->tail == thread)
1581 			queue->tail = NULL;
1582 	}
1583 	return thread;
1584 }
1585 
1586 
1587 struct thread *
1588 thread_dequeue_id(struct thread_queue *q, thread_id id)
1589 {
1590 	struct thread *thread;
1591 	struct thread *last = NULL;
1592 
1593 	thread = q->head;
1594 	while (thread != NULL) {
1595 		if (thread->id == id) {
1596 			if (last == NULL)
1597 				q->head = thread->queue_next;
1598 			else
1599 				last->queue_next = thread->queue_next;
1600 
1601 			if (q->tail == thread)
1602 				q->tail = last;
1603 			break;
1604 		}
1605 		last = thread;
1606 		thread = thread->queue_next;
1607 	}
1608 	return thread;
1609 }
1610 
1611 
1612 thread_id
1613 allocate_thread_id(void)
1614 {
1615 	return atomic_add(&sNextThreadID, 1);
1616 }
1617 
1618 
1619 thread_id
1620 peek_next_thread_id(void)
1621 {
1622 	return atomic_get(&sNextThreadID);
1623 }
1624 
1625 
1626 void
1627 thread_yield(void)
1628 {
1629 	// snooze for roughly 3 thread quantums
1630 	snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1631 #if 0
1632 	cpu_status state;
1633 
1634 	struct thread *thread = thread_get_current_thread();
1635 	if (thread == NULL)
1636 		return;
1637 
1638 	state = disable_interrupts();
1639 	GRAB_THREAD_LOCK();
1640 
1641 	// mark the thread as yielded, so it will not be scheduled next
1642 	//thread->was_yielded = true;
1643 	thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1644 	scheduler_reschedule();
1645 
1646 	RELEASE_THREAD_LOCK();
1647 	restore_interrupts(state);
1648 #endif
1649 }
1650 
1651 
1652 /*!
1653 	Kernel private thread creation function.
1654 
1655 	\param threadID The ID to be assigned to the new thread. If
1656 		  \code < 0 \endcode a fresh one is allocated.
1657 */
1658 thread_id
1659 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1660 	void *arg, team_id team, thread_id threadID)
1661 {
1662 	return create_thread(name, team, (thread_entry_func)function, arg, NULL,
1663 		priority, true, threadID);
1664 }
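/*	A minimal usage sketch (the "cleaner_loop" function and "cleaner" variable
	are hypothetical, for illustration only). Threads are created in the
	B_THREAD_SUSPENDED state, so the caller has to resume them explicitly:

		static int32
		cleaner_loop(void *data)
		{
			dprintf("cleaner running\n");
			return 0;
		}

		thread_id cleaner = spawn_kernel_thread_etc(&cleaner_loop, "cleaner",
			B_LOW_PRIORITY, NULL, team_get_kernel_team_id(), -1);
		if (cleaner >= B_OK)
			resume_thread(cleaner);
*/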
1665 
1666 
1667 status_t
1668 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1669 	status_t *_returnCode)
1670 {
1671 	sem_id exitSem = B_BAD_THREAD_ID;
1672 	struct death_entry death;
1673 	job_control_entry* freeDeath = NULL;
1674 	struct thread *thread;
1675 	cpu_status state;
1676 	status_t status = B_OK;
1677 
1678 	if (id < B_OK)
1679 		return B_BAD_THREAD_ID;
1680 
1681 	// we need to resume the thread we're waiting for first
1682 
1683 	state = disable_interrupts();
1684 	GRAB_THREAD_LOCK();
1685 
1686 	thread = thread_get_thread_struct_locked(id);
1687 	if (thread != NULL) {
1688 		// remember the semaphore we have to wait on and place our death entry
1689 		exitSem = thread->exit.sem;
1690 		list_add_link_to_head(&thread->exit.waiters, &death);
1691 	}
1692 
1693 	death_entry* threadDeathEntry = NULL;
1694 
1695 	RELEASE_THREAD_LOCK();
1696 
1697 	if (thread == NULL) {
1698 		// we couldn't find this thread - maybe it's already gone, and we'll
1699 		// find its death entry in our team
1700 		GRAB_TEAM_LOCK();
1701 
1702 		struct team* team = thread_get_current_thread()->team;
1703 
1704 		// check the child death entries first (i.e. main threads of child
1705 		// teams)
1706 		bool deleteEntry;
1707 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1708 		if (freeDeath != NULL) {
1709 			death.status = freeDeath->status;
1710 			if (!deleteEntry)
1711 				freeDeath = NULL;
1712 		} else {
1713 			// check the thread death entries of the team (non-main threads)
1714 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1715 					&team->dead_threads, threadDeathEntry)) != NULL) {
1716 				if (threadDeathEntry->thread == id) {
1717 					list_remove_item(&team->dead_threads, threadDeathEntry);
1718 					team->dead_threads_count--;
1719 					death.status = threadDeathEntry->status;
1720 					break;
1721 				}
1722 			}
1723 
1724 			if (threadDeathEntry == NULL)
1725 				status = B_BAD_THREAD_ID;
1726 		}
1727 
1728 		RELEASE_TEAM_LOCK();
1729 	}
1730 
1731 	restore_interrupts(state);
1732 
1733 	if (thread == NULL && status == B_OK) {
1734 		// we found the thread's death entry in our team
1735 		if (_returnCode)
1736 			*_returnCode = death.status;
1737 
1738 		delete freeDeath;
1739 		free(threadDeathEntry);
1740 		return B_OK;
1741 	}
1742 
1743 	// we need to wait for the death of the thread
1744 
1745 	if (exitSem < B_OK)
1746 		return B_BAD_THREAD_ID;
1747 
1748 	resume_thread(id);
1749 		// make sure we don't wait forever on a suspended thread
1750 
1751 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1752 
1753 	if (status == B_OK) {
1754 		// this should never happen as the thread deletes the semaphore on exit
1755 		panic("could acquire exit_sem for thread %ld\n", id);
1756 	} else if (status == B_BAD_SEM_ID) {
1757 		// this is the way the thread normally exits
1758 		status = B_OK;
1759 
1760 		if (_returnCode)
1761 			*_returnCode = death.status;
1762 	} else {
1763 		// We were probably interrupted; we need to remove our death entry now.
1764 		state = disable_interrupts();
1765 		GRAB_THREAD_LOCK();
1766 
1767 		thread = thread_get_thread_struct_locked(id);
1768 		if (thread != NULL)
1769 			list_remove_link(&death);
1770 
1771 		RELEASE_THREAD_LOCK();
1772 		restore_interrupts(state);
1773 
1774 		// If the thread is already gone, we need to wait for its exit semaphore
1775 		// to make sure our death entry stays valid - it won't take long
1776 		if (thread == NULL)
1777 			acquire_sem(exitSem);
1778 	}
1779 
1780 	return status;
1781 }
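/*	Sketch (illustration only; "worker" is assumed to be a previously spawned
	thread): wait at most one second for the thread to exit and collect its
	return code. Note that the function resumes a suspended target, so the
	caller cannot end up waiting forever on a thread that was never resumed:

		status_t returnCode;
		status_t error = wait_for_thread_etc(worker, B_RELATIVE_TIMEOUT,
			1000000LL, &returnCode);
*/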
1782 
1783 
1784 status_t
1785 select_thread(int32 id, struct select_info* info, bool kernel)
1786 {
1787 	InterruptsSpinLocker locker(thread_spinlock);
1788 
1789 	// get thread
1790 	struct thread* thread = thread_get_thread_struct_locked(id);
1791 	if (thread == NULL)
1792 		return B_BAD_THREAD_ID;
1793 
1794 	// We support only B_EVENT_INVALID at the moment.
1795 	info->selected_events &= B_EVENT_INVALID;
1796 
1797 	// add info to list
1798 	if (info->selected_events != 0) {
1799 		info->next = thread->select_infos;
1800 		thread->select_infos = info;
1801 
1802 		// we need a sync reference
1803 		atomic_add(&info->sync->ref_count, 1);
1804 	}
1805 
1806 	return B_OK;
1807 }
1808 
1809 
1810 status_t
1811 deselect_thread(int32 id, struct select_info* info, bool kernel)
1812 {
1813 	InterruptsSpinLocker locker(thread_spinlock);
1814 
1815 	// get thread
1816 	struct thread* thread = thread_get_thread_struct_locked(id);
1817 	if (thread == NULL)
1818 		return B_BAD_THREAD_ID;
1819 
1820 	// remove info from list
1821 	select_info** infoLocation = &thread->select_infos;
1822 	while (*infoLocation != NULL && *infoLocation != info)
1823 		infoLocation = &(*infoLocation)->next;
1824 
1825 	if (*infoLocation != info)
1826 		return B_OK;
1827 
1828 	*infoLocation = info->next;
1829 
1830 	locker.Unlock();
1831 
1832 	// surrender sync reference
1833 	put_select_sync(info->sync);
1834 
1835 	return B_OK;
1836 }
1837 
1838 
1839 int32
1840 thread_max_threads(void)
1841 {
1842 	return sMaxThreads;
1843 }
1844 
1845 
1846 int32
1847 thread_used_threads(void)
1848 {
1849 	return sUsedThreads;
1850 }
1851 
1852 
1853 status_t
1854 thread_init(kernel_args *args)
1855 {
1856 	uint32 i;
1857 
1858 	TRACE(("thread_init: entry\n"));
1859 
1860 	// create the thread hash table
1861 	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
1862 		&thread_struct_compare, &thread_struct_hash);
1863 
1864 	// zero out the dead thread queue
1865 	memset(&dead_q, 0, sizeof(dead_q));
1866 
1867 	// allocate snooze sem
1868 	sSnoozeSem = create_sem(0, "snooze sem");
1869 	if (sSnoozeSem < 0) {
1870 		panic("error creating snooze sem\n");
1871 		return sSnoozeSem;
1872 	}
1873 
1874 	if (arch_thread_init(args) < B_OK)
1875 		panic("arch_thread_init() failed!\n");
1876 
1877 	// skip all thread IDs including B_SYSTEM_TEAM, which is reserved
1878 	sNextThreadID = B_SYSTEM_TEAM + 1;
1879 
1880 	// create an idle thread for each cpu
1881 
1882 	for (i = 0; i < args->num_cpus; i++) {
1883 		struct thread *thread;
1884 		area_info info;
1885 		char name[64];
1886 
1887 		sprintf(name, "idle thread %lu", i + 1);
1888 		thread = create_thread_struct(&sIdleThreads[i], name,
1889 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
1890 		if (thread == NULL) {
1891 			panic("error creating idle thread struct\n");
1892 			return B_NO_MEMORY;
1893 		}
1894 
1895 		thread->team = team_get_kernel_team();
1896 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
1897 		thread->state = B_THREAD_RUNNING;
1898 		thread->next_state = B_THREAD_READY;
1899 		sprintf(name, "idle thread %lu kstack", i + 1);
1900 		thread->kernel_stack_area = find_area(name);
1901 		thread->entry = NULL;
1902 
1903 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
1904 			panic("error finding idle kstack area\n");
1905 
1906 		thread->kernel_stack_base = (addr_t)info.address;
1907 
1908 		hash_insert(sThreadHash, thread);
1909 		insert_thread_into_team(thread->team, thread);
1910 	}
1911 	sUsedThreads = args->num_cpus;
1912 
1913 	// create a set of death stacks
1914 
1915 	sNumDeathStacks = smp_get_num_cpus();
1916 	if (sNumDeathStacks > 8 * sizeof(sDeathStackBitmap)) {
1917 		// clamp values for really beefy machines
1918 		sNumDeathStacks = 8 * sizeof(sDeathStackBitmap);
1919 	}
1920 	sDeathStackBitmap = 0;
1921 	sDeathStacks = (struct death_stack *)malloc(sNumDeathStacks
1922 		* sizeof(struct death_stack));
1923 	if (sDeathStacks == NULL) {
1924 		panic("error creating death stacks\n");
1925 		return B_NO_MEMORY;
1926 	}
1927 	{
1928 		char temp[64];
1929 
1930 		for (i = 0; i < sNumDeathStacks; i++) {
1931 			sprintf(temp, "death stack %lu", i);
1932 			sDeathStacks[i].area = create_area(temp,
1933 				(void **)&sDeathStacks[i].address, B_ANY_KERNEL_ADDRESS,
1934 				KERNEL_STACK_SIZE, B_FULL_LOCK,
1935 				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
1936 			if (sDeathStacks[i].area < 0) {
1937 				panic("error creating death stacks\n");
1938 				return sDeathStacks[i].area;
1939 			}
1940 			sDeathStacks[i].in_use = false;
1941 		}
1942 	}
1943 	sDeathStackSem = create_sem(sNumDeathStacks, "death stack availability");
1944 
1945 	// set up some debugger commands
1946 	add_debugger_command("threads", &dump_thread_list, "list all threads");
1947 	add_debugger_command("ready", &dump_thread_list, "list all ready threads");
1948 	add_debugger_command("running", &dump_thread_list, "list all running threads");
1949 	add_debugger_command("waiting", &dump_thread_list, "list all waiting threads (optionally for a specific semaphore)");
1950 	add_debugger_command("realtime", &dump_thread_list, "list all realtime threads");
1951 	add_debugger_command("thread", &dump_thread_info, "list info about a particular thread");
1952 	add_debugger_command("unreal", &make_thread_unreal, "set realtime priority threads to normal priority");
1953 	add_debugger_command("suspend", &make_thread_suspended, "suspend a thread");
1954 	add_debugger_command("resume", &make_thread_resumed, "resume a thread");
1955 	add_debugger_command("drop", &drop_into_debugger, "drop a thread into the user-debugger");
1956 	add_debugger_command("priority", &set_thread_prio, "set a thread priority");
1957 
1958 	return B_OK;
1959 }
1960 
1961 
1962 status_t
1963 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
1964 {
1965 	// Set up the CPU pointer in the not yet initialized per-CPU idle thread
1966 	// so that get_current_cpu() and friends work; many low-level routines
1967 	// rely on this.
1968 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
1969 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
1970 	return B_OK;
1971 }
1972 
1973 //	#pragma mark - public kernel API
1974 
1975 
1976 void
1977 exit_thread(status_t returnValue)
1978 {
1979 	struct thread *thread = thread_get_current_thread();
1980 
1981 	thread->exit.status = returnValue;
1982 	thread->exit.reason = THREAD_RETURN_EXIT;
1983 
1984 	// If called from a kernel thread, we don't deliver the signal but exit
1985 	// directly; user threads send SIGKILLTHR to themselves to preserve the
1986 	// user space behaviour of this function.
1987 	if (thread->team != team_get_kernel_team())
1988 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
1989 	else
1990 		thread_exit();
1991 }
1992 
1993 
1994 status_t
1995 kill_thread(thread_id id)
1996 {
1997 	if (id <= 0)
1998 		return B_BAD_VALUE;
1999 
2000 	return send_signal(id, SIGKILLTHR);
2001 }
2002 
2003 
2004 status_t
2005 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2006 {
2007 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2008 }
2009 
2010 
2011 int32
2012 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2013 {
2014 	return receive_data_etc(sender, buffer, bufferSize, 0);
2015 }
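

/*	A small illustrative sketch of how the send_data()/receive_data() pair
	above is used; the receiver thread ID, message code, and buffers are
	made up for the example:

		// in the sending thread
		char message[] = "hello";
		send_data(receiverThread, 0x1234, message, sizeof(message));

		// in the receiving thread
		thread_id sender;
		char buffer[64];
		int32 code = receive_data(&sender, buffer, sizeof(buffer));
*/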
2016 
2017 
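/*!
	Returns whether there is a message waiting to be picked up with
	receive_data(). ToDo: the thread argument is currently ignored; only the
	calling thread's own message port is checked.
*/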
2018 bool
2019 has_data(thread_id thread)
2020 {
2021 	int32 count;
2022 
2023 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
2024 			&count) != B_OK)
2025 		return false;
2026 
2027 	return count != 0;
2028 }
2029 
2030 
2031 status_t
2032 _get_thread_info(thread_id id, thread_info *info, size_t size)
2033 {
2034 	status_t status = B_OK;
2035 	struct thread *thread;
2036 	cpu_status state;
2037 
2038 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2039 		return B_BAD_VALUE;
2040 
2041 	state = disable_interrupts();
2042 	GRAB_THREAD_LOCK();
2043 
2044 	thread = thread_get_thread_struct_locked(id);
2045 	if (thread == NULL) {
2046 		status = B_BAD_VALUE;
2047 		goto err;
2048 	}
2049 
2050 	fill_thread_info(thread, info, size);
2051 
2052 err:
2053 	RELEASE_THREAD_LOCK();
2054 	restore_interrupts(state);
2055 
2056 	return status;
2057 }
2058 
2059 
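/*!
	Fills in info about the next thread of the given team. The cookie is
	interpreted as the thread ID at which to continue scanning and is
	advanced past the thread that was returned.
*/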
2060 status_t
2061 _get_next_thread_info(team_id team, int32 *_cookie, thread_info *info,
2062 	size_t size)
2063 {
2064 	status_t status = B_BAD_VALUE;
2065 	struct thread *thread = NULL;
2066 	cpu_status state;
2067 	int slot;
2068 	thread_id lastThreadID;
2069 
2070 	if (info == NULL || size != sizeof(thread_info) || team < B_OK)
2071 		return B_BAD_VALUE;
2072 
2073 	if (team == B_CURRENT_TEAM)
2074 		team = team_get_current_team_id();
2075 	else if (!team_is_valid(team))
2076 		return B_BAD_VALUE;
2077 
2078 	slot = *_cookie;
2079 
2080 	state = disable_interrupts();
2081 	GRAB_THREAD_LOCK();
2082 
2083 	lastThreadID = peek_next_thread_id();
2084 	if (slot >= lastThreadID)
2085 		goto err;
2086 
2087 	while (slot < lastThreadID
2088 		&& (!(thread = thread_get_thread_struct_locked(slot))
2089 			|| thread->team->id != team))
2090 		slot++;
2091 
2092 	if (thread != NULL && thread->team->id == team) {
2093 		fill_thread_info(thread, info, size);
2094 
2095 		*_cookie = slot + 1;
2096 		status = B_OK;
2097 	}
2098 
2099 err:
2100 	RELEASE_THREAD_LOCK();
2101 	restore_interrupts(state);
2102 
2103 	return status;
2104 }
2105 
2106 
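/*!
	Returns the ID of a thread with the given name, or B_NAME_NOT_FOUND if
	no such thread exists. A NULL name returns the ID of the calling thread.
*/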
2107 thread_id
2108 find_thread(const char *name)
2109 {
2110 	struct hash_iterator iterator;
2111 	struct thread *thread;
2112 	cpu_status state;
2113 
2114 	if (name == NULL)
2115 		return thread_get_current_thread_id();
2116 
2117 	state = disable_interrupts();
2118 	GRAB_THREAD_LOCK();
2119 
2120 	// ToDo: this might not be in the same order as find_thread() in BeOS,
2121 	//		which could theoretically be problematic.
2122 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2123 	//		cheap either - although this function is probably used very rarely.
2124 
2125 	hash_open(sThreadHash, &iterator);
2126 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
2127 			!= NULL) {
2128 		// Search through hash
2129 		if (thread->name != NULL && !strcmp(thread->name, name)) {
2130 			thread_id id = thread->id;
2131 
2132 			RELEASE_THREAD_LOCK();
2133 			restore_interrupts(state);
2134 			return id;
2135 		}
2136 	}
2137 
2138 	RELEASE_THREAD_LOCK();
2139 	restore_interrupts(state);
2140 
2141 	return B_NAME_NOT_FOUND;
2142 }
2143 
2144 
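/*!
	Renames the given thread. Only threads that belong to the calling
	thread's team may be renamed; otherwise B_NOT_ALLOWED is returned
	(B_BAD_THREAD_ID if no thread with that ID exists).
*/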
2145 status_t
2146 rename_thread(thread_id id, const char *name)
2147 {
2148 	struct thread *thread = thread_get_current_thread();
2149 	status_t status = B_BAD_THREAD_ID;
2150 	cpu_status state;
2151 
2152 	if (name == NULL)
2153 		return B_BAD_VALUE;
2154 
2155 	state = disable_interrupts();
2156 	GRAB_THREAD_LOCK();
2157 
2158 	if (thread->id != id)
2159 		thread = thread_get_thread_struct_locked(id);
2160 
2161 	if (thread != NULL) {
2162 		if (thread->team == thread_get_current_thread()->team) {
2163 			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2164 			status = B_OK;
2165 		} else
2166 			status = B_NOT_ALLOWED;
2167 	}
2168 
2169 	RELEASE_THREAD_LOCK();
2170 	restore_interrupts(state);
2171 
2172 	return status;
2173 }
2174 
2175 
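/*!
	Sets the priority of the given thread, clamping the value to the valid
	range first. If the thread is ready, it is re-enqueued in the run queue
	at its new priority. Returns the previous priority, or B_BAD_THREAD_ID
	if no thread with the given ID exists.
*/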
2176 status_t
2177 set_thread_priority(thread_id id, int32 priority)
2178 {
2179 	struct thread *thread;
2180 	int32 oldPriority;
2181 
2182 	// make sure the passed in priority is within bounds
2183 	if (priority > B_MAX_PRIORITY)
2184 		priority = B_MAX_PRIORITY;
2185 	if (priority < B_MIN_PRIORITY)
2186 		priority = B_MIN_PRIORITY;
2187 
2188 	thread = thread_get_current_thread();
2189 	if (thread->id == id) {
2190 		// It's the calling thread itself, so we know it's not in the run
2191 		// queue and we can manipulate its structure directly.
2192 		oldPriority = thread->priority;
2193 			// Note that this might not return the correct value if we are
2194 			// preempted here and another thread changes our priority before
2195 			// the next line is executed.
2196 		thread->priority = thread->next_priority = priority;
2197 	} else {
2198 		cpu_status state = disable_interrupts();
2199 		GRAB_THREAD_LOCK();
2200 
2201 		thread = thread_get_thread_struct_locked(id);
2202 		if (thread) {
2203 			oldPriority = thread->priority;
2204 			thread->next_priority = priority;
2205 			if (thread->state == B_THREAD_READY && thread->priority != priority) {
2206 				// if the thread is in the run queue, we reinsert it at a new position
2207 				scheduler_remove_from_run_queue(thread);
2208 				thread->priority = priority;
2209 				scheduler_enqueue_in_run_queue(thread);
2210 			} else
2211 				thread->priority = priority;
2212 		} else
2213 			oldPriority = B_BAD_THREAD_ID;
2214 
2215 		RELEASE_THREAD_LOCK();
2216 		restore_interrupts(state);
2217 	}
2218 
2219 	return oldPriority;
2220 }
2221 
2222 
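/*!
	Puts the calling thread to sleep for the given timeout by waiting on a
	semaphore that is never released; the B_TIMED_OUT (or B_WOULD_BLOCK)
	result of that wait just means the snooze has ended and is translated
	to B_OK. Only B_SYSTEM_TIMEBASE is supported.
*/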
2223 status_t
2224 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2225 {
2226 	status_t status;
2227 
2228 	if (timebase != B_SYSTEM_TIMEBASE)
2229 		return B_BAD_VALUE;
2230 
2231 	status = acquire_sem_etc(sSnoozeSem, 1, flags, timeout);
2232 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2233 		return B_OK;
2234 
2235 	return status;
2236 }
2237 
2238 
2239 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2240 status_t
2241 snooze(bigtime_t timeout)
2242 {
2243 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2244 }
2245 
2246 
2247 /*!
2248 	snooze_until() for internal kernel use only; doesn't interrupt on
2249 	signals.
2250 */
2251 status_t
2252 snooze_until(bigtime_t timeout, int timebase)
2253 {
2254 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2255 }
2256 
2257 
2258 status_t
2259 wait_for_thread(thread_id thread, status_t *_returnCode)
2260 {
2261 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2262 }
2263 
2264 
2265 status_t
2266 suspend_thread(thread_id id)
2267 {
2268 	if (id <= 0)
2269 		return B_BAD_VALUE;
2270 
2271 	return send_signal(id, SIGSTOP);
2272 }
2273 
2274 
2275 status_t
2276 resume_thread(thread_id id)
2277 {
2278 	if (id <= 0)
2279 		return B_BAD_VALUE;
2280 
2281 	return send_signal(id, SIGCONT);
2282 }
2283 
2284 
2285 thread_id
2286 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2287 	void *arg)
2288 {
2289 	return create_thread(name, team_get_kernel_team()->id,
2290 		(thread_entry_func)function, arg, NULL, priority, true, -1);
2291 }
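

/*	An illustrative sketch of the kernel thread API above; the worker
	function and its argument are made up for the example:

		static int32
		my_worker(void *data)
		{
			// ... do the actual work ...
			return B_OK;
		}

		status_t returnCode;
		thread_id worker = spawn_kernel_thread(&my_worker, "my worker",
			B_NORMAL_PRIORITY, NULL);
		if (worker >= B_OK) {
			resume_thread(worker);
				// the new thread starts suspended
			wait_for_thread(worker, &returnCode);
		}
*/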
2292 
2293 
2294 /* TODO: split this; have kernel version set kerrno */
2295 int
2296 getrlimit(int resource, struct rlimit * rlp)
2297 {
2298 	if (!rlp)
2299 		return B_BAD_ADDRESS;
2300 
2301 	switch (resource) {
2302 		case RLIMIT_NOFILE:
2303 		case RLIMIT_NOVMON:
2304 			return vfs_getrlimit(resource, rlp);
2305 
2306 		default:
2307 			return EINVAL;
2308 	}
2309 
2310 	return 0;
2311 }
2312 
2313 
2314 /* TODO: split this; have kernel version set kerrno */
2315 int
2316 setrlimit(int resource, const struct rlimit * rlp)
2317 {
2318 	if (!rlp)
2319 		return B_BAD_ADDRESS;
2320 
2321 	switch (resource) {
2322 		case RLIMIT_NOFILE:
2323 		case RLIMIT_NOVMON:
2324 			return vfs_setrlimit(resource, rlp);
2325 
2326 		default:
2327 			return EINVAL;
2328 	}
2329 
2330 	return 0;
2331 }
2332 
2333 
2334 //	#pragma mark - syscalls
2335 
2336 
2337 void
2338 _user_exit_thread(status_t returnValue)
2339 {
2340 	exit_thread(returnValue);
2341 }
2342 
2343 
2344 status_t
2345 _user_kill_thread(thread_id thread)
2346 {
2347 	return kill_thread(thread);
2348 }
2349 
2350 
2351 status_t
2352 _user_resume_thread(thread_id thread)
2353 {
2354 	return resume_thread(thread);
2355 }
2356 
2357 
2358 status_t
2359 _user_suspend_thread(thread_id thread)
2360 {
2361 	return suspend_thread(thread);
2362 }
2363 
2364 
2365 status_t
2366 _user_rename_thread(thread_id thread, const char *userName)
2367 {
2368 	char name[B_OS_NAME_LENGTH];
2369 
2370 	if (!IS_USER_ADDRESS(userName)
2371 		|| userName == NULL
2372 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2373 		return B_BAD_ADDRESS;
2374 
2375 	return rename_thread(thread, name);
2376 }
2377 
2378 
2379 int32
2380 _user_set_thread_priority(thread_id thread, int32 newPriority)
2381 {
2382 	return set_thread_priority(thread, newPriority);
2383 }
2384 
2385 
2386 thread_id
2387 _user_spawn_thread(int32 (*entry)(thread_func, void *), const char *userName,
2388 	int32 priority, void *data1, void *data2)
2389 {
2390 	char name[B_OS_NAME_LENGTH];
2391 	thread_id threadID;
2392 
2393 	if (!IS_USER_ADDRESS(entry) || entry == NULL
2394 		|| (userName != NULL && (!IS_USER_ADDRESS(userName)
2395 			|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)))
2396 		return B_BAD_ADDRESS;
2397 
2398 	threadID = create_thread(userName != NULL ? name : "user thread",
2399 		thread_get_current_thread()->team->id, entry,
2400 		data1, data2, priority, false, -1);
2401 
2402 	user_debug_thread_created(threadID);
2403 
2404 	return threadID;
2405 }
2406 
2407 
2408 status_t
2409 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2410 {
2411 	return snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2412 }
2413 
2414 
2415 void
2416 _user_thread_yield(void)
2417 {
2418 	thread_yield();
2419 }
2420 
2421 
2422 status_t
2423 _user_get_thread_info(thread_id id, thread_info *userInfo)
2424 {
2425 	thread_info info;
2426 	status_t status;
2427 
2428 	if (!IS_USER_ADDRESS(userInfo))
2429 		return B_BAD_ADDRESS;
2430 
2431 	status = _get_thread_info(id, &info, sizeof(thread_info));
2432 
2433 	if (status >= B_OK
2434 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2435 		return B_BAD_ADDRESS;
2436 
2437 	return status;
2438 }
2439 
2440 
2441 status_t
2442 _user_get_next_thread_info(team_id team, int32 *userCookie,
2443 	thread_info *userInfo)
2444 {
2445 	status_t status;
2446 	thread_info info;
2447 	int32 cookie;
2448 
2449 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2450 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2451 		return B_BAD_ADDRESS;
2452 
2453 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2454 	if (status < B_OK)
2455 		return status;
2456 
2457 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2458 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2459 		return B_BAD_ADDRESS;
2460 
2461 	return status;
2462 }
2463 
2464 
2465 thread_id
2466 _user_find_thread(const char *userName)
2467 {
2468 	char name[B_OS_NAME_LENGTH];
2469 
2470 	if (userName == NULL)
2471 		return find_thread(NULL);
2472 
2473 	if (!IS_USER_ADDRESS(userName)
2474 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2475 		return B_BAD_ADDRESS;
2476 
2477 	return find_thread(name);
2478 }
2479 
2480 
2481 status_t
2482 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2483 {
2484 	status_t returnCode;
2485 	status_t status;
2486 
2487 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2488 		return B_BAD_ADDRESS;
2489 
2490 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2491 
2492 	if (status == B_OK && userReturnCode != NULL
2493 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK)
2494 		return B_BAD_ADDRESS;
2495 
2496 	return status;
2497 }
2498 
2499 
2500 bool
2501 _user_has_data(thread_id thread)
2502 {
2503 	return has_data(thread);
2504 }
2505 
2506 
2507 status_t
2508 _user_send_data(thread_id thread, int32 code, const void *buffer,
2509 	size_t bufferSize)
2510 {
2511 	if (!IS_USER_ADDRESS(buffer))
2512 		return B_BAD_ADDRESS;
2513 
2514 	return send_data_etc(thread, code, buffer, bufferSize,
2515 		B_KILL_CAN_INTERRUPT);
2516 		// supports userland buffers
2517 }
2518 
2519 
2520 status_t
2521 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2522 {
2523 	thread_id sender;
2524 	status_t code;
2525 
2526 	if (!IS_USER_ADDRESS(_userSender)
2527 		|| !IS_USER_ADDRESS(buffer))
2528 		return B_BAD_ADDRESS;
2529 
2530 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2531 		// supports userland buffers
2532 
2533 	if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
2534 		return B_BAD_ADDRESS;
2535 
2536 	return code;
2537 }
2538 
2539 
2540 // ToDo: the following two functions don't belong here
2541 
2542 
2543 int
2544 _user_getrlimit(int resource, struct rlimit *urlp)
2545 {
2546 	struct rlimit rl;
2547 	int ret;
2548 
2549 	if (urlp == NULL)
2550 		return EINVAL;
2551 
2552 	if (!IS_USER_ADDRESS(urlp))
2553 		return B_BAD_ADDRESS;
2554 
2555 	ret = getrlimit(resource, &rl);
2556 
2557 	if (ret == 0) {
2558 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
2559 		if (ret < 0)
2560 			return ret;
2561 
2562 		return 0;
2563 	}
2564 
2565 	return ret;
2566 }
2567 
2568 
2569 int
2570 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
2571 {
2572 	struct rlimit resourceLimit;
2573 
2574 	if (userResourceLimit == NULL)
2575 		return EINVAL;
2576 
2577 	if (!IS_USER_ADDRESS(userResourceLimit)
2578 		|| user_memcpy(&resourceLimit, userResourceLimit,
2579 			sizeof(struct rlimit)) < B_OK)
2580 		return B_BAD_ADDRESS;
2581 
2582 	return setrlimit(resource, &resourceLimit);
2583 }
2584 
2585