xref: /haiku/src/system/kernel/thread.cpp (revision 1214ef1b2100f2b3299fc9d8d6142e46f70a4c3f)
1 /*
2  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*! Threading routines */
10 
11 
12 #include <thread.h>
13 
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <sys/resource.h>
18 
19 #include <OS.h>
20 
21 #include <util/AutoLock.h>
22 #include <util/khash.h>
23 
24 #include <boot/kernel_args.h>
25 #include <condition_variable.h>
26 #include <cpu.h>
27 #include <int.h>
28 #include <kimage.h>
29 #include <kscheduler.h>
30 #include <ksignal.h>
31 #include <smp.h>
32 #include <syscalls.h>
33 #include <team.h>
34 #include <tls.h>
35 #include <user_runtime.h>
36 #include <vfs.h>
37 #include <vm.h>
38 #include <vm_address_space.h>
39 #include <wait_for_objects.h>
40 
41 
42 //#define TRACE_THREAD
43 #ifdef TRACE_THREAD
44 #	define TRACE(x) dprintf x
45 #else
46 #	define TRACE(x) ;
47 #endif
48 
49 
50 #define THREAD_MAX_MESSAGE_SIZE		65536
51 
52 // used to pass arguments from thread_exit() to thread_exit2()
53 
54 struct thread_exit_args {
55 	struct thread	*thread;
56 	area_id			old_kernel_stack;
57 	uint32			death_stack;
58 	sem_id			death_sem;
59 	team_id			original_team_id;
60 };
61 
62 struct thread_key {
63 	thread_id id;
64 };
65 
66 // global
67 spinlock thread_spinlock = 0;
68 
69 // thread list
70 static struct thread sIdleThreads[B_MAX_CPU_COUNT];
71 static hash_table *sThreadHash = NULL;
72 static thread_id sNextThreadID = 1;
73 
74 // some arbitrarily chosen limits - should probably depend on the available
75 // memory (the limit is not yet enforced)
76 static int32 sMaxThreads = 4096;
77 static int32 sUsedThreads = 0;
78 
79 static sem_id sSnoozeSem = -1;
80 
81 // death stacks - used temporarily as a thread cleans itself up
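// A dying thread cannot free the kernel stack it is still running on, so
// thread_exit() borrows one of these stacks, switches onto it via
// arch_thread_switch_kstack_and_call(), and lets thread_exit2() delete the
// old kernel stack area and finish the cleanup from there.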
82 struct death_stack {
83 	area_id	area;
84 	addr_t	address;
85 	bool	in_use;
86 };
87 static struct death_stack *sDeathStacks;
88 static unsigned int sNumDeathStacks;
89 static unsigned int volatile sDeathStackBitmap;
90 static sem_id sDeathStackSem;
91 static spinlock sDeathStackLock = 0;
92 
93 // The dead queue is used as a pool from which to retrieve and reuse previously
94 // allocated thread structs when creating a new thread. It should be gone once
95 // the slab allocator is in.
96 struct thread_queue dead_q;
97 
98 static void thread_kthread_entry(void);
99 static void thread_kthread_exit(void);
100 
101 
102 /*!
103 	Inserts a thread into a team.
104 	You must hold the team lock when you call this function.
105 */
106 static void
107 insert_thread_into_team(struct team *team, struct thread *thread)
108 {
109 	thread->team_next = team->thread_list;
110 	team->thread_list = thread;
111 	team->num_threads++;
112 
113 	if (team->num_threads == 1) {
114 		// this was the first thread
115 		team->main_thread = thread;
116 	}
117 	thread->team = team;
118 }
119 
120 
121 /*!
122 	Removes a thread from a team.
123 	You must hold the team lock when you call this function.
124 */
125 static void
126 remove_thread_from_team(struct team *team, struct thread *thread)
127 {
128 	struct thread *temp, *last = NULL;
129 
130 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
131 		if (temp == thread) {
132 			if (last == NULL)
133 				team->thread_list = temp->team_next;
134 			else
135 				last->team_next = temp->team_next;
136 
137 			team->num_threads--;
138 			break;
139 		}
140 		last = temp;
141 	}
142 }
143 
144 
145 static int
146 thread_struct_compare(void *_t, const void *_key)
147 {
148 	struct thread *thread = (struct thread*)_t;
149 	const struct thread_key *key = (const struct thread_key*)_key;
150 
151 	if (thread->id == key->id)
152 		return 0;
153 
154 	return 1;
155 }
156 
157 
158 static uint32
159 thread_struct_hash(void *_t, const void *_key, uint32 range)
160 {
161 	struct thread *thread = (struct thread*)_t;
162 	const struct thread_key *key = (const struct thread_key*)_key;
163 
164 	if (thread != NULL)
165 		return thread->id % range;
166 
167 	return (uint32)key->id % range;
168 }
169 
170 
171 static void
172 reset_signals(struct thread *thread)
173 {
174 	thread->sig_pending = 0;
175 	thread->sig_block_mask = 0;
176 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
177 	thread->signal_stack_base = 0;
178 	thread->signal_stack_size = 0;
179 	thread->signal_stack_enabled = false;
180 }
181 
182 
183 /*!
184 	Allocates and fills in thread structure (or reuses one from the
185 	dead queue).
186 
187 	\param threadID The ID to be assigned to the new thread. If
188 		  \code < 0 \endcode a fresh one is allocated.
189 	\param inthread initialize this thread struct if non-NULL
190 */
191 
192 static struct thread *
193 create_thread_struct(struct thread *inthread, const char *name,
194 	thread_id threadID, struct cpu_ent *cpu)
195 {
196 	struct thread *thread;
197 	cpu_status state;
198 	char temp[64];
199 
200 	if (inthread == NULL) {
201 		// try to recycle one from the dead queue first
202 		state = disable_interrupts();
203 		GRAB_THREAD_LOCK();
204 		thread = thread_dequeue(&dead_q);
205 		RELEASE_THREAD_LOCK();
206 		restore_interrupts(state);
207 
208 		// if not, create a new one
209 		if (thread == NULL) {
210 			thread = (struct thread *)malloc(sizeof(struct thread));
211 			if (thread == NULL)
212 				return NULL;
213 		}
214 	} else {
215 		thread = inthread;
216 	}
217 
218 	if (name != NULL)
219 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
220 	else
221 		strcpy(thread->name, "unnamed thread");
222 
223 	thread->flags = 0;
224 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
225 	thread->team = NULL;
226 	thread->cpu = cpu;
227 	thread->sem.blocking = -1;
228 	thread->condition_variable_entry = NULL;
229 	thread->fault_handler = 0;
230 	thread->page_faults_allowed = 1;
231 	thread->kernel_stack_area = -1;
232 	thread->kernel_stack_base = 0;
233 	thread->user_stack_area = -1;
234 	thread->user_stack_base = 0;
235 	thread->user_local_storage = 0;
236 	thread->kernel_errno = 0;
237 	thread->team_next = NULL;
238 	thread->queue_next = NULL;
239 	thread->priority = thread->next_priority = -1;
240 	thread->args1 = NULL;  thread->args2 = NULL;
241 	thread->alarm.period = 0;
242 	reset_signals(thread);
243 	thread->in_kernel = true;
244 	thread->was_yielded = false;
245 	thread->user_time = 0;
246 	thread->kernel_time = 0;
247 	thread->last_time = 0;
248 	thread->exit.status = 0;
249 	thread->exit.reason = 0;
250 	thread->exit.signal = 0;
251 	list_init(&thread->exit.waiters);
252 	thread->select_infos = NULL;
253 
254 	sprintf(temp, "thread_%lx_retcode_sem", thread->id);
255 	thread->exit.sem = create_sem(0, temp);
256 	if (thread->exit.sem < B_OK)
257 		goto err1;
258 
259 	sprintf(temp, "%s send", thread->name);
260 	thread->msg.write_sem = create_sem(1, temp);
261 	if (thread->msg.write_sem < B_OK)
262 		goto err2;
263 
264 	sprintf(temp, "%s receive", thread->name);
265 	thread->msg.read_sem = create_sem(0, temp);
266 	if (thread->msg.read_sem < B_OK)
267 		goto err3;
268 
269 	if (arch_thread_init_thread_struct(thread) < B_OK)
270 		goto err4;
271 
272 	return thread;
273 
274 err4:
275 	delete_sem(thread->msg.read_sem);
276 err3:
277 	delete_sem(thread->msg.write_sem);
278 err2:
279 	delete_sem(thread->exit.sem);
280 err1:
281 	// ToDo: put them in the dead queue instead?
282 	if (inthread == NULL)
283 		free(thread);
284 	return NULL;
285 }
286 
287 
288 static void
289 delete_thread_struct(struct thread *thread)
290 {
291 	delete_sem(thread->exit.sem);
292 	delete_sem(thread->msg.write_sem);
293 	delete_sem(thread->msg.read_sem);
294 
295 	// ToDo: put them in the dead queue instead?
296 	free(thread);
297 }
298 
299 
300 /*! This function gets run by a new thread before anything else */
301 static void
302 thread_kthread_entry(void)
303 {
304 	struct thread *thread = thread_get_current_thread();
305 
306 	// simulates the thread spinlock release that would occur if the thread had been
307 	// rescheduled from. The reschedule didn't happen because the thread is new.
308 	RELEASE_THREAD_LOCK();
309 
310 	// start tracking time
311 	thread->last_time = system_time();
312 
313 	enable_interrupts(); // this essentially simulates a return-from-interrupt
314 }
315 
316 
317 static void
318 thread_kthread_exit(void)
319 {
320 	struct thread *thread = thread_get_current_thread();
321 
322 	thread->exit.reason = THREAD_RETURN_EXIT;
323 	thread_exit();
324 }
325 
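// Note on the bootstrap sequence: arch_thread_init_kthread_stack() (see
// create_thread()) sets up the new thread's kernel stack so that it first
// runs thread_kthread_entry() -- releasing the thread lock and enabling
// interrupts -- then one of the kentry functions below, and finally
// thread_kthread_exit() if that function returns.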
326 
327 /*!
328 	Initializes the thread and jumps to its userspace entry point.
329 	This function is called at creation time of every user thread,
330 	but not for a team's main thread.
331 */
332 static int
333 _create_user_thread_kentry(void)
334 {
335 	struct thread *thread = thread_get_current_thread();
336 
337 	// a signal may have been delivered here
338 	thread_at_kernel_exit();
339 
340 	// jump to the entry point in user space
341 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
342 		thread->args1, thread->args2);
343 
344 	// only get here if the above call fails
345 	return 0;
346 }
347 
348 
349 /*! Initializes the thread and calls its kernel space entry point. */
350 static int
351 _create_kernel_thread_kentry(void)
352 {
353 	struct thread *thread = thread_get_current_thread();
354 	int (*func)(void *args) = (int (*)(void *))thread->entry;
355 
356 	// call the entry function with the appropriate args
357 	return func(thread->args1);
358 }
359 
360 
361 /*!
362 	Creates a new thread in the team with the specified team ID.
363 
364 	\param threadID The ID to be assigned to the new thread. If
365 		  \code < 0 \endcode a fresh one is allocated.
366 */
367 static thread_id
368 create_thread(const char *name, team_id teamID, thread_entry_func entry,
369 	void *args1, void *args2, int32 priority, bool kernel, thread_id threadID)
370 {
371 	struct thread *thread, *currentThread;
372 	struct team *team;
373 	cpu_status state;
374 	char stack_name[B_OS_NAME_LENGTH];
375 	status_t status;
376 	bool abort = false;
377 	bool debugNewThread = false;
378 
379 	TRACE(("create_thread(%s, id = %ld, %s)\n", name, threadID,
380 		kernel ? "kernel" : "user"));
381 
382 	thread = create_thread_struct(NULL, name, threadID, NULL);
383 	if (thread == NULL)
384 		return B_NO_MEMORY;
385 
386 	thread->priority = priority == -1 ? B_NORMAL_PRIORITY : priority;
387 	thread->next_priority = thread->priority;
388 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
389 	thread->state = B_THREAD_SUSPENDED;
390 	thread->next_state = B_THREAD_SUSPENDED;
391 
392 	// init debug structure
393 	clear_thread_debug_info(&thread->debug_info, false);
394 
395 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%lx_kstack", name, thread->id);
396 	thread->kernel_stack_area = create_area(stack_name,
397 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
398 		KERNEL_STACK_SIZE, B_FULL_LOCK,
399 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
400 
401 	if (thread->kernel_stack_area < 0) {
402 		// we're not yet part of a team, so we can just bail out
403 		status = thread->kernel_stack_area;
404 
405 		dprintf("create_thread: error creating kernel stack: %s!\n",
406 			strerror(status));
407 
408 		delete_thread_struct(thread);
409 		return status;
410 	}
411 
412 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE;
413 
414 	state = disable_interrupts();
415 	GRAB_THREAD_LOCK();
416 
417 	// If the new thread belongs to the same team as the current thread,
418 	// it may inherit some of the thread debug flags.
419 	currentThread = thread_get_current_thread();
420 	if (currentThread && currentThread->team->id == teamID) {
421 		// inherit all user flags...
422 		int32 debugFlags = currentThread->debug_info.flags
423 			& B_THREAD_DEBUG_USER_FLAG_MASK;
424 
425 		// ... except the syscall tracing flags, unless explicitly specified
426 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
427 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
428 				| B_THREAD_DEBUG_POST_SYSCALL);
429 		}
430 
431 		thread->debug_info.flags = debugFlags;
432 
433 		// stop the new thread, if desired
434 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
435 	}
436 
437 	// insert into global list
438 	hash_insert(sThreadHash, thread);
439 	sUsedThreads++;
440 	RELEASE_THREAD_LOCK();
441 
442 	GRAB_TEAM_LOCK();
443 	// look at the team, make sure it's not being deleted
444 	team = team_get_team_struct_locked(teamID);
445 	if (team != NULL && team->state != TEAM_STATE_DEATH) {
446 		// Debug the new thread, if the parent thread required that (see above),
447 		// or the respective global team debug flag is set. But only if a
448 		// debugger is installed for the team.
449 		debugNewThread |= (atomic_get(&team->debug_info.flags)
450 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
451 		if (debugNewThread
452 			&& (atomic_get(&team->debug_info.flags)
453 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
454 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
455 		}
456 
457 		insert_thread_into_team(team, thread);
458 	} else
459 		abort = true;
460 
461 	RELEASE_TEAM_LOCK();
462 	if (abort) {
463 		GRAB_THREAD_LOCK();
464 		hash_remove(sThreadHash, thread);
465 		RELEASE_THREAD_LOCK();
466 	}
467 	restore_interrupts(state);
468 	if (abort) {
469 		delete_area(thread->kernel_stack_area);
470 		delete_thread_struct(thread);
471 		return B_BAD_TEAM_ID;
472 	}
473 
474 	thread->args1 = args1;
475 	thread->args2 = args2;
476 	thread->entry = entry;
477 	status = thread->id;
478 
479 	if (kernel) {
480 		// this sets up an initial kthread stack that runs the entry
481 
482 		// Note: whatever function wants to set up a user stack later for this
483 		// thread must initialize the TLS for it
484 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
485 			&thread_kthread_entry, &thread_kthread_exit);
486 	} else {
487 		// create user stack
488 
489 		// the stack will be between USER_STACK_REGION and the main thread stack area
490 		// (the user stack of the main thread is created in team_create_team())
491 		thread->user_stack_base = USER_STACK_REGION;
492 		thread->user_stack_size = USER_STACK_SIZE;
493 
494 		snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%lx_stack", name, thread->id);
495 		thread->user_stack_area = create_area_etc(team, stack_name,
496 				(void **)&thread->user_stack_base, B_BASE_ADDRESS,
497 				thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
498 				B_READ_AREA | B_WRITE_AREA | B_STACK_AREA);
499 		if (thread->user_stack_area < B_OK
500 			|| arch_thread_init_tls(thread) < B_OK) {
501 			// great, we have a fully running thread without a (usable) stack
502 			dprintf("create_thread: unable to create proper user stack!\n");
503 			status = thread->user_stack_area;
504 			kill_thread(thread->id);
505 		}
506 
507 		user_debug_update_new_thread_flags(thread->id);
508 
509 		// copy the user entry over to the args field in the thread struct
510 		// the function this will call will immediately switch the thread into
511 		// user space.
512 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
513 			&thread_kthread_entry, &thread_kthread_exit);
514 	}
515 
516 	return status;
517 }
518 
519 
520 /*!
521 	Finds a free death stack for us and allocates it.
522 	Must be called with interrupts enabled.
523 */
524 static uint32
525 get_death_stack(void)
526 {
527 	cpu_status state;
528 	uint32 bit;
529 	int32 i;
530 
531 	acquire_sem(sDeathStackSem);
532 
533 	// grab the death stack and thread locks, find a free spot and release
534 
535 	state = disable_interrupts();
536 
537 	acquire_spinlock(&sDeathStackLock);
538 	GRAB_THREAD_LOCK();
539 
540 	bit = sDeathStackBitmap;
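	// (~bit) & ~((~bit) - 1) isolates the lowest *clear* bit of the bitmap,
	// i.e. the first death stack that is not currently in use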
541 	bit = (~bit) & ~((~bit) - 1);
542 	sDeathStackBitmap |= bit;
543 
544 	RELEASE_THREAD_LOCK();
545 	release_spinlock(&sDeathStackLock);
546 
547 	restore_interrupts(state);
548 
549 	// sanity checks
550 	if (!bit)
551 		panic("get_death_stack: couldn't find free stack!\n");
552 
553 	if (bit & (bit - 1))
554 		panic("get_death_stack: impossible bitmap result!\n");
555 
556 	// bit to number
557 	for (i = -1; bit; i++) {
558 		bit >>= 1;
559 	}
560 
561 	TRACE(("get_death_stack: returning %#lx\n", sDeathStacks[i].address));
562 
563 	return (uint32)i;
564 }
565 
566 
567 /*!	Returns the thread's death stack to the pool.
568 	Interrupts must be disabled and the sDeathStackLock be held.
569 */
570 static void
571 put_death_stack(uint32 index)
572 {
573 	TRACE(("put_death_stack...: passed %lu\n", index));
574 
575 	if (index >= sNumDeathStacks)
576 		panic("put_death_stack: passed invalid stack index %ld\n", index);
577 
578 	if (!(sDeathStackBitmap & (1 << index)))
579 		panic("put_death_stack: passed invalid stack index %ld\n", index);
580 
581 	GRAB_THREAD_LOCK();
582 	sDeathStackBitmap &= ~(1 << index);
583 	RELEASE_THREAD_LOCK();
584 
585 	release_sem_etc(sDeathStackSem, 1, B_DO_NOT_RESCHEDULE);
586 		// we must not hold the thread lock when releasing a semaphore
587 }
588 
589 
590 static void
591 thread_exit2(void *_args)
592 {
593 	struct thread_exit_args args;
594 
595 	// copy the arguments over, since the source is probably on the kernel
596 	// stack we're about to delete
597 	memcpy(&args, _args, sizeof(struct thread_exit_args));
598 
599 	// we can't leave interrupts disabled at this point
600 	enable_interrupts();
601 
602 	TRACE(("thread_exit2, running on death stack %#lx\n", args.death_stack));
603 
604 	// delete the old kernel stack area
605 	TRACE(("thread_exit2: deleting old kernel stack id %ld for thread %ld\n",
606 		args.old_kernel_stack, args.thread->id));
607 
608 	delete_area(args.old_kernel_stack);
609 
610 	// remove this thread from all of the global lists
611 	TRACE(("thread_exit2: removing thread %ld from global lists\n",
612 		args.thread->id));
613 
614 	disable_interrupts();
615 	GRAB_TEAM_LOCK();
616 
617 	remove_thread_from_team(team_get_kernel_team(), args.thread);
618 
619 	RELEASE_TEAM_LOCK();
620 	enable_interrupts();
621 		// needed for the debugger notification below
622 
623 	TRACE(("thread_exit2: done removing thread from lists\n"));
624 
625 	if (args.death_sem >= 0)
626 		release_sem_etc(args.death_sem, 1, B_DO_NOT_RESCHEDULE);
627 
628 	// notify the debugger
629 	if (args.original_team_id >= 0
630 		&& args.original_team_id != team_get_kernel_team_id()) {
631 		user_debug_thread_deleted(args.original_team_id, args.thread->id);
632 	}
633 
634 	disable_interrupts();
635 
636 	// Set the next state to be gone: this will cause the thread structure
637 	// to be returned to the dead queue (dead_q) upon reschedule.
638 	// Note, we need to have disabled interrupts at this point, or else
639 	// we could get rescheduled too early.
640 	args.thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
641 
642 	// return the death stack and reschedule one last time
643 
644 	// Note that we need to hold sDeathStackLock until we've got the thread
645 	// lock. Otherwise someone else might grab our stack in the meantime.
646 	acquire_spinlock(&sDeathStackLock);
647 	put_death_stack(args.death_stack);
648 
649 	GRAB_THREAD_LOCK();
650 	release_spinlock(&sDeathStackLock);
651 
652 	scheduler_reschedule();
653 		// requires thread lock to be held
654 
655 	// never get to here
656 	panic("thread_exit2: made it where it shouldn't have!\n");
657 }
658 
659 
660 /*!
661 	Fills the thread_info structure with information from the specified
662 	thread.
663 	The thread lock must be held when called.
664 */
665 static void
666 fill_thread_info(struct thread *thread, thread_info *info, size_t size)
667 {
668 	info->thread = thread->id;
669 	info->team = thread->team->id;
670 
671 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
672 
673 	if (thread->state == B_THREAD_WAITING) {
674 		if (thread->sem.blocking == sSnoozeSem)
675 			info->state = B_THREAD_ASLEEP;
676 		else if (thread->sem.blocking == thread->msg.read_sem)
677 			info->state = B_THREAD_RECEIVING;
678 		else
679 			info->state = B_THREAD_WAITING;
680 	} else
681 		info->state = (thread_state)thread->state;
682 
683 	info->priority = thread->priority;
684 	info->sem = thread->sem.blocking;
685 	info->user_time = thread->user_time;
686 	info->kernel_time = thread->kernel_time;
687 	info->stack_base = (void *)thread->user_stack_base;
688 	info->stack_end = (void *)(thread->user_stack_base
689 		+ thread->user_stack_size);
690 }
691 
692 
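/*!	Thread messaging in a nutshell: every thread owns a one-message buffer.
	msg.write_sem starts at 1 and represents the free slot; msg.read_sem
	starts at 0 and signals a pending message. A sender acquires the target's
	write_sem, fills in msg.sender/code/size/buffer and releases the target's
	read_sem; the receiver acquires its own read_sem, copies the data out and
	releases its write_sem again.

	Illustrative sketch (thread IDs and payload are made-up examples):

		char buffer[16] = "ping";
		send_data(targetThread, 'ping', buffer, sizeof(buffer));

		// ... and in the target thread:
		thread_id sender;
		char incoming[16];
		int32 code = receive_data(&sender, incoming, sizeof(incoming));
*/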
693 static status_t
694 send_data_etc(thread_id id, int32 code, const void *buffer,
695 	size_t bufferSize, int32 flags)
696 {
697 	struct thread *target;
698 	sem_id cachedSem;
699 	cpu_status state;
700 	status_t status;
701 	cbuf *data;
702 
703 	state = disable_interrupts();
704 	GRAB_THREAD_LOCK();
705 	target = thread_get_thread_struct_locked(id);
706 	if (!target) {
707 		RELEASE_THREAD_LOCK();
708 		restore_interrupts(state);
709 		return B_BAD_THREAD_ID;
710 	}
711 	cachedSem = target->msg.write_sem;
712 	RELEASE_THREAD_LOCK();
713 	restore_interrupts(state);
714 
715 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
716 		return B_NO_MEMORY;
717 
718 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
719 	if (status == B_INTERRUPTED) {
720 		// We got interrupted by a signal
721 		return status;
722 	}
723 	if (status != B_OK) {
724 		// Any other acquisition problems may be due to thread deletion
725 		return B_BAD_THREAD_ID;
726 	}
727 
728 	if (bufferSize > 0) {
729 		data = cbuf_get_chain(bufferSize);
730 		if (data == NULL)
731 			return B_NO_MEMORY;
732 		status = cbuf_user_memcpy_to_chain(data, 0, buffer, bufferSize);
733 		if (status < B_OK) {
734 			cbuf_free_chain(data);
735 			return B_NO_MEMORY;
736 		}
737 	} else
738 		data = NULL;
739 
740 	state = disable_interrupts();
741 	GRAB_THREAD_LOCK();
742 
743 	// The target thread could have been deleted at this point
744 	target = thread_get_thread_struct_locked(id);
745 	if (target == NULL) {
746 		RELEASE_THREAD_LOCK();
747 		restore_interrupts(state);
748 		cbuf_free_chain(data);
749 		return B_BAD_THREAD_ID;
750 	}
751 
752 	// Save the message information
753 	target->msg.sender = thread_get_current_thread()->id;
754 	target->msg.code = code;
755 	target->msg.size = bufferSize;
756 	target->msg.buffer = data;
757 	cachedSem = target->msg.read_sem;
758 
759 	RELEASE_THREAD_LOCK();
760 	restore_interrupts(state);
761 
762 	release_sem(cachedSem);
763 	return B_OK;
764 }
765 
766 
767 static int32
768 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
769 	int32 flags)
770 {
771 	struct thread *thread = thread_get_current_thread();
772 	status_t status;
773 	size_t size;
774 	int32 code;
775 
776 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
777 	if (status < B_OK) {
778 		// Actually, we're not supposed to return error codes
779 		// but since the only reason this can fail is that we
780 		// were killed, it's probably okay to do so (but also
781 		// meaningless).
782 		return status;
783 	}
784 
785 	if (buffer != NULL && bufferSize != 0) {
786 		size = min_c(bufferSize, thread->msg.size);
787 		status = cbuf_user_memcpy_from_chain(buffer, thread->msg.buffer,
788 			0, size);
789 		if (status < B_OK) {
790 			cbuf_free_chain(thread->msg.buffer);
791 			release_sem(thread->msg.write_sem);
792 			return status;
793 		}
794 	}
795 
796 	*_sender = thread->msg.sender;
797 	code = thread->msg.code;
798 
799 	cbuf_free_chain(thread->msg.buffer);
800 	release_sem(thread->msg.write_sem);
801 
802 	return code;
803 }
804 
805 
806 //	#pragma mark - debugger calls
807 
808 
809 static int
810 make_thread_unreal(int argc, char **argv)
811 {
812 	struct thread *thread;
813 	struct hash_iterator i;
814 	int32 id = -1;
815 
816 	if (argc > 2) {
817 		kprintf("usage: unreal [id]\n");
818 		return 0;
819 	}
820 
821 	if (argc > 1)
822 		id = strtoul(argv[1], NULL, 0);
823 
824 	hash_open(sThreadHash, &i);
825 
826 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
827 		if (id != -1 && thread->id != id)
828 			continue;
829 
830 		if (thread->priority > B_DISPLAY_PRIORITY) {
831 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
832 			kprintf("thread %ld made unreal\n", thread->id);
833 		}
834 	}
835 
836 	hash_close(sThreadHash, &i, false);
837 	return 0;
838 }
839 
840 
841 static int
842 set_thread_prio(int argc, char **argv)
843 {
844 	struct thread *thread;
845 	struct hash_iterator i;
846 	int32 id;
847 	int32 prio;
848 
849 	if (argc > 3 || argc < 2) {
850 		kprintf("usage: priority <priority> [thread-id]\n");
851 		return 0;
852 	}
853 
854 	prio = strtoul(argv[1], NULL, 0);
855 	if (prio > B_MAX_PRIORITY)
856 		prio = B_MAX_PRIORITY;
857 	if (prio < B_MIN_PRIORITY)
858 		prio = B_MIN_PRIORITY;
859 
860 	if (argc > 2)
861 		id = strtoul(argv[2], NULL, 0);
862 	else
863 		id = thread_get_current_thread()->id;
864 
865 	hash_open(sThreadHash, &i);
866 
867 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
868 		if (thread->id != id)
869 			continue;
870 		thread->priority = thread->next_priority = prio;
871 		kprintf("thread %ld set to priority %ld\n", id, prio);
872 		break;
873 	}
874 	if (!thread)
875 		kprintf("thread %ld (%#lx) not found\n", id, id);
876 
877 	hash_close(sThreadHash, &i, false);
878 	return 0;
879 }
880 
881 
882 static int
883 make_thread_suspended(int argc, char **argv)
884 {
885 	struct thread *thread;
886 	struct hash_iterator i;
887 	int32 id;
888 
889 	if (argc > 2) {
890 		kprintf("usage: suspend [thread-id]\n");
891 		return 0;
892 	}
893 
894 	if (argc == 1)
895 		id = thread_get_current_thread()->id;
896 	else
897 		id = strtoul(argv[1], NULL, 0);
898 
899 	hash_open(sThreadHash, &i);
900 
901 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
902 		if (thread->id != id)
903 			continue;
904 
905 		thread->state = thread->next_state = B_THREAD_SUSPENDED;
906 		kprintf("thread %ld suspended\n", id);
907 		break;
908 	}
909 	if (!thread)
910 		kprintf("thread %ld (%#lx) not found\n", id, id);
911 
912 	hash_close(sThreadHash, &i, false);
913 	return 0;
914 }
915 
916 
917 static int
918 make_thread_resumed(int argc, char **argv)
919 {
920 	struct thread *thread;
921 	struct hash_iterator i;
922 	int32 id;
923 
924 	if (argc != 2) {
925 		kprintf("usage: resume <thread-id>\n");
926 		return 0;
927 	}
928 
929 	// force user to enter a thread id, as using
930 	// the current thread is usually not intended
931 	id = strtoul(argv[1], NULL, 0);
932 
933 	hash_open(sThreadHash, &i);
934 
935 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
936 		if (thread->id != id)
937 			continue;
938 
939 		if (thread->state == B_THREAD_SUSPENDED) {
940 			thread->state = thread->next_state = B_THREAD_READY;
941 			scheduler_enqueue_in_run_queue(thread);
942 			kprintf("thread %ld resumed\n", thread->id);
943 		}
944 		break;
945 	}
946 	if (!thread)
947 		kprintf("thread %ld (%#lx) not found\n", id, id);
948 
949 	hash_close(sThreadHash, &i, false);
950 	return 0;
951 }
952 
953 
954 static int
955 drop_into_debugger(int argc, char **argv)
956 {
957 	status_t err;
958 	int32 id;
959 
960 	if (argc > 2) {
961 		kprintf("usage: drop [thread-id]\n");
962 		return 0;
963 	}
964 
965 	if (argc == 1)
966 		id = thread_get_current_thread()->id;
967 	else
968 		id = strtoul(argv[1], NULL, 0);
969 
970 	err = _user_debug_thread(id);
971 	if (err)
972 		kprintf("drop failed\n");
973 	else
974 		kprintf("thread %ld dropped into user debugger\n", id);
975 
976 	return 0;
977 }
978 
979 
980 static const char *
981 state_to_text(struct thread *thread, int32 state)
982 {
983 	switch (state) {
984 		case B_THREAD_READY:
985 			return "ready";
986 
987 		case B_THREAD_RUNNING:
988 			return "running";
989 
990 		case B_THREAD_WAITING:
991 			if (thread->sem.blocking == sSnoozeSem)
992 				return "zzz";
993 			if (thread->sem.blocking == thread->msg.read_sem)
994 				return "receive";
995 
996 			return "waiting";
997 
998 		case B_THREAD_SUSPENDED:
999 			return "suspended";
1000 
1001 		case THREAD_STATE_FREE_ON_RESCHED:
1002 			return "death";
1003 
1004 		default:
1005 			return "UNKNOWN";
1006 	}
1007 }
1008 
1009 
1010 static void
1011 _dump_thread_info(struct thread *thread)
1012 {
1013 	struct death_entry *death = NULL;
1014 
1015 	kprintf("THREAD: %p\n", thread);
1016 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1017 	kprintf("name:               \"%s\"\n", thread->name);
1018 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1019 		thread->all_next, thread->team_next, thread->queue_next);
1020 	kprintf("priority:           %ld (next %ld)\n", thread->priority, thread->next_priority);
1021 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1022 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1023 	kprintf("cpu:                %p ", thread->cpu);
1024 	if (thread->cpu)
1025 		kprintf("(%d)\n", thread->cpu->cpu_num);
1026 	else
1027 		kprintf("\n");
1028 	kprintf("sig_pending:        %#lx\n", thread->sig_pending);
1029 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1030 	kprintf("  sem.blocking:     %ld\n", thread->sem.blocking);
1031 	kprintf("  sem.count:        %ld\n", thread->sem.count);
1032 	kprintf("  sem.acquire_status: %#lx\n", thread->sem.acquire_status);
1033 	kprintf("  sem.flags:        %#lx\n", thread->sem.flags);
1034 
1035 	kprintf("condition variables:");
1036 	PrivateConditionVariableEntry* entry = thread->condition_variable_entry;
1037 	while (entry != NULL) {
1038 		kprintf(" %p", entry->Variable());
1039 		entry = entry->ThreadNext();
1040 	}
1041 	kprintf("\n");
1042 
1043 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1044 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1045 	kprintf("entry:              %p\n", (void *)thread->entry);
1046 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1047 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1048 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1049 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1050 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1051 	kprintf("  exit.waiters:\n");
1052 	while ((death = (struct death_entry*)list_get_next_item(
1053 			&thread->exit.waiters, death)) != NULL) {
1054 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1055 	}
1056 
1057 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1058 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1059 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1060 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1061 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1062 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1063 		strerror(thread->kernel_errno));
1064 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1065 	kprintf("user_time:          %Ld\n", thread->user_time);
1066 	kprintf("flags:              0x%lx\n", thread->flags);
1067 	kprintf("architecture dependent section:\n");
1068 	arch_thread_dump_info(&thread->arch_info);
1069 }
1070 
1071 
1072 static int
1073 dump_thread_info(int argc, char **argv)
1074 {
1075 	const char *name = NULL;
1076 	struct thread *thread;
1077 	int32 id = -1;
1078 	struct hash_iterator i;
1079 	bool found = false;
1080 
1081 	if (argc > 2) {
1082 		kprintf("usage: thread [id/address/name]\n");
1083 		return 0;
1084 	}
1085 
1086 	if (argc == 1) {
1087 		_dump_thread_info(thread_get_current_thread());
1088 		return 0;
1089 	} else {
1090 		name = argv[1];
1091 		id = strtoul(argv[1], NULL, 0);
1092 
1093 		if (IS_KERNEL_ADDRESS(id)) {
1094 			// semi-hack
1095 			_dump_thread_info((struct thread *)id);
1096 			return 0;
1097 		}
1098 	}
1099 
1100 	// walk through the thread list, trying to match name or id
1101 	hash_open(sThreadHash, &i);
1102 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1103 		if ((name != NULL && !strcmp(name, thread->name)) || thread->id == id) {
1104 			_dump_thread_info(thread);
1105 			found = true;
1106 			break;
1107 		}
1108 	}
1109 	hash_close(sThreadHash, &i, false);
1110 
1111 	if (!found)
1112 		kprintf("thread \"%s\" (%ld) doesn't exist!\n", argv[1], id);
1113 	return 0;
1114 }
1115 
1116 
1117 static int
1118 dump_thread_list(int argc, char **argv)
1119 {
1120 	struct thread *thread;
1121 	struct hash_iterator i;
1122 	bool realTimeOnly = false;
1123 	int32 requiredState = 0;
1124 	team_id team = -1;
1125 	sem_id sem = -1;
1126 
1127 	if (!strcmp(argv[0], "realtime"))
1128 		realTimeOnly = true;
1129 	else if (!strcmp(argv[0], "ready"))
1130 		requiredState = B_THREAD_READY;
1131 	else if (!strcmp(argv[0], "running"))
1132 		requiredState = B_THREAD_RUNNING;
1133 	else if (!strcmp(argv[0], "waiting")) {
1134 		requiredState = B_THREAD_WAITING;
1135 
1136 		if (argc > 1) {
1137 			sem = strtoul(argv[1], NULL, 0);
1138 			if (sem == 0)
1139 				kprintf("ignoring invalid semaphore argument.\n");
1140 		}
1141 	} else if (argc > 1) {
1142 		team = strtoul(argv[1], NULL, 0);
1143 		if (team == 0)
1144 			kprintf("ignoring invalid team argument.\n");
1145 	}
1146 
1147 	kprintf("thread         id  state        sem/cv cpu pri  stack      team  "
1148 		"name\n");
1149 
1150 	hash_open(sThreadHash, &i);
1151 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1152 		// filter out threads not matching the search criteria
1153 		if ((requiredState && thread->state != requiredState)
1154 			|| (sem > 0 && thread->sem.blocking != sem)
1155 			|| (team > 0 && thread->team->id != team)
1156 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1157 			continue;
1158 
1159 		kprintf("%p %6ld  %-9s", thread, thread->id, state_to_text(thread,
1160 			thread->state));
1161 
1162 		// does it block on a semaphore or a condition variable?
1163 		if (thread->state == B_THREAD_WAITING) {
1164 			if (thread->condition_variable_entry)
1165 				kprintf("%p  ", thread->condition_variable_entry->Variable());
1166 			else
1167 				kprintf("%10ld  ", thread->sem.blocking);
1168 		} else
1169 			kprintf("      -     ");
1170 
1171 		// on which CPU does it run?
1172 		if (thread->cpu)
1173 			kprintf("%2d", thread->cpu->cpu_num);
1174 		else
1175 			kprintf(" -");
1176 
1177 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1178 			(void *)thread->kernel_stack_base, thread->team->id,
1179 			thread->name != NULL ? thread->name : "<NULL>");
1180 	}
1181 	hash_close(sThreadHash, &i, false);
1182 	return 0;
1183 }
1184 
1185 
1186 //	#pragma mark - private kernel API
1187 
1188 
1189 void
1190 thread_exit(void)
1191 {
1192 	cpu_status state;
1193 	struct thread *thread = thread_get_current_thread();
1194 	struct process_group *freeGroup = NULL;
1195 	struct team *team = thread->team;
1196 	thread_id parentID = -1;
1197 	bool deleteTeam = false;
1198 	sem_id cachedDeathSem = -1;
1199 	status_t status;
1200 	struct thread_debug_info debugInfo;
1201 	team_id teamID = team->id;
1202 
1203 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1204 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1205 			? "due to signal" : "normally", thread->exit.status));
1206 
1207 	if (!are_interrupts_enabled())
1208 		panic("thread_exit() called with interrupts disabled!\n");
1209 
1210 	// boost our priority to get this over with
1211 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1212 
1213 	// Cancel previously installed alarm timer, if any
1214 	cancel_timer(&thread->alarm);
1215 
1216 	// delete the user stack area first, we won't need it anymore
1217 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1218 		area_id area = thread->user_stack_area;
1219 		thread->user_stack_area = -1;
1220 		delete_area_etc(team, area);
1221 	}
1222 
1223 	struct job_control_entry *death = NULL;
1224 	struct death_entry* threadDeathEntry = NULL;
1225 
1226 	if (team != team_get_kernel_team()) {
1227 		if (team->main_thread == thread) {
1228 			// this was the main thread in this team, so we will delete that as well
1229 			deleteTeam = true;
1230 		} else
1231 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1232 
1233 		// remove this thread from the current team and add it to the kernel
1234 		// put the thread into the kernel team until it dies
1235 		state = disable_interrupts();
1236 		GRAB_TEAM_LOCK();
1237 		GRAB_THREAD_LOCK();
1238 			// removing the thread and putting its death entry into the parent
1239 			// team needs to be an atomic operation
1240 
1241 		// remember how long this thread lasted
1242 		team->dead_threads_kernel_time += thread->kernel_time;
1243 		team->dead_threads_user_time += thread->user_time;
1244 
1245 		remove_thread_from_team(team, thread);
1246 		insert_thread_into_team(team_get_kernel_team(), thread);
1247 
1248 		cachedDeathSem = team->death_sem;
1249 
1250 		if (deleteTeam) {
1251 			struct team *parent = team->parent;
1252 
1253 			// remember who our parent was so we can send a signal
1254 			parentID = parent->id;
1255 
1256 			// Set the team job control state to "dead" and detach the job
1257 			// control entry from our team struct.
1258 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1259 			death = team->job_control_entry;
1260 			team->job_control_entry = NULL;
1261 
1262 			if (death != NULL) {
1263 				death->team = NULL;
1264 				death->group_id = team->group_id;
1265 				death->thread = thread->id;
1266 				death->status = thread->exit.status;
1267 				death->reason = thread->exit.reason;
1268 				death->signal = thread->exit.signal;
1269 
1270 				// team_set_job_control_state() already moved our entry
1271 				// into the parent's list. We just check the soft limit of
1272 				// death entries.
1273 				if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
1274 					death = parent->dead_children->entries.RemoveHead();
1275 					parent->dead_children->count--;
1276 				} else
1277 					death = NULL;
1278 
1279 				RELEASE_THREAD_LOCK();
1280 			} else
1281 				RELEASE_THREAD_LOCK();
1282 
1283 			team_remove_team(team, &freeGroup);
1284 		} else {
1285 			// The thread is not the main thread. We store a thread death
1286 		// entry for it, unless someone is already waiting for it.
1287 			if (threadDeathEntry != NULL
1288 				&& list_is_empty(&thread->exit.waiters)) {
1289 				threadDeathEntry->thread = thread->id;
1290 				threadDeathEntry->status = thread->exit.status;
1291 				threadDeathEntry->reason = thread->exit.reason;
1292 				threadDeathEntry->signal = thread->exit.signal;
1293 
1294 			// add entry -- remove an old one, if we hit the limit
1295 				list_add_item(&team->dead_threads, threadDeathEntry);
1296 				team->dead_threads_count++;
1297 				threadDeathEntry = NULL;
1298 
1299 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1300 					threadDeathEntry = (death_entry*)list_remove_head_item(
1301 						&team->dead_threads);
1302 					team->dead_threads_count--;
1303 				}
1304 			}
1305 
1306 			RELEASE_THREAD_LOCK();
1307 		}
1308 
1309 		RELEASE_TEAM_LOCK();
1310 
1311 		// swap address spaces, to make sure we're running on the kernel's pgdir
1312 		vm_swap_address_space(vm_kernel_address_space());
1313 		restore_interrupts(state);
1314 
1315 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1316 	}
1317 
1318 	if (threadDeathEntry != NULL)
1319 		free(threadDeathEntry);
1320 
1321 	// delete the team if we're its main thread
1322 	if (deleteTeam) {
1323 		team_delete_process_group(freeGroup);
1324 		team_delete_team(team);
1325 
1326 		// we need to delete any death entry that made it to here
1327 		if (death != NULL)
1328 			delete death;
1329 
1330 		send_signal_etc(parentID, SIGCHLD, B_DO_NOT_RESCHEDULE);
1331 		cachedDeathSem = -1;
1332 	}
1333 
1334 	state = disable_interrupts();
1335 	GRAB_THREAD_LOCK();
1336 
1337 	// remove thread from hash, so it's no longer accessible
1338 	hash_remove(sThreadHash, thread);
1339 	sUsedThreads--;
1340 
1341 	// Stop debugging for this thread
1342 	debugInfo = thread->debug_info;
1343 	clear_thread_debug_info(&thread->debug_info, true);
1344 
1345 	// Remove the select infos. We notify them a little later.
1346 	select_info* selectInfos = thread->select_infos;
1347 	thread->select_infos = NULL;
1348 
1349 	RELEASE_THREAD_LOCK();
1350 	restore_interrupts(state);
1351 
1352 	destroy_thread_debug_info(&debugInfo);
1353 
1354 	// notify select infos
1355 	select_info* info = selectInfos;
1356 	while (info != NULL) {
1357 		select_sync* sync = info->sync;
1358 
1359 		notify_select_events(info, B_EVENT_INVALID);
1360 		info = info->next;
1361 		put_select_sync(sync);
1362 	}
1363 
1364 	// shutdown the thread messaging
1365 
1366 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1367 	if (status == B_WOULD_BLOCK) {
1368 		// there is data waiting for us, so let us eat it
1369 		thread_id sender;
1370 
1371 		delete_sem(thread->msg.write_sem);
1372 			// first, let's remove all possibly waiting writers
1373 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1374 	} else {
1375 		// we probably own the semaphore here, and we're the last to do so
1376 		delete_sem(thread->msg.write_sem);
1377 	}
1378 	// now we can safely remove the msg.read_sem
1379 	delete_sem(thread->msg.read_sem);
1380 
1381 	// fill all death entries and delete the sem that others will use to wait on us
1382 	{
1383 		sem_id cachedExitSem = thread->exit.sem;
1384 		cpu_status state;
1385 
1386 		state = disable_interrupts();
1387 		GRAB_THREAD_LOCK();
1388 
1389 		// make sure no one will grab this semaphore again
1390 		thread->exit.sem = -1;
1391 
1392 		// fill all death entries
1393 		death_entry* entry = NULL;
1394 		while ((entry = (struct death_entry*)list_get_next_item(
1395 				&thread->exit.waiters, entry)) != NULL) {
1396 			entry->status = thread->exit.status;
1397 			entry->reason = thread->exit.reason;
1398 			entry->signal = thread->exit.signal;
1399 		}
1400 
1401 		RELEASE_THREAD_LOCK();
1402 		restore_interrupts(state);
1403 
1404 		delete_sem(cachedExitSem);
1405 	}
1406 
1407 	{
1408 		struct thread_exit_args args;
1409 
1410 		args.thread = thread;
1411 		args.old_kernel_stack = thread->kernel_stack_area;
1412 		args.death_stack = get_death_stack();
1413 		args.death_sem = cachedDeathSem;
1414 		args.original_team_id = teamID;
1415 
1416 
1417 		disable_interrupts();
1418 
1419 		// set the new kernel stack officially to the death stack; it won't be
1420 		// switched to until the next function is called. This must be done now,
1421 		// before a context switch, or we'll stay on the old stack
1422 		thread->kernel_stack_area = sDeathStacks[args.death_stack].area;
1423 		thread->kernel_stack_base = sDeathStacks[args.death_stack].address;
1424 
1425 		// we will continue in thread_exit2(), on the new stack
1426 		arch_thread_switch_kstack_and_call(thread, thread->kernel_stack_base
1427 			 + KERNEL_STACK_SIZE, thread_exit2, &args);
1428 	}
1429 
1430 	panic("never can get here\n");
1431 }
1432 
1433 
1434 struct thread *
1435 thread_get_thread_struct(thread_id id)
1436 {
1437 	struct thread *thread;
1438 	cpu_status state;
1439 
1440 	state = disable_interrupts();
1441 	GRAB_THREAD_LOCK();
1442 
1443 	thread = thread_get_thread_struct_locked(id);
1444 
1445 	RELEASE_THREAD_LOCK();
1446 	restore_interrupts(state);
1447 
1448 	return thread;
1449 }
1450 
1451 
1452 struct thread *
1453 thread_get_thread_struct_locked(thread_id id)
1454 {
1455 	struct thread_key key;
1456 
1457 	key.id = id;
1458 
1459 	return (struct thread*)hash_lookup(sThreadHash, &key);
1460 }
1461 
1462 
1463 /*!
1464 	Called in the interrupt handler code when a thread enters
1465 	the kernel for any reason.
1466 	Only tracks time for now.
1467 	Interrupts are disabled.
1468 */
1469 void
1470 thread_at_kernel_entry(bigtime_t now)
1471 {
1472 	struct thread *thread = thread_get_current_thread();
1473 
1474 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1475 
1476 	// track user time
1477 	thread->user_time += now - thread->last_time;
1478 	thread->last_time = now;
1479 
1480 	thread->in_kernel = true;
1481 }
1482 
1483 
1484 /*!
1485 	Called whenever a thread exits kernel space to user space.
1486 	Tracks time, handles signals, ...
1487 */
1488 void
1489 thread_at_kernel_exit(void)
1490 {
1491 	struct thread *thread = thread_get_current_thread();
1492 	cpu_status state;
1493 	bigtime_t now;
1494 
1495 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1496 
1497 	if (handle_signals(thread)) {
1498 		state = disable_interrupts();
1499 		GRAB_THREAD_LOCK();
1500 
1501 		// was: smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
1502 		scheduler_reschedule();
1503 
1504 		RELEASE_THREAD_LOCK();
1505 	} else
1506 		state = disable_interrupts();
1507 
1508 	thread->in_kernel = false;
1509 
1510 	// track kernel time
1511 	now = system_time();
1512 	thread->kernel_time += now - thread->last_time;
1513 	thread->last_time = now;
1514 
1515 	restore_interrupts(state);
1516 }
1517 
1518 
1519 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1520 	and no debugging shall be done.
1521 	Interrupts are disabled in this case.
1522 */
1523 void
1524 thread_at_kernel_exit_no_signals(void)
1525 {
1526 	struct thread *thread = thread_get_current_thread();
1527 
1528 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1529 
1530 	thread->in_kernel = false;
1531 
1532 	// track kernel time
1533 	bigtime_t now = system_time();
1534 	thread->kernel_time += now - thread->last_time;
1535 	thread->last_time = now;
1536 }
1537 
1538 
1539 void
1540 thread_reset_for_exec(void)
1541 {
1542 	struct thread *thread = thread_get_current_thread();
1543 
1544 	cancel_timer(&thread->alarm);
1545 	reset_signals(thread);
1546 }
1547 
1548 
1549 /*! Insert a thread to the tail of a queue */
1550 void
1551 thread_enqueue(struct thread *thread, struct thread_queue *queue)
1552 {
1553 	thread->queue_next = NULL;
1554 	if (queue->head == NULL) {
1555 		queue->head = thread;
1556 		queue->tail = thread;
1557 	} else {
1558 		queue->tail->queue_next = thread;
1559 		queue->tail = thread;
1560 	}
1561 }
1562 
1563 
1564 struct thread *
1565 thread_lookat_queue(struct thread_queue *queue)
1566 {
1567 	return queue->head;
1568 }
1569 
1570 
1571 struct thread *
1572 thread_dequeue(struct thread_queue *queue)
1573 {
1574 	struct thread *thread = queue->head;
1575 
1576 	if (thread != NULL) {
1577 		queue->head = thread->queue_next;
1578 		if (queue->tail == thread)
1579 			queue->tail = NULL;
1580 	}
1581 	return thread;
1582 }
1583 
1584 
1585 struct thread *
1586 thread_dequeue_id(struct thread_queue *q, thread_id id)
1587 {
1588 	struct thread *thread;
1589 	struct thread *last = NULL;
1590 
1591 	thread = q->head;
1592 	while (thread != NULL) {
1593 		if (thread->id == id) {
1594 			if (last == NULL)
1595 				q->head = thread->queue_next;
1596 			else
1597 				last->queue_next = thread->queue_next;
1598 
1599 			if (q->tail == thread)
1600 				q->tail = last;
1601 			break;
1602 		}
1603 		last = thread;
1604 		thread = thread->queue_next;
1605 	}
1606 	return thread;
1607 }
1608 
1609 
1610 thread_id
1611 allocate_thread_id(void)
1612 {
1613 	return atomic_add(&sNextThreadID, 1);
1614 }
1615 
1616 
1617 thread_id
1618 peek_next_thread_id(void)
1619 {
1620 	return atomic_get(&sNextThreadID);
1621 }
1622 
1623 
1624 void
1625 thread_yield(void)
1626 {
1627 	// snooze for roughly 3 thread quantums
1628 	snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1629 #if 0
1630 	cpu_status state;
1631 
1632 	struct thread *thread = thread_get_current_thread();
1633 	if (thread == NULL)
1634 		return;
1635 
1636 	state = disable_interrupts();
1637 	GRAB_THREAD_LOCK();
1638 
1639 	// mark the thread as yielded, so it will not be scheduled next
1640 	//thread->was_yielded = true;
1641 	thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1642 	scheduler_reschedule();
1643 
1644 	RELEASE_THREAD_LOCK();
1645 	restore_interrupts(state);
1646 #endif
1647 }
1648 
1649 
1650 /*!
1651 	Kernel private thread creation function.
1652 
1653 	\param threadID The ID to be assigned to the new thread. If
1654 		  \code < 0 \endcode a fresh one is allocated.
1655 */
1656 thread_id
1657 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1658 	void *arg, team_id team, thread_id threadID)
1659 {
1660 	return create_thread(name, team, (thread_entry_func)function, arg, NULL,
1661 		priority, true, threadID);
1662 }
1663 
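/*	Usage sketch -- the worker function, its name and priority are made-up
	examples; threads are created suspended, so the caller resumes them:

		static int32
		cleaner_thread(void *data)
		{
			// ... do the actual work ...
			return 0;
		}

		thread_id id = spawn_kernel_thread_etc(cleaner_thread, "cleaner",
			B_LOW_PRIORITY, NULL, team_get_kernel_team_id(), -1);
		if (id >= B_OK)
			resume_thread(id);
*/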
1664 
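/*	How waiting works: a death_entry is pushed onto the target thread's
	exit.waiters list and we then block on its exit.sem. On exit the dying
	thread fills in all queued death entries and deletes that semaphore, so
	the acquire_sem_etc() below returns B_BAD_SEM_ID and the entry carries
	the exit status. If the thread is already gone, its death entry is
	looked up in the current team instead.
*/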
1665 status_t
1666 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1667 	status_t *_returnCode)
1668 {
1669 	sem_id exitSem = B_BAD_THREAD_ID;
1670 	struct death_entry death;
1671 	job_control_entry* freeDeath = NULL;
1672 	struct thread *thread;
1673 	cpu_status state;
1674 	status_t status = B_OK;
1675 
1676 	if (id < B_OK)
1677 		return B_BAD_THREAD_ID;
1678 
1679 	// we need to resume the thread we're waiting for first
1680 
1681 	state = disable_interrupts();
1682 	GRAB_THREAD_LOCK();
1683 
1684 	thread = thread_get_thread_struct_locked(id);
1685 	if (thread != NULL) {
1686 		// remember the semaphore we have to wait on and place our death entry
1687 		exitSem = thread->exit.sem;
1688 		list_add_link_to_head(&thread->exit.waiters, &death);
1689 	}
1690 
1691 	death_entry* threadDeathEntry = NULL;
1692 
1693 	RELEASE_THREAD_LOCK();
1694 
1695 	if (thread == NULL) {
1696 		// we couldn't find this thread - maybe it's already gone, and we'll
1697 		// find its death entry in our team
1698 		GRAB_TEAM_LOCK();
1699 
1700 		struct team* team = thread_get_current_thread()->team;
1701 
1702 		// check the child death entries first (i.e. main threads of child
1703 		// teams)
1704 		bool deleteEntry;
1705 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1706 		if (freeDeath != NULL) {
1707 			death.status = freeDeath->status;
1708 			if (!deleteEntry)
1709 				freeDeath = NULL;
1710 		} else {
1711 			// check the thread death entries of the team (non-main threads)
1712 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1713 					&team->dead_threads, threadDeathEntry)) != NULL) {
1714 				if (threadDeathEntry->thread == id) {
1715 					list_remove_item(&team->dead_threads, threadDeathEntry);
1716 					team->dead_threads_count--;
1717 					death.status = threadDeathEntry->status;
1718 					break;
1719 				}
1720 			}
1721 
1722 			if (threadDeathEntry == NULL)
1723 				status = B_BAD_THREAD_ID;
1724 		}
1725 
1726 		RELEASE_TEAM_LOCK();
1727 	}
1728 
1729 	restore_interrupts(state);
1730 
1731 	if (thread == NULL && status == B_OK) {
1732 		// we found the thread's death entry in our team
1733 		if (_returnCode)
1734 			*_returnCode = death.status;
1735 
1736 		delete freeDeath;
1737 		free(threadDeathEntry);
1738 		return B_OK;
1739 	}
1740 
1741 	// we need to wait for the death of the thread
1742 
1743 	if (exitSem < B_OK)
1744 		return B_BAD_THREAD_ID;
1745 
1746 	resume_thread(id);
1747 		// make sure we don't wait forever on a suspended thread
1748 
1749 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1750 
1751 	if (status == B_OK) {
1752 		// this should never happen as the thread deletes the semaphore on exit
1753 		panic("could acquire exit_sem for thread %ld\n", id);
1754 	} else if (status == B_BAD_SEM_ID) {
1755 		// this is the way the thread normally exits
1756 		status = B_OK;
1757 
1758 		if (_returnCode)
1759 			*_returnCode = death.status;
1760 	} else {
1761 		// We were probably interrupted; we need to remove our death entry now.
1762 		state = disable_interrupts();
1763 		GRAB_THREAD_LOCK();
1764 
1765 		thread = thread_get_thread_struct_locked(id);
1766 		if (thread != NULL)
1767 			list_remove_link(&death);
1768 
1769 		RELEASE_THREAD_LOCK();
1770 		restore_interrupts(state);
1771 
1772 		// If the thread is already gone, we need to wait for its exit semaphore
1773 		// to make sure our death entry stays valid - it won't take long
1774 		if (thread == NULL)
1775 			acquire_sem(exitSem);
1776 	}
1777 
1778 	return status;
1779 }
1780 
1781 
1782 status_t
1783 select_thread(int32 id, struct select_info* info, bool kernel)
1784 {
1785 	InterruptsSpinLocker locker(thread_spinlock);
1786 
1787 	// get thread
1788 	struct thread* thread = thread_get_thread_struct_locked(id);
1789 	if (thread == NULL)
1790 		return B_BAD_THREAD_ID;
1791 
1792 	// We support only B_EVENT_INVALID at the moment.
1793 	info->selected_events &= B_EVENT_INVALID;
1794 
1795 	// add info to list
1796 	if (info->selected_events != 0) {
1797 		info->next = thread->select_infos;
1798 		thread->select_infos = info;
1799 
1800 		// we need a sync reference
1801 		atomic_add(&info->sync->ref_count, 1);
1802 	}
1803 
1804 	return B_OK;
1805 }
1806 
1807 
1808 status_t
1809 deselect_thread(int32 id, struct select_info* info, bool kernel)
1810 {
1811 	InterruptsSpinLocker locker(thread_spinlock);
1812 
1813 	// get thread
1814 	struct thread* thread = thread_get_thread_struct_locked(id);
1815 	if (thread == NULL)
1816 		return B_BAD_THREAD_ID;
1817 
1818 	// remove info from list
1819 	select_info** infoLocation = &thread->select_infos;
1820 	while (*infoLocation != NULL && *infoLocation != info)
1821 		infoLocation = &(*infoLocation)->next;
1822 
1823 	if (*infoLocation != info)
1824 		return B_OK;
1825 
1826 	*infoLocation = info->next;
1827 
1828 	locker.Unlock();
1829 
1830 	// surrender sync reference
1831 	put_select_sync(info->sync);
1832 
1833 	return B_OK;
1834 }
1835 
1836 
1837 int32
1838 thread_max_threads(void)
1839 {
1840 	return sMaxThreads;
1841 }
1842 
1843 
1844 int32
1845 thread_used_threads(void)
1846 {
1847 	return sUsedThreads;
1848 }
1849 
1850 
1851 status_t
1852 thread_init(kernel_args *args)
1853 {
1854 	uint32 i;
1855 
1856 	TRACE(("thread_init: entry\n"));
1857 
1858 	// create the thread hash table
1859 	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
1860 		&thread_struct_compare, &thread_struct_hash);
1861 
1862 	// zero out the dead thread structure q
1863 	memset(&dead_q, 0, sizeof(dead_q));
1864 
1865 	// allocate snooze sem
1866 	sSnoozeSem = create_sem(0, "snooze sem");
1867 	if (sSnoozeSem < 0) {
1868 		panic("error creating snooze sem\n");
1869 		return sSnoozeSem;
1870 	}
1871 
1872 	if (arch_thread_init(args) < B_OK)
1873 		panic("arch_thread_init() failed!\n");
1874 
1875 	// skip all thread IDs including B_SYSTEM_TEAM, which is reserved
1876 	sNextThreadID = B_SYSTEM_TEAM + 1;
1877 
1878 	// create an idle thread for each cpu
1879 
1880 	for (i = 0; i < args->num_cpus; i++) {
1881 		struct thread *thread;
1882 		area_info info;
1883 		char name[64];
1884 
1885 		sprintf(name, "idle thread %lu", i + 1);
1886 		thread = create_thread_struct(&sIdleThreads[i], name,
1887 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
1888 		if (thread == NULL) {
1889 			panic("error creating idle thread struct\n");
1890 			return B_NO_MEMORY;
1891 		}
1892 
1893 		thread->team = team_get_kernel_team();
1894 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
1895 		thread->state = B_THREAD_RUNNING;
1896 		thread->next_state = B_THREAD_READY;
1897 		sprintf(name, "idle thread %lu kstack", i + 1);
1898 		thread->kernel_stack_area = find_area(name);
1899 		thread->entry = NULL;
1900 
1901 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
1902 			panic("error finding idle kstack area\n");
1903 
1904 		thread->kernel_stack_base = (addr_t)info.address;
1905 
1906 		hash_insert(sThreadHash, thread);
1907 		insert_thread_into_team(thread->team, thread);
1908 	}
1909 	sUsedThreads = args->num_cpus;
1910 
1911 	// create a set of death stacks
1912 
1913 	sNumDeathStacks = smp_get_num_cpus();
1914 	if (sNumDeathStacks > 8 * sizeof(sDeathStackBitmap)) {
1915 		// clamp values for really beefy machines
1916 		sNumDeathStacks = 8 * sizeof(sDeathStackBitmap);
1917 	}
1918 	sDeathStackBitmap = 0;
1919 	sDeathStacks = (struct death_stack *)malloc(sNumDeathStacks
1920 		* sizeof(struct death_stack));
1921 	if (sDeathStacks == NULL) {
1922 		panic("error creating death stacks\n");
1923 		return B_NO_MEMORY;
1924 	}
1925 	{
1926 		char temp[64];
1927 
1928 		for (i = 0; i < sNumDeathStacks; i++) {
1929 			sprintf(temp, "death stack %lu", i);
1930 			sDeathStacks[i].area = create_area(temp,
1931 				(void **)&sDeathStacks[i].address, B_ANY_KERNEL_ADDRESS,
1932 				KERNEL_STACK_SIZE, B_FULL_LOCK,
1933 				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
1934 			if (sDeathStacks[i].area < 0) {
1935 				panic("error creating death stacks\n");
1936 				return sDeathStacks[i].area;
1937 			}
1938 			sDeathStacks[i].in_use = false;
1939 		}
1940 	}
1941 	sDeathStackSem = create_sem(sNumDeathStacks, "death stack availability");
1942 
1943 	// set up some debugger commands
1944 	add_debugger_command("threads", &dump_thread_list, "list all threads");
1945 	add_debugger_command("ready", &dump_thread_list, "list all ready threads");
1946 	add_debugger_command("running", &dump_thread_list, "list all running threads");
1947 	add_debugger_command("waiting", &dump_thread_list, "list all waiting threads (optionally for a specific semaphore)");
1948 	add_debugger_command("realtime", &dump_thread_list, "list all realtime threads");
1949 	add_debugger_command("thread", &dump_thread_info, "list info about a particular thread");
1950 	add_debugger_command("unreal", &make_thread_unreal, "set realtime priority threads to normal priority");
1951 	add_debugger_command("suspend", &make_thread_suspended, "suspend a thread");
1952 	add_debugger_command("resume", &make_thread_resumed, "resume a thread");
1953 	add_debugger_command("drop", &drop_into_debugger, "drop a thread into the user-debugger");
1954 	add_debugger_command("priority", &set_thread_prio, "set a thread priority");
1955 
1956 	return B_OK;
1957 }
1958 
1959 
1960 status_t
1961 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
1962 {
1963 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
1964 	// so that get_current_cpu and friends will work, which is crucial for
1965 	// a lot of low level routines
1966 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
1967 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
1968 	return B_OK;
1969 }
1970 
1971 //	#pragma mark - public kernel API
1972 
1973 
1974 void
1975 exit_thread(status_t returnValue)
1976 {
1977 	struct thread *thread = thread_get_current_thread();
1978 
1979 	thread->exit.status = returnValue;
1980 	thread->exit.reason = THREAD_RETURN_EXIT;
1981 
1982 	// For a userland thread the exit is delivered as a SIGKILLTHR signal,
1983 	// so it goes through the usual signal handling. A thread of the kernel
1984 	// team skips signal delivery and calls thread_exit() directly.
1985 	if (thread->team != team_get_kernel_team())
1986 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
1987 	else
1988 		thread_exit();
1989 }
1990 
1991 
1992 status_t
1993 kill_thread(thread_id id)
1994 {
1995 	if (id <= 0)
1996 		return B_BAD_VALUE;
1997 
1998 	return send_signal(id, SIGKILLTHR);
1999 }
2000 
2001 
2002 status_t
2003 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2004 {
2005 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2006 }
2007 
2008 
2009 int32
2010 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2011 {
2012 	return receive_data_etc(sender, buffer, bufferSize, 0);
2013 }
2014 
2015 
2016 bool
2017 has_data(thread_id thread)
2018 {
2019 	int32 count;
2020 
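	// ToDo: the "thread" argument is currently ignored; this only checks
	//	the calling thread's own message semaphore.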
2021 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
2022 			&count) != B_OK)
2023 		return false;
2024 
2025 	return count != 0;
2026 }
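
/*!	Usage sketch for the thread messaging calls above (illustrative only;
	"receiverThread" is assumed to be a valid thread ID and 'DATA' is an
	arbitrary message code):

		// sender side
		int32 value = 42;
		send_data(receiverThread, 'DATA', &value, sizeof(value));

		// receiver side
		if (has_data(find_thread(NULL))) {
			thread_id sender;
			int32 received;
			int32 code = receive_data(&sender, &received, sizeof(received));
				// code == 'DATA', received == 42
		}
*/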
2027 
2028 
2029 status_t
2030 _get_thread_info(thread_id id, thread_info *info, size_t size)
2031 {
2032 	status_t status = B_OK;
2033 	struct thread *thread;
2034 	cpu_status state;
2035 
2036 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2037 		return B_BAD_VALUE;
2038 
2039 	state = disable_interrupts();
2040 	GRAB_THREAD_LOCK();
2041 
2042 	thread = thread_get_thread_struct_locked(id);
2043 	if (thread == NULL) {
2044 		status = B_BAD_VALUE;
2045 		goto err;
2046 	}
2047 
2048 	fill_thread_info(thread, info, size);
2049 
2050 err:
2051 	RELEASE_THREAD_LOCK();
2052 	restore_interrupts(state);
2053 
2054 	return status;
2055 }
2056 
2057 
2058 status_t
2059 _get_next_thread_info(team_id team, int32 *_cookie, thread_info *info,
2060 	size_t size)
2061 {
2062 	status_t status = B_BAD_VALUE;
2063 	struct thread *thread = NULL;
2064 	cpu_status state;
2065 	int slot;
2066 	thread_id lastThreadID;
2067 
2068 	if (info == NULL || size != sizeof(thread_info) || team < B_OK)
2069 		return B_BAD_VALUE;
2070 
2071 	if (team == B_CURRENT_TEAM)
2072 		team = team_get_current_team_id();
2073 	else if (!team_is_valid(team))
2074 		return B_BAD_VALUE;
2075 
2076 	slot = *_cookie;
2077 
2078 	state = disable_interrupts();
2079 	GRAB_THREAD_LOCK();
2080 
2081 	lastThreadID = peek_next_thread_id();
2082 	if (slot >= lastThreadID)
2083 		goto err;
2084 
2085 	while (slot < lastThreadID
2086 		&& (!(thread = thread_get_thread_struct_locked(slot))
2087 			|| thread->team->id != team))
2088 		slot++;
2089 
2090 	if (thread != NULL && thread->team->id == team) {
2091 		fill_thread_info(thread, info, size);
2092 
2093 		*_cookie = slot + 1;
2094 		status = B_OK;
2095 	}
2096 
2097 err:
2098 	RELEASE_THREAD_LOCK();
2099 	restore_interrupts(state);
2100 
2101 	return status;
2102 }
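
/*!	The cookie is simply the next thread ID slot to try, so a caller keeps
	calling until the function fails. A sketch using the public userland
	counterpart get_next_thread_info(), with "team" being any valid team ID:

		int32 cookie = 0;
		thread_info info;
		while (get_next_thread_info(team, &cookie, &info) == B_OK) {
			// use info.thread, info.name, info.state, ...
		}
*/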
2103 
2104 
2105 thread_id
2106 find_thread(const char *name)
2107 {
2108 	struct hash_iterator iterator;
2109 	struct thread *thread;
2110 	cpu_status state;
2111 
2112 	if (name == NULL)
2113 		return thread_get_current_thread_id();
2114 
2115 	state = disable_interrupts();
2116 	GRAB_THREAD_LOCK();
2117 
2118 	// ToDo: this might not be in the same order as find_thread() in BeOS
2119 	//		which could be theoretically problematic.
2120 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2121 	//		cheap either - although this function is probably used very rarely.
2122 
2123 	hash_open(sThreadHash, &iterator);
2124 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
2125 			!= NULL) {
2126 		// Search through hash
2127 		if (thread->name != NULL && !strcmp(thread->name, name)) {
2128 			thread_id id = thread->id;
2129 
2130 			RELEASE_THREAD_LOCK();
2131 			restore_interrupts(state);
2132 			return id;
2133 		}
2134 	}
2135 
2136 	RELEASE_THREAD_LOCK();
2137 	restore_interrupts(state);
2138 
2139 	return B_NAME_NOT_FOUND;
2140 }
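
/*!	Sketch: find_thread(NULL) is the cheap path and returns the caller's own
	ID, while a lookup by name walks the whole thread hash ("page writer" is
	just a hypothetical thread name here):

		thread_id self = find_thread(NULL);
		thread_id writer = find_thread("page writer");
		if (writer >= B_OK) {
			// the thread existed at the time of the lookup
		}
*/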
2141 
2142 
2143 status_t
2144 rename_thread(thread_id id, const char *name)
2145 {
2146 	struct thread *thread = thread_get_current_thread();
2147 	status_t status = B_BAD_THREAD_ID;
2148 	cpu_status state;
2149 
2150 	if (name == NULL)
2151 		return B_BAD_VALUE;
2152 
2153 	state = disable_interrupts();
2154 	GRAB_THREAD_LOCK();
2155 
2156 	if (thread->id != id)
2157 		thread = thread_get_thread_struct_locked(id);
2158 
2159 	if (thread != NULL) {
2160 		if (thread->team == thread_get_current_thread()->team) {
2161 			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2162 			status = B_OK;
2163 		} else
2164 			status = B_NOT_ALLOWED;
2165 	}
2166 
2167 	RELEASE_THREAD_LOCK();
2168 	restore_interrupts(state);
2169 
2170 	return status;
2171 }
2172 
2173 
2174 status_t
2175 set_thread_priority(thread_id id, int32 priority)
2176 {
2177 	struct thread *thread;
2178 	int32 oldPriority;
2179 
2180 	// make sure the passed in priority is within bounds
2181 	if (priority > B_MAX_PRIORITY)
2182 		priority = B_MAX_PRIORITY;
2183 	if (priority < B_MIN_PRIORITY)
2184 		priority = B_MIN_PRIORITY;
2185 
2186 	thread = thread_get_current_thread();
2187 	if (thread->id == id) {
2188 		// It's the calling thread, so it cannot be in the run queue, and we
2189 		// can manipulate its structure directly.
2190 		oldPriority = thread->priority;
2191 			// Note that this might not return the correct value if we are
2192 			// preempted here and another thread changes our priority before
2193 			// the next line is executed.
2194 		thread->priority = thread->next_priority = priority;
2195 	} else {
2196 		cpu_status state = disable_interrupts();
2197 		GRAB_THREAD_LOCK();
2198 
2199 		thread = thread_get_thread_struct_locked(id);
2200 		if (thread) {
2201 			oldPriority = thread->priority;
2202 			thread->next_priority = priority;
2203 			if (thread->state == B_THREAD_READY && thread->priority != priority) {
2204 				// if the thread is in the run queue, we reinsert it at a new position
2205 				scheduler_remove_from_run_queue(thread);
2206 				thread->priority = priority;
2207 				scheduler_enqueue_in_run_queue(thread);
2208 			} else
2209 				thread->priority = priority;
2210 		} else
2211 			oldPriority = B_BAD_THREAD_ID;
2212 
2213 		RELEASE_THREAD_LOCK();
2214 		restore_interrupts(state);
2215 	}
2216 
2217 	return oldPriority;
2218 }
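
/*!	Sketch: the return value is the previous priority (or B_BAD_THREAD_ID),
	which lets a temporary boost be undone later (illustrative only, with
	"id" being any valid thread ID):

		int32 oldPriority = set_thread_priority(id, B_DISPLAY_PRIORITY);
		if (oldPriority >= 0) {
			// ... time critical work ...
			set_thread_priority(id, oldPriority);
		}
*/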
2219 
2220 
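/*!	Blocks the calling thread for the given time. sSnoozeSem is created with
	a count of zero and is never released, so the acquire_sem_etc() call can
	only ever time out; that timeout (or B_WOULD_BLOCK for a zero timeout) is
	then translated into B_OK.
*/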
2221 status_t
2222 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2223 {
2224 	status_t status;
2225 
2226 	if (timebase != B_SYSTEM_TIMEBASE)
2227 		return B_BAD_VALUE;
2228 
2229 	status = acquire_sem_etc(sSnoozeSem, 1, flags, timeout);
2230 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2231 		return B_OK;
2232 
2233 	return status;
2234 }
2235 
2236 
2237 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2238 status_t
2239 snooze(bigtime_t timeout)
2240 {
2241 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2242 }
2243 
2244 
2245 /*!
2246 	snooze_until() for internal kernel use only; doesn't interrupt on
2247 	signals.
2248 */
2249 status_t
2250 snooze_until(bigtime_t timeout, int timebase)
2251 {
2252 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2253 }
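
/*!	Sketch: snooze() takes a relative delay, while snooze_until() takes an
	absolute wake-up time, which avoids drift in periodic loops (system_time()
	is the B_SYSTEM_TIMEBASE clock):

		bigtime_t nextWakeup = system_time();
		for (;;) {
			nextWakeup += 100000;	// 100 ms period
			snooze_until(nextWakeup, B_SYSTEM_TIMEBASE);
			// ... periodic work ...
		}
*/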
2254 
2255 
2256 status_t
2257 wait_for_thread(thread_id thread, status_t *_returnCode)
2258 {
2259 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2260 }
2261 
2262 
2263 status_t
2264 suspend_thread(thread_id id)
2265 {
2266 	if (id <= 0)
2267 		return B_BAD_VALUE;
2268 
2269 	return send_signal(id, SIGSTOP);
2270 }
2271 
2272 
2273 status_t
2274 resume_thread(thread_id id)
2275 {
2276 	if (id <= 0)
2277 		return B_BAD_VALUE;
2278 
2279 	return send_signal(id, SIGCONT);
2280 }
2281 
2282 
2283 thread_id
2284 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2285 	void *arg)
2286 {
2287 	return create_thread(name, team_get_kernel_team()->id,
2288 		(thread_entry_func)function, arg, NULL, priority, true, -1);
2289 }
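
/*!	Sketch of the usual life cycle of a kernel thread created through this
	function (illustrative only; my_worker is a hypothetical thread_func):

		thread_id worker = spawn_kernel_thread(&my_worker, "my worker",
			B_NORMAL_PRIORITY, NULL);
		if (worker >= B_OK) {
			resume_thread(worker);
				// threads start out suspended
			status_t result;
			wait_for_thread(worker, &result);
		}
*/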
2290 
2291 
2292 /* TODO: split this; have kernel version set kerrno */
2293 int
2294 getrlimit(int resource, struct rlimit * rlp)
2295 {
2296 	if (!rlp)
2297 		return B_BAD_ADDRESS;
2298 
2299 	switch (resource) {
2300 		case RLIMIT_NOFILE:
2301 		case RLIMIT_NOVMON:
2302 			return vfs_getrlimit(resource, rlp);
2303 
2304 		default:
2305 			return EINVAL;
2306 	}
2307 
2308 	return 0;
2309 }
2310 
2311 
2312 /* TODO: split this; have kernel version set kerrno */
2313 int
2314 setrlimit(int resource, const struct rlimit * rlp)
2315 {
2316 	if (!rlp)
2317 		return B_BAD_ADDRESS;
2318 
2319 	switch (resource) {
2320 		case RLIMIT_NOFILE:
2321 		case RLIMIT_NOVMON:
2322 			return vfs_setrlimit(resource, rlp);
2323 
2324 		default:
2325 			return EINVAL;
2326 	}
2327 
2328 	return 0;
2329 }
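
/*!	Sketch: only RLIMIT_NOFILE and RLIMIT_NOVMON are handled (by the VFS);
	raising the soft open file limit to the hard limit would look like this:

		struct rlimit rl;
		if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
			rl.rlim_cur = rl.rlim_max;
			setrlimit(RLIMIT_NOFILE, &rl);
		}
*/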
2330 
2331 
2332 //	#pragma mark - syscalls
2333 
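// The syscall wrappers below share one pattern: every pointer coming from
// userland is validated with IS_USER_ADDRESS() and is only accessed through
// user_memcpy()/user_strlcpy(), never dereferenced directly.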
2334 
2335 void
2336 _user_exit_thread(status_t returnValue)
2337 {
2338 	exit_thread(returnValue);
2339 }
2340 
2341 
2342 status_t
2343 _user_kill_thread(thread_id thread)
2344 {
2345 	return kill_thread(thread);
2346 }
2347 
2348 
2349 status_t
2350 _user_resume_thread(thread_id thread)
2351 {
2352 	return resume_thread(thread);
2353 }
2354 
2355 
2356 status_t
2357 _user_suspend_thread(thread_id thread)
2358 {
2359 	return suspend_thread(thread);
2360 }
2361 
2362 
2363 status_t
2364 _user_rename_thread(thread_id thread, const char *userName)
2365 {
2366 	char name[B_OS_NAME_LENGTH];
2367 
2368 	if (!IS_USER_ADDRESS(userName)
2369 		|| userName == NULL
2370 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2371 		return B_BAD_ADDRESS;
2372 
2373 	return rename_thread(thread, name);
2374 }
2375 
2376 
2377 int32
2378 _user_set_thread_priority(thread_id thread, int32 newPriority)
2379 {
2380 	return set_thread_priority(thread, newPriority);
2381 }
2382 
2383 
2384 thread_id
2385 _user_spawn_thread(int32 (*entry)(thread_func, void *), const char *userName,
2386 	int32 priority, void *data1, void *data2)
2387 {
2388 	char name[B_OS_NAME_LENGTH];
2389 	thread_id threadID;
2390 
2391 	if (!IS_USER_ADDRESS(entry) || entry == NULL
2392 		|| (userName != NULL && (!IS_USER_ADDRESS(userName)
2393 			|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)))
2394 		return B_BAD_ADDRESS;
2395 
2396 	threadID = create_thread(userName != NULL ? name : "user thread",
2397 		thread_get_current_thread()->team->id, entry,
2398 		data1, data2, priority, false, -1);
2399 
2400 	user_debug_thread_created(threadID);
2401 
2402 	return threadID;
2403 }
2404 
2405 
2406 status_t
2407 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2408 {
2409 	return snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2410 }
2411 
2412 
2413 void
2414 _user_thread_yield(void)
2415 {
2416 	thread_yield();
2417 }
2418 
2419 
2420 status_t
2421 _user_get_thread_info(thread_id id, thread_info *userInfo)
2422 {
2423 	thread_info info;
2424 	status_t status;
2425 
2426 	if (!IS_USER_ADDRESS(userInfo))
2427 		return B_BAD_ADDRESS;
2428 
2429 	status = _get_thread_info(id, &info, sizeof(thread_info));
2430 
2431 	if (status >= B_OK
2432 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2433 		return B_BAD_ADDRESS;
2434 
2435 	return status;
2436 }
2437 
2438 
2439 status_t
2440 _user_get_next_thread_info(team_id team, int32 *userCookie,
2441 	thread_info *userInfo)
2442 {
2443 	status_t status;
2444 	thread_info info;
2445 	int32 cookie;
2446 
2447 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2448 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2449 		return B_BAD_ADDRESS;
2450 
2451 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2452 	if (status < B_OK)
2453 		return status;
2454 
2455 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2456 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2457 		return B_BAD_ADDRESS;
2458 
2459 	return status;
2460 }
2461 
2462 
2463 thread_id
2464 _user_find_thread(const char *userName)
2465 {
2466 	char name[B_OS_NAME_LENGTH];
2467 
2468 	if (userName == NULL)
2469 		return find_thread(NULL);
2470 
2471 	if (!IS_USER_ADDRESS(userName)
2472 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2473 		return B_BAD_ADDRESS;
2474 
2475 	return find_thread(name);
2476 }
2477 
2478 
2479 status_t
2480 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2481 {
2482 	status_t returnCode;
2483 	status_t status;
2484 
2485 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2486 		return B_BAD_ADDRESS;
2487 
2488 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2489 
2490 	if (status == B_OK && userReturnCode != NULL
2491 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK)
2492 		return B_BAD_ADDRESS;
2493 
2494 	return status;
2495 }
2496 
2497 
2498 bool
2499 _user_has_data(thread_id thread)
2500 {
2501 	return has_data(thread);
2502 }
2503 
2504 
2505 status_t
2506 _user_send_data(thread_id thread, int32 code, const void *buffer,
2507 	size_t bufferSize)
2508 {
2509 	if (!IS_USER_ADDRESS(buffer))
2510 		return B_BAD_ADDRESS;
2511 
2512 	return send_data_etc(thread, code, buffer, bufferSize,
2513 		B_KILL_CAN_INTERRUPT);
2514 		// supports userland buffers
2515 }
2516 
2517 
2518 status_t
2519 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2520 {
2521 	thread_id sender;
2522 	status_t code;
2523 
2524 	if (!IS_USER_ADDRESS(_userSender)
2525 		|| !IS_USER_ADDRESS(buffer))
2526 		return B_BAD_ADDRESS;
2527 
2528 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2529 		// supports userland buffers
2530 
2531 	if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
2532 		return B_BAD_ADDRESS;
2533 
2534 	return code;
2535 }
2536 
2537 
2538 // ToDo: the following two functions don't belong here
2539 
2540 
2541 int
2542 _user_getrlimit(int resource, struct rlimit *urlp)
2543 {
2544 	struct rlimit rl;
2545 	int ret;
2546 
2547 	if (urlp == NULL)
2548 		return EINVAL;
2549 
2550 	if (!IS_USER_ADDRESS(urlp))
2551 		return B_BAD_ADDRESS;
2552 
2553 	ret = getrlimit(resource, &rl);
2554 
2555 	if (ret == 0) {
2556 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
2557 		if (ret < 0)
2558 			return ret;
2559 
2560 		return 0;
2561 	}
2562 
2563 	return ret;
2564 }
2565 
2566 
2567 int
2568 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
2569 {
2570 	struct rlimit resourceLimit;
2571 
2572 	if (userResourceLimit == NULL)
2573 		return EINVAL;
2574 
2575 	if (!IS_USER_ADDRESS(userResourceLimit)
2576 		|| user_memcpy(&resourceLimit, userResourceLimit,
2577 			sizeof(struct rlimit)) < B_OK)
2578 		return B_BAD_ADDRESS;
2579 
2580 	return setrlimit(resource, &resourceLimit);
2581 }
2582 
2583