xref: /haiku/src/system/kernel/thread.cpp (revision 2ed64c408dbaaf00502572ff020260e6c379a014)
1 /*
2  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*! Threading routines */
10 
11 
12 #include <thread.h>
13 
14 #include <errno.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <sys/resource.h>
19 
20 #include <OS.h>
21 
22 #include <util/AutoLock.h>
23 #include <util/khash.h>
24 
25 #include <arch/debug.h>
26 #include <boot/kernel_args.h>
27 #include <condition_variable.h>
28 #include <cpu.h>
29 #include <int.h>
30 #include <kimage.h>
31 #include <kscheduler.h>
32 #include <ksignal.h>
33 #include <real_time_clock.h>
34 #include <smp.h>
35 #include <syscalls.h>
36 #include <syscall_restart.h>
37 #include <team.h>
38 #include <tls.h>
39 #include <user_runtime.h>
40 #include <user_thread.h>
41 #include <vfs.h>
42 #include <vm.h>
43 #include <vm_address_space.h>
44 #include <wait_for_objects.h>
45 
46 
47 //#define TRACE_THREAD
48 #ifdef TRACE_THREAD
49 #	define TRACE(x) dprintf x
50 #else
51 #	define TRACE(x) ;
52 #endif
53 
54 
55 #define THREAD_MAX_MESSAGE_SIZE		65536
56 
57 
58 struct thread_key {
59 	thread_id id;
60 };
61 
62 // global
63 spinlock thread_spinlock = 0;
64 
65 // thread list
66 static struct thread sIdleThreads[B_MAX_CPU_COUNT];
67 static hash_table *sThreadHash = NULL;
68 static thread_id sNextThreadID = 1;
69 
70 // some arbitrarily chosen limits - should probably depend on the available
71 // memory (the limit is not yet enforced)
72 static int32 sMaxThreads = 4096;
73 static int32 sUsedThreads = 0;
74 
75 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
76 	struct thread*	thread;
77 	team_id			teamID;
78 	sem_id			deathSem;
79 
80 	UndertakerEntry(struct thread* thread, team_id teamID, sem_id deathSem)
81 		:
82 		thread(thread),
83 		teamID(teamID),
84 		deathSem(deathSem)
85 	{
86 	}
87 };
88 
89 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
90 static ConditionVariable sUndertakerCondition;
91 
92 // The dead queue is used as a pool from which to retrieve and reuse previously
93 // allocated thread structs when creating a new thread. It should go away once
94 // the slab allocator is in use.
95 static struct thread_queue dead_q;
96 
97 static void thread_kthread_entry(void);
98 static void thread_kthread_exit(void);
99 
100 
101 /*!
102 	Inserts a thread into a team.
103 	You must hold the team lock when you call this function.
104 */
105 static void
106 insert_thread_into_team(struct team *team, struct thread *thread)
107 {
108 	thread->team_next = team->thread_list;
109 	team->thread_list = thread;
110 	team->num_threads++;
111 
112 	if (team->num_threads == 1) {
113 		// this was the first thread
114 		team->main_thread = thread;
115 	}
116 	thread->team = team;
117 }
118 
119 
120 /*!
121 	Removes a thread from a team.
122 	You must hold the team lock when you call this function.
123 */
124 static void
125 remove_thread_from_team(struct team *team, struct thread *thread)
126 {
127 	struct thread *temp, *last = NULL;
128 
129 	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
130 		if (temp == thread) {
131 			if (last == NULL)
132 				team->thread_list = temp->team_next;
133 			else
134 				last->team_next = temp->team_next;
135 
136 			team->num_threads--;
137 			break;
138 		}
139 		last = temp;
140 	}
141 }
142 
143 
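/*!
	Callbacks for the global thread hash table (sThreadHash): threads are
	hashed and compared by their thread ID.
*/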
144 static int
145 thread_struct_compare(void *_t, const void *_key)
146 {
147 	struct thread *thread = (struct thread*)_t;
148 	const struct thread_key *key = (const struct thread_key*)_key;
149 
150 	if (thread->id == key->id)
151 		return 0;
152 
153 	return 1;
154 }
155 
156 
157 static uint32
158 thread_struct_hash(void *_t, const void *_key, uint32 range)
159 {
160 	struct thread *thread = (struct thread*)_t;
161 	const struct thread_key *key = (const struct thread_key*)_key;
162 
163 	if (thread != NULL)
164 		return thread->id % range;
165 
166 	return (uint32)key->id % range;
167 }
168 
169 
170 static void
171 reset_signals(struct thread *thread)
172 {
173 	thread->sig_pending = 0;
174 	thread->sig_block_mask = 0;
175 	memset(thread->sig_action, 0, 32 * sizeof(struct sigaction));
176 	thread->signal_stack_base = 0;
177 	thread->signal_stack_size = 0;
178 	thread->signal_stack_enabled = false;
179 }
180 
181 
182 /*!
183 	Allocates and fills in thread structure (or reuses one from the
184 	dead queue).
185 
186 	\param threadID The ID to be assigned to the new thread. If
187 		  \code < 0 \endcode a fresh one is allocated.
188 	\param inthread If non-NULL, initialize this thread struct instead of allocating one.
189 */
190 
191 static struct thread *
192 create_thread_struct(struct thread *inthread, const char *name,
193 	thread_id threadID, struct cpu_ent *cpu)
194 {
195 	struct thread *thread;
196 	cpu_status state;
197 	char temp[64];
198 
199 	if (inthread == NULL) {
200 		// try to recycle one from the dead queue first
201 		state = disable_interrupts();
202 		GRAB_THREAD_LOCK();
203 		thread = thread_dequeue(&dead_q);
204 		RELEASE_THREAD_LOCK();
205 		restore_interrupts(state);
206 
207 		// if not, create a new one
208 		if (thread == NULL) {
209 			thread = (struct thread *)malloc(sizeof(struct thread));
210 			if (thread == NULL)
211 				return NULL;
212 		}
213 	} else {
214 		thread = inthread;
215 	}
216 
217 	if (name != NULL)
218 		strlcpy(thread->name, name, B_OS_NAME_LENGTH);
219 	else
220 		strcpy(thread->name, "unnamed thread");
221 
222 	thread->flags = 0;
223 	thread->id = threadID >= 0 ? threadID : allocate_thread_id();
224 	thread->team = NULL;
225 	thread->cpu = cpu;
226 	thread->fault_handler = 0;
227 	thread->page_faults_allowed = 1;
228 	thread->kernel_stack_area = -1;
229 	thread->kernel_stack_base = 0;
230 	thread->user_stack_area = -1;
231 	thread->user_stack_base = 0;
232 	thread->user_local_storage = 0;
233 	thread->kernel_errno = 0;
234 	thread->team_next = NULL;
235 	thread->queue_next = NULL;
236 	thread->priority = thread->next_priority = -1;
237 	thread->args1 = NULL;  thread->args2 = NULL;
238 	thread->alarm.period = 0;
239 	reset_signals(thread);
240 	thread->in_kernel = true;
241 	thread->was_yielded = false;
242 	thread->user_time = 0;
243 	thread->kernel_time = 0;
244 	thread->last_time = 0;
245 	thread->exit.status = 0;
246 	thread->exit.reason = 0;
247 	thread->exit.signal = 0;
248 	list_init(&thread->exit.waiters);
249 	thread->select_infos = NULL;
250 
251 	sprintf(temp, "thread_%ld_retcode_sem", thread->id);
252 	thread->exit.sem = create_sem(0, temp);
253 	if (thread->exit.sem < B_OK)
254 		goto err1;
255 
256 	sprintf(temp, "%s send", thread->name);
257 	thread->msg.write_sem = create_sem(1, temp);
258 	if (thread->msg.write_sem < B_OK)
259 		goto err2;
260 
261 	sprintf(temp, "%s receive", thread->name);
262 	thread->msg.read_sem = create_sem(0, temp);
263 	if (thread->msg.read_sem < B_OK)
264 		goto err3;
265 
266 	if (arch_thread_init_thread_struct(thread) < B_OK)
267 		goto err4;
268 
269 	return thread;
270 
271 err4:
272 	delete_sem(thread->msg.read_sem);
273 err3:
274 	delete_sem(thread->msg.write_sem);
275 err2:
276 	delete_sem(thread->exit.sem);
277 err1:
278 	// ToDo: put them in the dead queue instead?
279 	if (inthread == NULL)
280 		free(thread);
281 	return NULL;
282 }
283 
284 
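/*!
	Deletes the semaphores of a thread struct created by
	create_thread_struct() and frees the struct itself.
*/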
285 static void
286 delete_thread_struct(struct thread *thread)
287 {
288 	delete_sem(thread->exit.sem);
289 	delete_sem(thread->msg.write_sem);
290 	delete_sem(thread->msg.read_sem);
291 
292 	// ToDo: put them in the dead queue instead?
293 	free(thread);
294 }
295 
296 
297 /*! This function gets run by a new thread before anything else */
298 static void
299 thread_kthread_entry(void)
300 {
301 	struct thread *thread = thread_get_current_thread();
302 
303 	// simulates the thread spinlock release that would occur if the thread had been
304 	// rescheduled from. The reschedule didn't happen because the thread is new.
305 	RELEASE_THREAD_LOCK();
306 
307 	// start tracking time
308 	thread->last_time = system_time();
309 
310 	enable_interrupts(); // this essentially simulates a return-from-interrupt
311 }
312 
313 
314 static void
315 thread_kthread_exit(void)
316 {
317 	struct thread *thread = thread_get_current_thread();
318 
319 	thread->exit.reason = THREAD_RETURN_EXIT;
320 	thread_exit();
321 }
322 
323 
324 /*!
325 	Initializes the thread and jumps to its userspace entry point.
326 	This function is called at creation time of every user thread,
327 	but not for a team's main thread.
328 */
329 static int
330 _create_user_thread_kentry(void)
331 {
332 	struct thread *thread = thread_get_current_thread();
333 
334 	// a signal may have been delivered here
335 	thread_at_kernel_exit();
336 
337 	// jump to the entry point in user space
338 	arch_thread_enter_userspace(thread, (addr_t)thread->entry,
339 		thread->args1, thread->args2);
340 
341 	// only get here if the above call fails
342 	return 0;
343 }
344 
345 
346 /*! Initializes the thread and calls its kernel space entry point. */
347 static int
348 _create_kernel_thread_kentry(void)
349 {
350 	struct thread *thread = thread_get_current_thread();
351 	int (*func)(void *args) = (int (*)(void *))thread->entry;
352 
353 	// call the entry function with the appropriate args
354 	return func(thread->args1);
355 }
356 
357 
358 /*!
359 	Creates a new thread in the team with the specified team ID.
360 
361 	\param threadID The ID to be assigned to the new thread. If
362 		  \code < 0 \endcode a fresh one is allocated.
363 */
364 static thread_id
365 create_thread(thread_creation_attributes& attributes, bool kernel)
366 {
367 	struct thread *thread, *currentThread;
368 	struct team *team;
369 	cpu_status state;
370 	char stack_name[B_OS_NAME_LENGTH];
371 	status_t status;
372 	bool abort = false;
373 	bool debugNewThread = false;
374 
375 	TRACE(("create_thread(%s, id = %ld, %s)\n", attributes.name,
376 		attributes.thread, kernel ? "kernel" : "user"));
377 
378 	thread = create_thread_struct(NULL, attributes.name, attributes.thread,
379 		NULL);
380 	if (thread == NULL)
381 		return B_NO_MEMORY;
382 
383 	thread->priority = attributes.priority == -1
384 		? B_NORMAL_PRIORITY : attributes.priority;
385 	thread->next_priority = thread->priority;
386 	// ToDo: this could be dangerous in case someone calls resume_thread() on us
387 	thread->state = B_THREAD_SUSPENDED;
388 	thread->next_state = B_THREAD_SUSPENDED;
389 
390 	// init debug structure
391 	clear_thread_debug_info(&thread->debug_info, false);
392 
393 	snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", attributes.name,
394 		thread->id);
395 	thread->kernel_stack_area = create_area(stack_name,
396 		(void **)&thread->kernel_stack_base, B_ANY_KERNEL_ADDRESS,
397 		KERNEL_STACK_SIZE, B_FULL_LOCK,
398 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_KERNEL_STACK_AREA);
399 
400 	if (thread->kernel_stack_area < 0) {
401 		// we're not yet part of a team, so we can just bail out
402 		status = thread->kernel_stack_area;
403 
404 		dprintf("create_thread: error creating kernel stack: %s!\n",
405 			strerror(status));
406 
407 		delete_thread_struct(thread);
408 		return status;
409 	}
410 
411 	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE;
412 
413 	state = disable_interrupts();
414 	GRAB_THREAD_LOCK();
415 
416 	// If the new thread belongs to the same team as the current thread,
417 	// it may inherit some of the thread debug flags.
418 	currentThread = thread_get_current_thread();
419 	if (currentThread && currentThread->team->id == attributes.team) {
420 		// inherit all user flags...
421 		int32 debugFlags = currentThread->debug_info.flags
422 			& B_THREAD_DEBUG_USER_FLAG_MASK;
423 
424 		// ... except for the syscall tracing flags, unless explicitly requested
425 		if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
426 			debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
427 				| B_THREAD_DEBUG_POST_SYSCALL);
428 		}
429 
430 		thread->debug_info.flags = debugFlags;
431 
432 		// stop the new thread, if desired
433 		debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
434 	}
435 
436 	// insert into global list
437 	hash_insert(sThreadHash, thread);
438 	sUsedThreads++;
439 	RELEASE_THREAD_LOCK();
440 
441 	GRAB_TEAM_LOCK();
442 	// look at the team, make sure it's not being deleted
443 	team = team_get_team_struct_locked(attributes.team);
444 
445 	if (team == NULL || team->state == TEAM_STATE_DEATH)
446 		abort = true;
447 
448 	if (!abort && !kernel) {
449 		thread->user_thread = team_allocate_user_thread(team);
450 		abort = thread->user_thread == NULL;
451 	}
452 
453 	if (!abort) {
454 		// Debug the new thread, if the parent thread required that (see above),
455 		// or the respective global team debug flag is set. But only if a
456 		// debugger is installed for the team.
457 		debugNewThread |= (atomic_get(&team->debug_info.flags)
458 			& B_TEAM_DEBUG_STOP_NEW_THREADS);
459 		if (debugNewThread
460 			&& (atomic_get(&team->debug_info.flags)
461 				& B_TEAM_DEBUG_DEBUGGER_INSTALLED)) {
462 			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
463 		}
464 
465 		insert_thread_into_team(team, thread);
466 	}
467 
468 	RELEASE_TEAM_LOCK();
469 	if (abort) {
470 		GRAB_THREAD_LOCK();
471 		hash_remove(sThreadHash, thread);
472 		RELEASE_THREAD_LOCK();
473 	}
474 	restore_interrupts(state);
475 	if (abort) {
476 		delete_area(thread->kernel_stack_area);
477 		delete_thread_struct(thread);
478 		return B_BAD_TEAM_ID;
479 	}
480 
481 	thread->args1 = attributes.args1;
482 	thread->args2 = attributes.args2;
483 	thread->entry = attributes.entry;
484 	status = thread->id;
485 
486 	if (kernel) {
487 		// this sets up an initial kthread stack that runs the entry
488 
489 		// Note: whatever function wants to set up a user stack later for this
490 		// thread must initialize the TLS for it
491 		arch_thread_init_kthread_stack(thread, &_create_kernel_thread_kentry,
492 			&thread_kthread_entry, &thread_kthread_exit);
493 	} else {
494 		// create user stack
495 
496 		// the stack will be between USER_STACK_REGION and the main thread stack
497 		// area (the user stack of the main thread is created in
498 		// team_create_team())
499 		if (attributes.stack_address == NULL) {
500 			thread->user_stack_base = USER_STACK_REGION;
501 			if (attributes.stack_size <= 0)
502 				thread->user_stack_size = USER_STACK_SIZE;
503 			else
504 				thread->user_stack_size = PAGE_ALIGN(attributes.stack_size);
505 
506 			snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack",
507 				attributes.name, thread->id);
508 			thread->user_stack_area = create_area_etc(team, stack_name,
509 					(void **)&thread->user_stack_base, B_BASE_ADDRESS,
510 					thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
511 					B_READ_AREA | B_WRITE_AREA | B_STACK_AREA);
512 			if (thread->user_stack_area < B_OK
513 				|| arch_thread_init_tls(thread) < B_OK) {
514 				// great, we have a fully running thread without a (usable)
515 				// stack
516 				dprintf("create_thread: unable to create proper user stack!\n");
517 				status = thread->user_stack_area;
518 				kill_thread(thread->id);
519 			}
520 		} else {
521 			thread->user_stack_base = (addr_t)attributes.stack_address;
522 			thread->user_stack_size = attributes.stack_size;
523 		}
524 
525 		user_debug_update_new_thread_flags(thread->id);
526 
527 		// copy the user entry over to the args field in the thread struct
528 		// the function this will call will immediately switch the thread into
529 		// user space.
530 		arch_thread_init_kthread_stack(thread, &_create_user_thread_kentry,
531 			&thread_kthread_entry, &thread_kthread_exit);
532 	}
533 
534 	return status;
535 }
536 
537 
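/*!
	Kernel daemon that buries dead threads: it waits for entries to appear in
	the undertaker list, deletes each thread's kernel stack area, removes the
	thread from the kernel team, releases the team's death semaphore, notifies
	the debugger, and finally puts the thread struct into the dead queue for
	reuse.
*/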
538 static status_t
539 undertaker(void* /*args*/)
540 {
541 	while (true) {
542 		// wait for a thread to bury
543 		ConditionVariableEntry conditionEntry;
544 
545 		InterruptsSpinLocker locker(thread_spinlock);
546 		sUndertakerCondition.Add(&conditionEntry);
547 		locker.Unlock();
548 
549 		conditionEntry.Wait();
550 
551 		locker.Lock();
552 		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
553 		locker.Unlock();
554 
555 		if (_entry == NULL)
556 			continue;
557 
558 		UndertakerEntry entry = *_entry;
559 			// we need a copy, since the original entry is on the thread's stack
560 
561 		// we've got an entry
562 		struct thread* thread = entry.thread;
563 
564 		// delete the old kernel stack area
565 		delete_area(thread->kernel_stack_area);
566 
567 		// remove this thread from all of the global lists
568 		disable_interrupts();
569 		GRAB_TEAM_LOCK();
570 
571 		remove_thread_from_team(team_get_kernel_team(), thread);
572 
573 		RELEASE_TEAM_LOCK();
574 		enable_interrupts();
575 			// needed for the debugger notification below
576 
577 		if (entry.deathSem >= 0)
578 			release_sem_etc(entry.deathSem, 1, B_DO_NOT_RESCHEDULE);
579 
580 		// notify the debugger
581 		if (entry.teamID >= 0
582 			&& entry.teamID != team_get_kernel_team_id()) {
583 			user_debug_thread_deleted(entry.teamID, thread->id);
584 		}
585 
586 		// free the thread structure
587 		thread_enqueue(thread, &dead_q);
588 			// TODO: Use the slab allocator!
589 	}
590 }
591 
592 
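/*!
	Returns the ID of the semaphore the given thread is currently blocked on,
	or -1 if it is not waiting on a semaphore.
*/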
593 static sem_id
594 get_thread_wait_sem(struct thread* thread)
595 {
596 	if (thread->state == B_THREAD_WAITING
597 		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
598 		return (sem_id)(addr_t)thread->wait.object;
599 	}
600 	return -1;
601 }
602 
603 
604 /*!
605 	Fills the thread_info structure with information from the specified
606 	thread.
607 	The thread lock must be held when called.
608 */
609 static void
610 fill_thread_info(struct thread *thread, thread_info *info, size_t size)
611 {
612 	info->thread = thread->id;
613 	info->team = thread->team->id;
614 
615 	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
616 
617 	if (thread->state == B_THREAD_WAITING) {
618 		info->state = B_THREAD_WAITING;
619 
620 		switch (thread->wait.type) {
621 			case THREAD_BLOCK_TYPE_SNOOZE:
622 				info->state = B_THREAD_ASLEEP;
623 				break;
624 
625 			case THREAD_BLOCK_TYPE_SEMAPHORE:
626 			{
627 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
628 				if (sem == thread->msg.read_sem)
629 					info->state = B_THREAD_RECEIVING;
630 				break;
631 			}
632 
633 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
634 			default:
635 				break;
636 		}
637 	} else
638 		info->state = (thread_state)thread->state;
639 
640 	info->priority = thread->priority;
641 	info->user_time = thread->user_time;
642 	info->kernel_time = thread->kernel_time;
643 	info->stack_base = (void *)thread->user_stack_base;
644 	info->stack_end = (void *)(thread->user_stack_base
645 		+ thread->user_stack_size);
646 	info->sem = get_thread_wait_sem(thread);
647 }
648 
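/*!
	Sends a message (code plus an optional buffer) to the given thread. The
	caller waits on the target's write semaphore until any previous message
	has been received; the data is then copied into a cbuf chain and the
	target's read semaphore is released.
*/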
649 static status_t
650 send_data_etc(thread_id id, int32 code, const void *buffer,
651 	size_t bufferSize, int32 flags)
652 {
653 	struct thread *target;
654 	sem_id cachedSem;
655 	cpu_status state;
656 	status_t status;
657 	cbuf *data;
658 
659 	state = disable_interrupts();
660 	GRAB_THREAD_LOCK();
661 	target = thread_get_thread_struct_locked(id);
662 	if (!target) {
663 		RELEASE_THREAD_LOCK();
664 		restore_interrupts(state);
665 		return B_BAD_THREAD_ID;
666 	}
667 	cachedSem = target->msg.write_sem;
668 	RELEASE_THREAD_LOCK();
669 	restore_interrupts(state);
670 
671 	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
672 		return B_NO_MEMORY;
673 
674 	status = acquire_sem_etc(cachedSem, 1, flags, 0);
675 	if (status == B_INTERRUPTED) {
676 		// We got interrupted by a signal
677 		return status;
678 	}
679 	if (status != B_OK) {
680 		// Any other acquisition problems may be due to thread deletion
681 		return B_BAD_THREAD_ID;
682 	}
683 
684 	if (bufferSize > 0) {
685 		data = cbuf_get_chain(bufferSize);
686 		if (data == NULL)
687 			return B_NO_MEMORY;
688 		status = cbuf_user_memcpy_to_chain(data, 0, buffer, bufferSize);
689 		if (status < B_OK) {
690 			cbuf_free_chain(data);
691 			return B_NO_MEMORY;
692 		}
693 	} else
694 		data = NULL;
695 
696 	state = disable_interrupts();
697 	GRAB_THREAD_LOCK();
698 
699 	// The target thread could have been deleted at this point
700 	target = thread_get_thread_struct_locked(id);
701 	if (target == NULL) {
702 		RELEASE_THREAD_LOCK();
703 		restore_interrupts(state);
704 		cbuf_free_chain(data);
705 		return B_BAD_THREAD_ID;
706 	}
707 
708 	// Save the message information
709 	target->msg.sender = thread_get_current_thread()->id;
710 	target->msg.code = code;
711 	target->msg.size = bufferSize;
712 	target->msg.buffer = data;
713 	cachedSem = target->msg.read_sem;
714 
715 	RELEASE_THREAD_LOCK();
716 	restore_interrupts(state);
717 
718 	release_sem(cachedSem);
719 	return B_OK;
720 }
721 
722 
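/*!
	Receives a message for the current thread: waits on the thread's read
	semaphore, copies up to \a bufferSize bytes of the message into \a buffer,
	stores the sender in \a _sender, releases the write semaphore for the next
	sender, and returns the message code.
*/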
723 static int32
724 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
725 	int32 flags)
726 {
727 	struct thread *thread = thread_get_current_thread();
728 	status_t status;
729 	size_t size;
730 	int32 code;
731 
732 	status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
733 	if (status < B_OK) {
734 		// Actually, we're not supposed to return error codes
735 		// but since the only reason this can fail is that we
736 		// were killed, it's probably okay to do so (but also
737 		// meaningless).
738 		return status;
739 	}
740 
741 	if (buffer != NULL && bufferSize != 0) {
742 		size = min_c(bufferSize, thread->msg.size);
743 		status = cbuf_user_memcpy_from_chain(buffer, thread->msg.buffer,
744 			0, size);
745 		if (status < B_OK) {
746 			cbuf_free_chain(thread->msg.buffer);
747 			release_sem(thread->msg.write_sem);
748 			return status;
749 		}
750 	}
751 
752 	*_sender = thread->msg.sender;
753 	code = thread->msg.code;
754 
755 	cbuf_free_chain(thread->msg.buffer);
756 	release_sem(thread->msg.write_sem);
757 
758 	return code;
759 }
760 
761 
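/*!
	Common backend for getrlimit(): handles RLIMIT_CORE and RLIMIT_STACK
	directly and forwards the file descriptor related limits to the VFS.
*/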
762 static status_t
763 common_getrlimit(int resource, struct rlimit * rlp)
764 {
765 	if (!rlp)
766 		return B_BAD_ADDRESS;
767 
768 	switch (resource) {
769 		case RLIMIT_NOFILE:
770 		case RLIMIT_NOVMON:
771 			return vfs_getrlimit(resource, rlp);
772 
773 		case RLIMIT_CORE:
774 			rlp->rlim_cur = 0;
775 			rlp->rlim_max = 0;
776 			return B_OK;
777 
778 		case RLIMIT_STACK:
779 		{
780 			struct thread *thread = thread_get_current_thread();
781 			if (!thread)
782 				return B_ERROR;
783 			rlp->rlim_cur = thread->user_stack_size;
784 			rlp->rlim_max = thread->user_stack_size;
785 			return B_OK;
786 		}
787 
788 		default:
789 			return EINVAL;
790 	}
791 
792 	return B_OK;
793 }
794 
795 
796 static status_t
797 common_setrlimit(int resource, const struct rlimit * rlp)
798 {
799 	if (!rlp)
800 		return B_BAD_ADDRESS;
801 
802 	switch (resource) {
803 		case RLIMIT_NOFILE:
804 		case RLIMIT_NOVMON:
805 			return vfs_setrlimit(resource, rlp);
806 
807 		case RLIMIT_CORE:
808 		// We don't support core files, so only allow setting the limits to 0/0.
809 			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
810 				return EINVAL;
811 			return B_OK;
812 
813 		default:
814 			return EINVAL;
815 	}
816 
817 	return B_OK;
818 }
819 
820 
821 //	#pragma mark - debugger calls
822 
823 
824 static int
825 make_thread_unreal(int argc, char **argv)
826 {
827 	struct thread *thread;
828 	struct hash_iterator i;
829 	int32 id = -1;
830 
831 	if (argc > 2) {
832 		print_debugger_command_usage(argv[0]);
833 		return 0;
834 	}
835 
836 	if (argc > 1)
837 		id = strtoul(argv[1], NULL, 0);
838 
839 	hash_open(sThreadHash, &i);
840 
841 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
842 		if (id != -1 && thread->id != id)
843 			continue;
844 
845 		if (thread->priority > B_DISPLAY_PRIORITY) {
846 			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
847 			kprintf("thread %ld made unreal\n", thread->id);
848 		}
849 	}
850 
851 	hash_close(sThreadHash, &i, false);
852 	return 0;
853 }
854 
855 
856 static int
857 set_thread_prio(int argc, char **argv)
858 {
859 	struct thread *thread;
860 	struct hash_iterator i;
861 	int32 id;
862 	int32 prio;
863 
864 	if (argc > 3 || argc < 2) {
865 		print_debugger_command_usage(argv[0]);
866 		return 0;
867 	}
868 
869 	prio = strtoul(argv[1], NULL, 0);
870 	if (prio > B_MAX_PRIORITY)
871 		prio = B_MAX_PRIORITY;
872 	if (prio < B_MIN_PRIORITY)
873 		prio = B_MIN_PRIORITY;
874 
875 	if (argc > 2)
876 		id = strtoul(argv[2], NULL, 0);
877 	else
878 		id = thread_get_current_thread()->id;
879 
880 	hash_open(sThreadHash, &i);
881 
882 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
883 		if (thread->id != id)
884 			continue;
885 		thread->priority = thread->next_priority = prio;
886 		kprintf("thread %ld set to priority %ld\n", id, prio);
887 		break;
888 	}
889 	if (!thread)
890 		kprintf("thread %ld (%#lx) not found\n", id, id);
891 
892 	hash_close(sThreadHash, &i, false);
893 	return 0;
894 }
895 
896 
897 static int
898 make_thread_suspended(int argc, char **argv)
899 {
900 	struct thread *thread;
901 	struct hash_iterator i;
902 	int32 id;
903 
904 	if (argc > 2) {
905 		print_debugger_command_usage(argv[0]);
906 		return 0;
907 	}
908 
909 	if (argc == 1)
910 		id = thread_get_current_thread()->id;
911 	else
912 		id = strtoul(argv[1], NULL, 0);
913 
914 	hash_open(sThreadHash, &i);
915 
916 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
917 		if (thread->id != id)
918 			continue;
919 
920 		thread->next_state = B_THREAD_SUSPENDED;
921 		kprintf("thread %ld suspended\n", id);
922 		break;
923 	}
924 	if (!thread)
925 		kprintf("thread %ld (%#lx) not found\n", id, id);
926 
927 	hash_close(sThreadHash, &i, false);
928 	return 0;
929 }
930 
931 
932 static int
933 make_thread_resumed(int argc, char **argv)
934 {
935 	struct thread *thread;
936 	struct hash_iterator i;
937 	int32 id;
938 
939 	if (argc != 2) {
940 		print_debugger_command_usage(argv[0]);
941 		return 0;
942 	}
943 
944 	// force user to enter a thread id, as using
945 	// the current thread is usually not intended
946 	id = strtoul(argv[1], NULL, 0);
947 
948 	hash_open(sThreadHash, &i);
949 
950 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
951 		if (thread->id != id)
952 			continue;
953 
954 		if (thread->state == B_THREAD_SUSPENDED) {
955 			scheduler_enqueue_in_run_queue(thread);
956 			kprintf("thread %ld resumed\n", thread->id);
957 		}
958 		break;
959 	}
960 	if (!thread)
961 		kprintf("thread %ld (%#lx) not found\n", id, id);
962 
963 	hash_close(sThreadHash, &i, false);
964 	return 0;
965 }
966 
967 
968 static int
969 drop_into_debugger(int argc, char **argv)
970 {
971 	status_t err;
972 	int32 id;
973 
974 	if (argc > 2) {
975 		print_debugger_command_usage(argv[0]);
976 		return 0;
977 	}
978 
979 	if (argc == 1)
980 		id = thread_get_current_thread()->id;
981 	else
982 		id = strtoul(argv[1], NULL, 0);
983 
984 	err = _user_debug_thread(id);
985 	if (err)
986 		kprintf("drop failed\n");
987 	else
988 		kprintf("thread %ld dropped into user debugger\n", id);
989 
990 	return 0;
991 }
992 
993 
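/*!
	Returns a human readable description of the given thread state, used by
	the kernel debugger commands below.
*/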
994 static const char *
995 state_to_text(struct thread *thread, int32 state)
996 {
997 	switch (state) {
998 		case B_THREAD_READY:
999 			return "ready";
1000 
1001 		case B_THREAD_RUNNING:
1002 			return "running";
1003 
1004 		case B_THREAD_WAITING:
1005 		{
1006 			switch (thread->wait.type) {
1007 				case THREAD_BLOCK_TYPE_SNOOZE:
1008 					return "zzz";
1009 
1010 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1011 				{
1012 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1013 					if (sem == thread->msg.read_sem)
1014 						return "receive";
1015 					break;
1016 				}
1017 			}
1018 
1019 			return "waiting";
1020 		}
1021 
1022 		case B_THREAD_SUSPENDED:
1023 			return "suspended";
1024 
1025 		case THREAD_STATE_FREE_ON_RESCHED:
1026 			return "death";
1027 
1028 		default:
1029 			return "UNKNOWN";
1030 	}
1031 }
1032 
1033 
1034 static void
1035 _dump_thread_info(struct thread *thread)
1036 {
1037 	struct death_entry *death = NULL;
1038 
1039 	kprintf("THREAD: %p\n", thread);
1040 	kprintf("id:                 %ld (%#lx)\n", thread->id, thread->id);
1041 	kprintf("name:               \"%s\"\n", thread->name);
1042 	kprintf("all_next:           %p\nteam_next:          %p\nq_next:             %p\n",
1043 		thread->all_next, thread->team_next, thread->queue_next);
1044 	kprintf("priority:           %ld (next %ld)\n", thread->priority, thread->next_priority);
1045 	kprintf("state:              %s\n", state_to_text(thread, thread->state));
1046 	kprintf("next_state:         %s\n", state_to_text(thread, thread->next_state));
1047 	kprintf("cpu:                %p ", thread->cpu);
1048 	if (thread->cpu)
1049 		kprintf("(%d)\n", thread->cpu->cpu_num);
1050 	else
1051 		kprintf("\n");
1052 	kprintf("sig_pending:        %#lx (blocked: %#lx)\n", thread->sig_pending,
1053 		thread->sig_block_mask);
1054 	kprintf("in_kernel:          %d\n", thread->in_kernel);
1055 
1056 	kprintf("waiting for:        ");
1057 
1058 	if (thread->state == B_THREAD_WAITING) {
1059 		switch (thread->wait.type) {
1060 			case THREAD_BLOCK_TYPE_SEMAPHORE:
1061 			{
1062 				sem_id sem = (sem_id)(addr_t)thread->wait.object;
1063 				if (sem == thread->msg.read_sem)
1064 					kprintf("data\n");
1065 				else
1066 					kprintf("semaphore %ld\n", sem);
1067 				break;
1068 			}
1069 
1070 			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1071 				kprintf("condition variable %p\n", thread->wait.object);
1072 				break;
1073 
1074 			case THREAD_BLOCK_TYPE_SNOOZE:
1075 				kprintf("snooze()\n");
1076 				break;
1077 
1078 			case THREAD_BLOCK_TYPE_SIGNAL:
1079 				kprintf("signal\n");
1080 				break;
1081 
1082 			case THREAD_BLOCK_TYPE_MUTEX:
1083 				kprintf("mutex %p\n", thread->wait.object);
1084 				break;
1085 
1086 			case THREAD_BLOCK_TYPE_OTHER:
1087 				kprintf("other (%s)\n", (char*)thread->wait.object);
1088 				break;
1089 
1090 			default:
1091 				kprintf("unknown (%p)\n", thread->wait.object);
1092 				break;
1093 		}
1094 	}
1095 
1096 	kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
1097 	kprintf("args:               %p %p\n", thread->args1, thread->args2);
1098 	kprintf("entry:              %p\n", (void *)thread->entry);
1099 	kprintf("team:               %p, \"%s\"\n", thread->team, thread->team->name);
1100 	kprintf("  exit.sem:         %ld\n", thread->exit.sem);
1101 	kprintf("  exit.status:      %#lx (%s)\n", thread->exit.status, strerror(thread->exit.status));
1102 	kprintf("  exit.reason:      %#x\n", thread->exit.reason);
1103 	kprintf("  exit.signal:      %#x\n", thread->exit.signal);
1104 	kprintf("  exit.waiters:\n");
1105 	while ((death = (struct death_entry*)list_get_next_item(
1106 			&thread->exit.waiters, death)) != NULL) {
1107 		kprintf("\t%p (group %ld, thread %ld)\n", death, death->group_id, death->thread);
1108 	}
1109 
1110 	kprintf("kernel_stack_area:  %ld\n", thread->kernel_stack_area);
1111 	kprintf("kernel_stack_base:  %p\n", (void *)thread->kernel_stack_base);
1112 	kprintf("user_stack_area:    %ld\n", thread->user_stack_area);
1113 	kprintf("user_stack_base:    %p\n", (void *)thread->user_stack_base);
1114 	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1115 	kprintf("kernel_errno:       %#x (%s)\n", thread->kernel_errno,
1116 		strerror(thread->kernel_errno));
1117 	kprintf("kernel_time:        %Ld\n", thread->kernel_time);
1118 	kprintf("user_time:          %Ld\n", thread->user_time);
1119 	kprintf("flags:              0x%lx\n", thread->flags);
1120 	kprintf("architecture dependent section:\n");
1121 	arch_thread_dump_info(&thread->arch_info);
1122 }
1123 
1124 
1125 static int
1126 dump_thread_info(int argc, char **argv)
1127 {
1128 	const char *name = NULL;
1129 	struct thread *thread;
1130 	int32 id = -1;
1131 	struct hash_iterator i;
1132 	bool found = false;
1133 
1134 	if (argc > 2) {
1135 		print_debugger_command_usage(argv[0]);
1136 		return 0;
1137 	}
1138 
1139 	if (argc == 1) {
1140 		_dump_thread_info(thread_get_current_thread());
1141 		return 0;
1142 	} else {
1143 		name = argv[1];
1144 		id = strtoul(argv[1], NULL, 0);
1145 
1146 		if (IS_KERNEL_ADDRESS(id)) {
1147 			// semi-hack
1148 			_dump_thread_info((struct thread *)id);
1149 			return 0;
1150 		}
1151 	}
1152 
1153 	// walk through the thread list, trying to match name or id
1154 	hash_open(sThreadHash, &i);
1155 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1156 		if ((name != NULL && !strcmp(name, thread->name)) || thread->id == id) {
1157 			_dump_thread_info(thread);
1158 			found = true;
1159 			break;
1160 		}
1161 	}
1162 	hash_close(sThreadHash, &i, false);
1163 
1164 	if (!found)
1165 		kprintf("thread \"%s\" (%ld) doesn't exist!\n", argv[1], id);
1166 	return 0;
1167 }
1168 
1169 
1170 static int
1171 dump_thread_list(int argc, char **argv)
1172 {
1173 	struct thread *thread;
1174 	struct hash_iterator i;
1175 	bool realTimeOnly = false;
1176 	bool calling = false;
1177 	const char *callSymbol = NULL;
1178 	addr_t callStart = 0;
1179 	addr_t callEnd = 0;
1180 	int32 requiredState = 0;
1181 	team_id team = -1;
1182 	sem_id sem = -1;
1183 
1184 	if (!strcmp(argv[0], "realtime"))
1185 		realTimeOnly = true;
1186 	else if (!strcmp(argv[0], "ready"))
1187 		requiredState = B_THREAD_READY;
1188 	else if (!strcmp(argv[0], "running"))
1189 		requiredState = B_THREAD_RUNNING;
1190 	else if (!strcmp(argv[0], "waiting")) {
1191 		requiredState = B_THREAD_WAITING;
1192 
1193 		if (argc > 1) {
1194 			sem = strtoul(argv[1], NULL, 0);
1195 			if (sem == 0)
1196 				kprintf("ignoring invalid semaphore argument.\n");
1197 		}
1198 	} else if (!strcmp(argv[0], "calling")) {
1199 		if (argc < 2) {
1200 			kprintf("Need to give a symbol name or start and end arguments.\n");
1201 			return 0;
1202 		} else if (argc == 3) {
1203 			callStart = parse_expression(argv[1]);
1204 			callEnd = parse_expression(argv[2]);
1205 		} else
1206 			callSymbol = argv[1];
1207 
1208 		calling = true;
1209 	} else if (argc > 1) {
1210 		team = strtoul(argv[1], NULL, 0);
1211 		if (team == 0)
1212 			kprintf("ignoring invalid team argument.\n");
1213 	}
1214 
1215 	kprintf("thread         id  state     wait for  object  cpu pri  stack    "
1216 		"  team  name\n");
1217 
1218 	hash_open(sThreadHash, &i);
1219 	while ((thread = (struct thread*)hash_next(sThreadHash, &i)) != NULL) {
1220 		// filter out threads not matching the search criteria
1221 		if ((requiredState && thread->state != requiredState)
1222 			|| (calling && !arch_debug_contains_call(thread, callSymbol,
1223 					callStart, callEnd))
1224 			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
1225 			|| (team > 0 && thread->team->id != team)
1226 			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1227 			continue;
1228 
1229 		kprintf("%p %6ld  %-10s", thread, thread->id, state_to_text(thread,
1230 			thread->state));
1231 
1232 		// does it block on a semaphore or a condition variable?
1233 		if (thread->state == B_THREAD_WAITING) {
1234 			switch (thread->wait.type) {
1235 				case THREAD_BLOCK_TYPE_SEMAPHORE:
1236 				{
1237 					sem_id sem = (sem_id)(addr_t)thread->wait.object;
1238 					if (sem == thread->msg.read_sem)
1239 						kprintf("                   ");
1240 					else
1241 						kprintf("sem %12ld   ", sem);
1242 					break;
1243 				}
1244 
1245 				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1246 					kprintf("cvar  %p   ", thread->wait.object);
1247 					break;
1248 
1249 				case THREAD_BLOCK_TYPE_SNOOZE:
1250 					kprintf("                   ");
1251 					break;
1252 
1253 				case THREAD_BLOCK_TYPE_SIGNAL:
1254 					kprintf("signal             ");
1255 					break;
1256 
1257 				case THREAD_BLOCK_TYPE_MUTEX:
1258 					kprintf("mutex %p   ", thread->wait.object);
1259 					break;
1260 
1261 				case THREAD_BLOCK_TYPE_OTHER:
1262 					kprintf("other              ");
1263 					break;
1264 
1265 				default:
1266 					kprintf("???   %p   ", thread->wait.object);
1267 					break;
1268 			}
1269 		} else
1270 			kprintf("       -           ");
1271 
1272 		// on which CPU does it run?
1273 		if (thread->cpu)
1274 			kprintf("%2d", thread->cpu->cpu_num);
1275 		else
1276 			kprintf(" -");
1277 
1278 		kprintf("%4ld  %p%5ld  %s\n", thread->priority,
1279 			(void *)thread->kernel_stack_base, thread->team->id,
1280 			thread->name != NULL ? thread->name : "<NULL>");
1281 	}
1282 	hash_close(sThreadHash, &i, false);
1283 	return 0;
1284 }
1285 
1286 
1287 //	#pragma mark - private kernel API
1288 
1289 
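/*!
	Terminates the calling thread; this function does not return. It deletes
	the user stack, moves the thread into the kernel team (deleting the old
	team if this was its main thread), fills in the death entries of all
	waiters, hands the thread over to the undertaker, and reschedules for the
	last time.
*/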
1290 void
1291 thread_exit(void)
1292 {
1293 	cpu_status state;
1294 	struct thread *thread = thread_get_current_thread();
1295 	struct team *team = thread->team;
1296 	thread_id parentID = -1;
1297 	bool deleteTeam = false;
1298 	sem_id cachedDeathSem = -1;
1299 	status_t status;
1300 	struct thread_debug_info debugInfo;
1301 	team_id teamID = team->id;
1302 
1303 	TRACE(("thread %ld exiting %s w/return code %#lx\n", thread->id,
1304 		thread->exit.reason == THREAD_RETURN_INTERRUPTED
1305 			? "due to signal" : "normally", thread->exit.status));
1306 
1307 	if (!are_interrupts_enabled())
1308 		panic("thread_exit() called with interrupts disabled!\n");
1309 
1310 	// boost our priority to get this over with
1311 	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;
1312 
1313 	// Cancel previously installed alarm timer, if any
1314 	cancel_timer(&thread->alarm);
1315 
1316 	// delete the user stack area first, we won't need it anymore
1317 	if (team->address_space != NULL && thread->user_stack_area >= 0) {
1318 		area_id area = thread->user_stack_area;
1319 		thread->user_stack_area = -1;
1320 		delete_area_etc(team, area);
1321 	}
1322 
1323 	struct job_control_entry *death = NULL;
1324 	struct death_entry* threadDeathEntry = NULL;
1325 
1326 	if (team != team_get_kernel_team()) {
1327 		if (team->main_thread == thread) {
1328 			// this was the main thread in this team, so we will delete the team as well
1329 			deleteTeam = true;
1330 		} else {
1331 			threadDeathEntry = (death_entry*)malloc(sizeof(death_entry));
1332 			team_free_user_thread(thread);
1333 		}
1334 
1335 		// remove this thread from the current team and put it into the
1336 		// kernel team until it dies
1337 		state = disable_interrupts();
1338 		GRAB_TEAM_LOCK();
1339 		GRAB_THREAD_LOCK();
1340 			// removing the thread and putting its death entry into the parent
1341 			// team needs to be an atomic operation
1342 
1343 		// remember how long this thread lasted
1344 		team->dead_threads_kernel_time += thread->kernel_time;
1345 		team->dead_threads_user_time += thread->user_time;
1346 
1347 		remove_thread_from_team(team, thread);
1348 		insert_thread_into_team(team_get_kernel_team(), thread);
1349 
1350 		cachedDeathSem = team->death_sem;
1351 
1352 		if (deleteTeam) {
1353 			struct team *parent = team->parent;
1354 
1355 			// remember who our parent was so we can send a signal
1356 			parentID = parent->id;
1357 
1358 			// Set the team job control state to "dead" and detach the job
1359 			// control entry from our team struct.
1360 			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, 0, true);
1361 			death = team->job_control_entry;
1362 			team->job_control_entry = NULL;
1363 
1364 			if (death != NULL) {
1365 				death->InitDeadState();
1366 
1367 				// team_set_job_control_state() already moved our entry
1368 				// into the parent's list. We just check the soft limit of
1369 				// death entries.
1370 				if (parent->dead_children->count > MAX_DEAD_CHILDREN) {
1371 					death = parent->dead_children->entries.RemoveHead();
1372 					parent->dead_children->count--;
1373 				} else
1374 					death = NULL;
1375 
1376 				RELEASE_THREAD_LOCK();
1377 			} else
1378 				RELEASE_THREAD_LOCK();
1379 
1380 			team_remove_team(team);
1381 
1382 			send_signal_etc(parentID, SIGCHLD,
1383 				SIGNAL_FLAG_TEAMS_LOCKED | B_DO_NOT_RESCHEDULE);
1384 		} else {
1385 			// The thread is not the main thread. We store a thread death
1386 			// entry for it, unless someone is already waiting for it.
1387 			if (threadDeathEntry != NULL
1388 				&& list_is_empty(&thread->exit.waiters)) {
1389 				threadDeathEntry->thread = thread->id;
1390 				threadDeathEntry->status = thread->exit.status;
1391 				threadDeathEntry->reason = thread->exit.reason;
1392 				threadDeathEntry->signal = thread->exit.signal;
1393 
1394 				// add the entry -- remove an old one, if we hit the limit
1395 				list_add_item(&team->dead_threads, threadDeathEntry);
1396 				team->dead_threads_count++;
1397 				threadDeathEntry = NULL;
1398 
1399 				if (team->dead_threads_count > MAX_DEAD_THREADS) {
1400 					threadDeathEntry = (death_entry*)list_remove_head_item(
1401 						&team->dead_threads);
1402 					team->dead_threads_count--;
1403 				}
1404 			}
1405 
1406 			RELEASE_THREAD_LOCK();
1407 		}
1408 
1409 		RELEASE_TEAM_LOCK();
1410 
1411 		// swap address spaces, to make sure we're running on the kernel's pgdir
1412 		vm_swap_address_space(vm_kernel_address_space());
1413 		restore_interrupts(state);
1414 
1415 		TRACE(("thread_exit: thread %ld now a kernel thread!\n", thread->id));
1416 	}
1417 
1418 	if (threadDeathEntry != NULL)
1419 		free(threadDeathEntry);
1420 
1421 	// delete the team if we're its main thread
1422 	if (deleteTeam) {
1423 		team_delete_team(team);
1424 
1425 		// we need to delete any death entry that made it to here
1426 		if (death != NULL)
1427 			delete death;
1428 
1429 		cachedDeathSem = -1;
1430 	}
1431 
1432 	state = disable_interrupts();
1433 	GRAB_THREAD_LOCK();
1434 
1435 	// remove thread from hash, so it's no longer accessible
1436 	hash_remove(sThreadHash, thread);
1437 	sUsedThreads--;
1438 
1439 	// Stop debugging for this thread
1440 	debugInfo = thread->debug_info;
1441 	clear_thread_debug_info(&thread->debug_info, true);
1442 
1443 	// Remove the select infos. We notify them a little later.
1444 	select_info* selectInfos = thread->select_infos;
1445 	thread->select_infos = NULL;
1446 
1447 	RELEASE_THREAD_LOCK();
1448 	restore_interrupts(state);
1449 
1450 	destroy_thread_debug_info(&debugInfo);
1451 
1452 	// notify select infos
1453 	select_info* info = selectInfos;
1454 	while (info != NULL) {
1455 		select_sync* sync = info->sync;
1456 
1457 		notify_select_events(info, B_EVENT_INVALID);
1458 		info = info->next;
1459 		put_select_sync(sync);
1460 	}
1461 
1462 	// shut down the thread messaging
1463 
1464 	status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
1465 	if (status == B_WOULD_BLOCK) {
1466 		// there is data waiting for us, so let us eat it
1467 		thread_id sender;
1468 
1469 		delete_sem(thread->msg.write_sem);
1470 			// first, let's remove all possibly waiting writers
1471 		receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
1472 	} else {
1473 		// we probably own the semaphore here, and we're the last to do so
1474 		delete_sem(thread->msg.write_sem);
1475 	}
1476 	// now we can safely remove the msg.read_sem
1477 	delete_sem(thread->msg.read_sem);
1478 
1479 	// fill all death entries and delete the sem that others will use to wait on us
1480 	{
1481 		sem_id cachedExitSem = thread->exit.sem;
1482 		cpu_status state;
1483 
1484 		state = disable_interrupts();
1485 		GRAB_THREAD_LOCK();
1486 
1487 		// make sure no one will grab this semaphore again
1488 		thread->exit.sem = -1;
1489 
1490 		// fill all death entries
1491 		death_entry* entry = NULL;
1492 		while ((entry = (struct death_entry*)list_get_next_item(
1493 				&thread->exit.waiters, entry)) != NULL) {
1494 			entry->status = thread->exit.status;
1495 			entry->reason = thread->exit.reason;
1496 			entry->signal = thread->exit.signal;
1497 		}
1498 
1499 		RELEASE_THREAD_LOCK();
1500 		restore_interrupts(state);
1501 
1502 		delete_sem(cachedExitSem);
1503 	}
1504 
1505 	// enqueue in the undertaker list and reschedule for the last time
1506 	UndertakerEntry undertakerEntry(thread, teamID, cachedDeathSem);
1507 
1508 	disable_interrupts();
1509 	GRAB_THREAD_LOCK();
1510 
1511 	sUndertakerEntries.Add(&undertakerEntry);
1512 	sUndertakerCondition.NotifyOne(true);
1513 
1514 	thread->next_state = THREAD_STATE_FREE_ON_RESCHED;
1515 	scheduler_reschedule();
1516 
1517 	panic("never can get here\n");
1518 }
1519 
1520 
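/*!
	Returns the thread struct with the given ID, or NULL if there is no such
	thread. Acquires the thread lock internally; use
	thread_get_thread_struct_locked() if you already hold it.
*/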
1521 struct thread *
1522 thread_get_thread_struct(thread_id id)
1523 {
1524 	struct thread *thread;
1525 	cpu_status state;
1526 
1527 	state = disable_interrupts();
1528 	GRAB_THREAD_LOCK();
1529 
1530 	thread = thread_get_thread_struct_locked(id);
1531 
1532 	RELEASE_THREAD_LOCK();
1533 	restore_interrupts(state);
1534 
1535 	return thread;
1536 }
1537 
1538 
1539 struct thread *
1540 thread_get_thread_struct_locked(thread_id id)
1541 {
1542 	struct thread_key key;
1543 
1544 	key.id = id;
1545 
1546 	return (struct thread*)hash_lookup(sThreadHash, &key);
1547 }
1548 
1549 
1550 /*!
1551 	Called in the interrupt handler code when a thread enters
1552 	the kernel for any reason.
1553 	Only tracks time for now.
1554 	Interrupts are disabled.
1555 */
1556 void
1557 thread_at_kernel_entry(bigtime_t now)
1558 {
1559 	struct thread *thread = thread_get_current_thread();
1560 
1561 	TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
1562 
1563 	// track user time
1564 	thread->user_time += now - thread->last_time;
1565 	thread->last_time = now;
1566 
1567 	thread->in_kernel = true;
1568 }
1569 
1570 
1571 /*!
1572 	Called whenever a thread exits kernel space to user space.
1573 	Tracks time, handles signals, ...
1574 */
1575 void
1576 thread_at_kernel_exit(void)
1577 {
1578 	struct thread *thread = thread_get_current_thread();
1579 
1580 	TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
1581 
1582 	while (handle_signals(thread)) {
1583 		InterruptsSpinLocker _(thread_spinlock);
1584 		scheduler_reschedule();
1585 	}
1586 
1587 	cpu_status state = disable_interrupts();
1588 
1589 	thread->in_kernel = false;
1590 
1591 	// track kernel time
1592 	bigtime_t now = system_time();
1593 	thread->kernel_time += now - thread->last_time;
1594 	thread->last_time = now;
1595 
1596 	restore_interrupts(state);
1597 }
1598 
1599 
1600 /*!	The quick version of thread_at_kernel_exit(), in case no signals are pending
1601 	and no debugging shall be done.
1602 	Interrupts are disabled in this case.
1603 */
1604 void
1605 thread_at_kernel_exit_no_signals(void)
1606 {
1607 	struct thread *thread = thread_get_current_thread();
1608 
1609 	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));
1610 
1611 	thread->in_kernel = false;
1612 
1613 	// track kernel time
1614 	bigtime_t now = system_time();
1615 	thread->kernel_time += now - thread->last_time;
1616 	thread->last_time = now;
1617 }
1618 
1619 
1620 void
1621 thread_reset_for_exec(void)
1622 {
1623 	struct thread *thread = thread_get_current_thread();
1624 
1625 	cancel_timer(&thread->alarm);
1626 	reset_signals(thread);
1627 }
1628 
1629 
1630 /*! Inserts a thread at the tail of a queue */
1631 void
1632 thread_enqueue(struct thread *thread, struct thread_queue *queue)
1633 {
1634 	thread->queue_next = NULL;
1635 	if (queue->head == NULL) {
1636 		queue->head = thread;
1637 		queue->tail = thread;
1638 	} else {
1639 		queue->tail->queue_next = thread;
1640 		queue->tail = thread;
1641 	}
1642 }
1643 
1644 
1645 struct thread *
1646 thread_lookat_queue(struct thread_queue *queue)
1647 {
1648 	return queue->head;
1649 }
1650 
1651 
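/*! Removes and returns the thread at the head of the queue, or NULL if the
	queue is empty. */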
1652 struct thread *
1653 thread_dequeue(struct thread_queue *queue)
1654 {
1655 	struct thread *thread = queue->head;
1656 
1657 	if (thread != NULL) {
1658 		queue->head = thread->queue_next;
1659 		if (queue->tail == thread)
1660 			queue->tail = NULL;
1661 	}
1662 	return thread;
1663 }
1664 
1665 
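/*! Removes and returns the thread with the given ID from the queue, or NULL
	if no such thread is queued. */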
1666 struct thread *
1667 thread_dequeue_id(struct thread_queue *q, thread_id id)
1668 {
1669 	struct thread *thread;
1670 	struct thread *last = NULL;
1671 
1672 	thread = q->head;
1673 	while (thread != NULL) {
1674 		if (thread->id == id) {
1675 			if (last == NULL)
1676 				q->head = thread->queue_next;
1677 			else
1678 				last->queue_next = thread->queue_next;
1679 
1680 			if (q->tail == thread)
1681 				q->tail = last;
1682 			break;
1683 		}
1684 		last = thread;
1685 		thread = thread->queue_next;
1686 	}
1687 	return thread;
1688 }
1689 
1690 
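/*! Allocates a fresh thread ID by atomically incrementing the global
	counter. */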
1691 thread_id
1692 allocate_thread_id(void)
1693 {
1694 	return atomic_add(&sNextThreadID, 1);
1695 }
1696 
1697 
1698 thread_id
1699 peek_next_thread_id(void)
1700 {
1701 	return atomic_get(&sNextThreadID);
1702 }
1703 
1704 
1705 /*!	Yield the CPU to other threads.
1706 	If \a force is \c true, the thread will almost certainly be unscheduled.
1707 	If \c false, it may well keep running: if no other thread is in the ready
1708 	state, or if it has a higher priority than the other ready threads, it
1709 	still has a good chance to continue.
1710 */
1711 void
1712 thread_yield(bool force)
1713 {
1714 	if (force) {
1715 		// snooze for roughly 3 thread quanta
1716 		snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT);
1717 #if 0
1718 		cpu_status state;
1719 
1720 		struct thread *thread = thread_get_current_thread();
1721 		if (thread == NULL)
1722 			return;
1723 
1724 		state = disable_interrupts();
1725 		GRAB_THREAD_LOCK();
1726 
1727 		// mark the thread as yielded, so it will not be scheduled next
1728 		//thread->was_yielded = true;
1729 		thread->next_priority = B_LOWEST_ACTIVE_PRIORITY;
1730 		scheduler_reschedule();
1731 
1732 		RELEASE_THREAD_LOCK();
1733 		restore_interrupts(state);
1734 #endif
1735 	} else {
1736 		struct thread *thread = thread_get_current_thread();
1737 		if (thread == NULL)
1738 			return;
1739 
1740 		// Don't force the thread off the CPU, just reschedule.
1741 		InterruptsSpinLocker _(thread_spinlock);
1742 		scheduler_reschedule();
1743 	}
1744 }
1745 
1746 
1747 /*!
1748 	Kernel private thread creation function.
1749 
1750 	\param threadID The ID to be assigned to the new thread. If
1751 		  \code < 0 \endcode a fresh one is allocated.
1752 */
1753 thread_id
1754 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
1755 	void *arg, team_id team, thread_id threadID)
1756 {
1757 	thread_creation_attributes attributes;
1758 	attributes.entry = (thread_entry_func)function;
1759 	attributes.name = name;
1760 	attributes.priority = priority;
1761 	attributes.args1 = arg;
1762 	attributes.args2 = NULL;
1763 	attributes.stack_address = NULL;
1764 	attributes.stack_size = 0;
1765 	attributes.team = team;
1766 	attributes.thread = threadID;
1767 
1768 	return create_thread(attributes, true);
1769 }
1770 
1771 
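/*!
	Waits for the given thread to terminate and, on success, stores its exit
	status in \a _returnCode. If the thread is already gone, its death entry
	is looked up in the caller's team. The thread is resumed first, so that
	waiting on a suspended thread cannot block forever.
*/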
1772 status_t
1773 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
1774 	status_t *_returnCode)
1775 {
1776 	sem_id exitSem = B_BAD_THREAD_ID;
1777 	struct death_entry death;
1778 	job_control_entry* freeDeath = NULL;
1779 	struct thread *thread;
1780 	cpu_status state;
1781 	status_t status = B_OK;
1782 
1783 	if (id < B_OK)
1784 		return B_BAD_THREAD_ID;
1785 
1786 	// we need to resume the thread we're waiting for first
1787 
1788 	state = disable_interrupts();
1789 	GRAB_THREAD_LOCK();
1790 
1791 	thread = thread_get_thread_struct_locked(id);
1792 	if (thread != NULL) {
1793 		// remember the semaphore we have to wait on and place our death entry
1794 		exitSem = thread->exit.sem;
1795 		list_add_link_to_head(&thread->exit.waiters, &death);
1796 	}
1797 
1798 	death_entry* threadDeathEntry = NULL;
1799 
1800 	RELEASE_THREAD_LOCK();
1801 
1802 	if (thread == NULL) {
1803 		// we couldn't find this thread - maybe it's already gone, and we'll
1804 		// find its death entry in our team
1805 		GRAB_TEAM_LOCK();
1806 
1807 		struct team* team = thread_get_current_thread()->team;
1808 
1809 		// check the child death entries first (i.e. main threads of child
1810 		// teams)
1811 		bool deleteEntry;
1812 		freeDeath = team_get_death_entry(team, id, &deleteEntry);
1813 		if (freeDeath != NULL) {
1814 			death.status = freeDeath->status;
1815 			if (!deleteEntry)
1816 				freeDeath = NULL;
1817 		} else {
1818 			// check the thread death entries of the team (non-main threads)
1819 			while ((threadDeathEntry = (death_entry*)list_get_next_item(
1820 					&team->dead_threads, threadDeathEntry)) != NULL) {
1821 				if (threadDeathEntry->thread == id) {
1822 					list_remove_item(&team->dead_threads, threadDeathEntry);
1823 					team->dead_threads_count--;
1824 					death.status = threadDeathEntry->status;
1825 					break;
1826 				}
1827 			}
1828 
1829 			if (threadDeathEntry == NULL)
1830 				status = B_BAD_THREAD_ID;
1831 		}
1832 
1833 		RELEASE_TEAM_LOCK();
1834 	}
1835 
1836 	restore_interrupts(state);
1837 
1838 	if (thread == NULL && status == B_OK) {
1839 		// we found the thread's death entry in our team
1840 		if (_returnCode)
1841 			*_returnCode = death.status;
1842 
1843 		delete freeDeath;
1844 		free(threadDeathEntry);
1845 		return B_OK;
1846 	}
1847 
1848 	// we need to wait for the death of the thread
1849 
1850 	if (exitSem < B_OK)
1851 		return B_BAD_THREAD_ID;
1852 
1853 	resume_thread(id);
1854 		// make sure we don't wait forever on a suspended thread
1855 
1856 	status = acquire_sem_etc(exitSem, 1, flags, timeout);
1857 
1858 	if (status == B_OK) {
1859 		// this should never happen as the thread deletes the semaphore on exit
1860 		panic("could acquire exit_sem for thread %ld\n", id);
1861 	} else if (status == B_BAD_SEM_ID) {
1862 		// this is the way the thread normally exits
1863 		status = B_OK;
1864 
1865 		if (_returnCode)
1866 			*_returnCode = death.status;
1867 	} else {
1868 		// We were probably interrupted; we need to remove our death entry now.
1869 		state = disable_interrupts();
1870 		GRAB_THREAD_LOCK();
1871 
1872 		thread = thread_get_thread_struct_locked(id);
1873 		if (thread != NULL)
1874 			list_remove_link(&death);
1875 
1876 		RELEASE_THREAD_LOCK();
1877 		restore_interrupts(state);
1878 
1879 		// If the thread is already gone, we need to wait for its exit semaphore
1880 		// to make sure our death entry stays valid - it won't take long
1881 		if (thread == NULL)
1882 			acquire_sem(exitSem);
1883 	}
1884 
1885 	return status;
1886 }
1887 
1888 
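/*!
	Adds the given select info to the thread's list of select infos. Only
	B_EVENT_INVALID (i.e. thread death) is supported at the moment.
*/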
1889 status_t
1890 select_thread(int32 id, struct select_info* info, bool kernel)
1891 {
1892 	InterruptsSpinLocker locker(thread_spinlock);
1893 
1894 	// get thread
1895 	struct thread* thread = thread_get_thread_struct_locked(id);
1896 	if (thread == NULL)
1897 		return B_BAD_THREAD_ID;
1898 
1899 	// We support only B_EVENT_INVALID at the moment.
1900 	info->selected_events &= B_EVENT_INVALID;
1901 
1902 	// add info to list
1903 	if (info->selected_events != 0) {
1904 		info->next = thread->select_infos;
1905 		thread->select_infos = info;
1906 
1907 		// we need a sync reference
1908 		atomic_add(&info->sync->ref_count, 1);
1909 	}
1910 
1911 	return B_OK;
1912 }
1913 
1914 
1915 status_t
1916 deselect_thread(int32 id, struct select_info* info, bool kernel)
1917 {
1918 	InterruptsSpinLocker locker(thread_spinlock);
1919 
1920 	// get thread
1921 	struct thread* thread = thread_get_thread_struct_locked(id);
1922 	if (thread == NULL)
1923 		return B_BAD_THREAD_ID;
1924 
1925 	// remove info from list
1926 	select_info** infoLocation = &thread->select_infos;
1927 	while (*infoLocation != NULL && *infoLocation != info)
1928 		infoLocation = &(*infoLocation)->next;
1929 
1930 	if (*infoLocation != info)
1931 		return B_OK;
1932 
1933 	*infoLocation = info->next;
1934 
1935 	locker.Unlock();
1936 
1937 	// surrender sync reference
1938 	put_select_sync(info->sync);
1939 
1940 	return B_OK;
1941 }
1942 
1943 
1944 int32
1945 thread_max_threads(void)
1946 {
1947 	return sMaxThreads;
1948 }
1949 
1950 
1951 int32
1952 thread_used_threads(void)
1953 {
1954 	return sUsedThreads;
1955 }
1956 
1957 
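/*!
	Initializes the threading subsystem: creates the thread hash table, sets
	up an idle thread for each CPU, starts the undertaker thread, and
	registers the thread related kernel debugger commands.
*/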
1958 status_t
1959 thread_init(kernel_args *args)
1960 {
1961 	uint32 i;
1962 
1963 	TRACE(("thread_init: entry\n"));
1964 
1965 	// create the thread hash table
1966 	sThreadHash = hash_init(15, offsetof(struct thread, all_next),
1967 		&thread_struct_compare, &thread_struct_hash);
1968 
1969 	// zero out the dead thread structure q
1970 	memset(&dead_q, 0, sizeof(dead_q));
1971 
1972 	if (arch_thread_init(args) < B_OK)
1973 		panic("arch_thread_init() failed!\n");
1974 
1975 	// skip all thread IDs including B_SYSTEM_TEAM, which is reserved
1976 	sNextThreadID = B_SYSTEM_TEAM + 1;
1977 
1978 	// create an idle thread for each cpu
1979 
1980 	for (i = 0; i < args->num_cpus; i++) {
1981 		struct thread *thread;
1982 		area_info info;
1983 		char name[64];
1984 
1985 		sprintf(name, "idle thread %lu", i + 1);
1986 		thread = create_thread_struct(&sIdleThreads[i], name,
1987 			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
1988 		if (thread == NULL) {
1989 			panic("error creating idle thread struct\n");
1990 			return B_NO_MEMORY;
1991 		}
1992 
1993 		thread->team = team_get_kernel_team();
1994 		thread->priority = thread->next_priority = B_IDLE_PRIORITY;
1995 		thread->state = B_THREAD_RUNNING;
1996 		thread->next_state = B_THREAD_READY;
1997 		sprintf(name, "idle thread %lu kstack", i + 1);
1998 		thread->kernel_stack_area = find_area(name);
1999 		thread->entry = NULL;
2000 
2001 		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2002 			panic("error finding idle kstack area\n");
2003 
2004 		thread->kernel_stack_base = (addr_t)info.address;
2005 
2006 		hash_insert(sThreadHash, thread);
2007 		insert_thread_into_team(thread->team, thread);
2008 	}
2009 	sUsedThreads = args->num_cpus;
2010 
2011 	// initialize the undertaker data and start the undertaker thread
2012 	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2013 	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2014 
2015 	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2016 		B_DISPLAY_PRIORITY, NULL);
2017 	if (undertakerThread < 0)
2018 		panic("Failed to create undertaker thread!");
2019 	resume_thread(undertakerThread);
2020 
2021 	// set up some debugger commands
2022 	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2023 		"[ <team> ]\n"
2024 		"Prints a list of all existing threads, or, if a team ID is given,\n"
2025 		"all threads of the specified team.\n"
2026 		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
2027 	add_debugger_command_etc("ready", &dump_thread_list,
2028 		"List all ready threads",
2029 		"\n"
2030 		"Prints a list of all threads in ready state.\n", 0);
2031 	add_debugger_command_etc("running", &dump_thread_list,
2032 		"List all running threads",
2033 		"\n"
2034 		"Prints a list of all threads in running state.\n", 0);
2035 	add_debugger_command_etc("waiting", &dump_thread_list,
2036 		"List all waiting threads (optionally for a specific semaphore)",
2037 		"[ <sem> ]\n"
2038 		"Prints a list of all threads in waiting state. If a semaphore is\n"
2039 		"specified, only the threads waiting on that semaphore are listed.\n"
2040 		"  <sem>  - ID of the semaphore.\n", 0);
2041 	add_debugger_command_etc("realtime", &dump_thread_list,
2042 		"List all realtime threads",
2043 		"\n"
2044 		"Prints a list of all threads with realtime priority.\n", 0);
2045 	add_debugger_command_etc("thread", &dump_thread_info,
2046 		"Dump info about a particular thread",
2047 		"[ <id> | <address> | <name> ]\n"
2048 		"Prints information about the specified thread. If no argument is\n"
2049 		"given, the current thread is selected.\n"
2050 		"  <id>       - The ID of the thread.\n"
2051 		"  <address>  - The address of the thread structure.\n"
2052 		"  <name>     - The thread's name.\n", 0);
2053 	add_debugger_command_etc("calling", &dump_thread_list,
2054 		"Show all threads that have a specific address in their call chain",
2055 		"{ <symbol-pattern> | <start> <end> }\n", 0);
2056 	add_debugger_command_etc("unreal", &make_thread_unreal,
2057 		"Set realtime priority threads to normal priority",
2058 		"[ <id> ]\n"
2059 		"Sets the priority of all realtime threads or, if given, the one\n"
2060 		"with the specified ID to \"normal\" priority.\n"
2061 		"  <id>  - The ID of the thread.\n", 0);
2062 	add_debugger_command_etc("suspend", &make_thread_suspended,
2063 		"Suspend a thread",
2064 		"[ <id> ]\n"
2065 		"Suspends the thread with the given ID. If no ID argument is given,\n"
2066 		"the current thread is selected.\n"
2067 		"  <id>  - The ID of the thread.\n", 0);
2068 	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2069 		"<id>\n"
2070 		"Resumes the specified thread, if it is currently suspended.\n"
2071 		"  <id>  - The ID of the thread.\n", 0);
2072 	add_debugger_command_etc("drop", &drop_into_debugger,
2073 		"Drop a thread into the userland debugger",
2074 		"<id>\n"
2075 		"Drops the specified (userland) thread into the userland debugger\n"
2076 		"after leaving the kernel debugger.\n"
2077 		"  <id>  - The ID of the thread.\n", 0);
2078 	add_debugger_command_etc("priority", &set_thread_prio,
2079 		"Set a thread's priority",
2080 		"<priority> [ <id> ]\n"
2081 		"Sets the priority of the thread with the specified ID to the given\n"
2082 		"priority. If no thread ID is given, the current thread is selected.\n"
2083 		"  <priority>  - The thread's new priority (0 - 120)\n"
2084 		"  <id>        - The ID of the thread.\n", 0);
2085 
2086 	return B_OK;
2087 }
2088 
2089 
2090 status_t
2091 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2092 {
2093 	// set up the cpu pointer in the not yet initialized per-cpu idle thread
2094 	// so that get_current_cpu and friends will work, which is crucial for
2095 	// a lot of low level routines
2096 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2097 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2098 	return B_OK;
2099 }
2100 
2101 
2102 //	#pragma mark - thread blocking API
2103 
2104 
2105 static status_t
2106 thread_block_timeout(timer* timer)
2107 {
2108 	// The timer has been installed with B_TIMER_ACQUIRE_THREAD_LOCK, so
2109 	// we're holding the thread lock already. This makes things comfortably
2110 	// easy.
2111 
2112 	struct thread* thread = (struct thread*)timer->user_data;
2113 	if (thread_unblock_locked(thread, B_TIMED_OUT))
2114 		return B_INVOKE_SCHEDULER;
2115 
2116 	return B_HANDLED_INTERRUPT;
2117 }
2118 
2119 
2120 status_t
2121 thread_block()
2122 {
2123 	InterruptsSpinLocker _(thread_spinlock);
2124 	return thread_block_locked(thread_get_current_thread());
2125 }
2126 
2127 
2128 bool
2129 thread_unblock(thread_id threadID, status_t status)
2130 {
2131 	InterruptsSpinLocker _(thread_spinlock);
2132 
2133 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2134 	if (thread == NULL)
2135 		return false;
2136 	return thread_unblock_locked(thread, status);
2137 }
2138 
2139 
2140 status_t
2141 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2142 {
2143 	InterruptsSpinLocker _(thread_spinlock);
2144 	return thread_block_with_timeout_locked(timeoutFlags, timeout);
2145 }
2146 
2147 
2148 status_t
2149 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout)
2150 {
2151 	struct thread* thread = thread_get_current_thread();
2152 
2153 	if (thread->wait.status != 1)
2154 		return thread->wait.status;
2155 
2156 	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2157 		&& timeout != B_INFINITE_TIMEOUT;
2158 
2159 	if (useTimer) {
2160 		// Timer flags: absolute/relative + "acquire thread lock". The latter
2161 		// avoids nasty race conditions and deadlock problems that could
2162 		// otherwise occur between our cancel_timer() and a concurrently
2163 		// executing thread_block_timeout().
2164 		uint32 timerFlags;
2165 		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2166 			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2167 		} else {
2168 			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2169 			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2170 				timeout -= rtc_boot_time();
2171 		}
2172 		timerFlags |= B_TIMER_ACQUIRE_THREAD_LOCK;
2173 
2174 		// install the timer
2175 		thread->wait.unblock_timer.user_data = thread;
2176 		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2177 			timerFlags);
2178 	}
2179 
2180 	// block
2181 	status_t error = thread_block_locked(thread);
2182 
2183 	// cancel timer, if it didn't fire
2184 	if (error != B_TIMED_OUT && useTimer)
2185 		cancel_timer(&thread->wait.unblock_timer);
2186 
2187 	return error;
2188 }
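
/* Illustrative sketch of the blocking protocol built on the functions above;
   the wait message is hypothetical:

	InterruptsSpinLocker locker(thread_spinlock);
	struct thread* thread = thread_get_current_thread();
	thread_prepare_to_block(thread, B_CAN_INTERRUPT, THREAD_BLOCK_TYPE_OTHER,
		"example wait");
	status_t error = thread_block_with_timeout_locked(B_RELATIVE_TIMEOUT,
		1000000LL);
		// another thread wakes us via thread_unblock() with the status to be
		// returned, or the timeout fires and we get B_TIMED_OUT
*/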
2189 
2190 
2191 /*!	Thread spinlock must be held.
2192 */
2193 static status_t
2194 user_unblock_thread(thread_id threadID, status_t status)
2195 {
2196 	struct thread* thread = thread_get_thread_struct_locked(threadID);
2197 	if (thread == NULL)
2198 		return B_BAD_THREAD_ID;
2199 	if (thread->user_thread == NULL)
2200 		return B_NOT_ALLOWED;
2201 
2202 	thread_unblock_locked(thread, status);
2203 
2204 	return B_OK;
2205 }
2206 
2207 
2208 //	#pragma mark - public kernel API
2209 
2210 
2211 void
2212 exit_thread(status_t returnValue)
2213 {
2214 	struct thread *thread = thread_get_current_thread();
2215 
2216 	thread->exit.status = returnValue;
2217 	thread->exit.reason = THREAD_RETURN_EXIT;
2218 
2219 	// If called from a kernel thread, we don't deliver the signal;
2220 	// we just exit directly to keep the user space behaviour of
2221 	// this function.
2222 	if (thread->team != team_get_kernel_team())
2223 		send_signal_etc(thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2224 	else
2225 		thread_exit();
2226 }
2227 
2228 
2229 status_t
2230 kill_thread(thread_id id)
2231 {
2232 	if (id <= 0)
2233 		return B_BAD_VALUE;
2234 
2235 	return send_signal(id, SIGKILLTHR);
2236 }
2237 
2238 
2239 status_t
2240 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
2241 {
2242 	return send_data_etc(thread, code, buffer, bufferSize, 0);
2243 }
2244 
2245 
2246 int32
2247 receive_data(thread_id *sender, void *buffer, size_t bufferSize)
2248 {
2249 	return receive_data_etc(sender, buffer, bufferSize, 0);
2250 }
2251 
2252 
2253 bool
2254 has_data(thread_id thread)
2255 {
2256 	int32 count;
2257 
	// TODO: the "thread" argument is currently ignored; this checks the
	// calling thread's own message queue.
2258 	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
2259 			&count) != B_OK)
2260 		return false;
2261 
2262 	return count != 0;
2263 }
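
/* Illustrative sketch of the data passing API; "target" and the message code
   are hypothetical:

	int32 value = 42;
	send_data(target, 'exmp', &value, sizeof(value));

	// in the receiving thread:
	thread_id sender;
	int32 received;
	int32 code = receive_data(&sender, &received, sizeof(received));
		// blocks until a message arrives; has_data() polls for pending
		// messages without blocking
*/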
2264 
2265 
2266 status_t
2267 _get_thread_info(thread_id id, thread_info *info, size_t size)
2268 {
2269 	status_t status = B_OK;
2270 	struct thread *thread;
2271 	cpu_status state;
2272 
2273 	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
2274 		return B_BAD_VALUE;
2275 
2276 	state = disable_interrupts();
2277 	GRAB_THREAD_LOCK();
2278 
2279 	thread = thread_get_thread_struct_locked(id);
2280 	if (thread == NULL) {
2281 		status = B_BAD_VALUE;
2282 		goto err;
2283 	}
2284 
2285 	fill_thread_info(thread, info, size);
2286 
2287 err:
2288 	RELEASE_THREAD_LOCK();
2289 	restore_interrupts(state);
2290 
2291 	return status;
2292 }
2293 
2294 
2295 status_t
2296 _get_next_thread_info(team_id team, int32 *_cookie, thread_info *info,
2297 	size_t size)
2298 {
2299 	status_t status = B_BAD_VALUE;
2300 	struct thread *thread = NULL;
2301 	cpu_status state;
2302 	int slot;
2303 	thread_id lastThreadID;
2304 
2305 	if (info == NULL || size != sizeof(thread_info) || team < B_OK)
2306 		return B_BAD_VALUE;
2307 
2308 	if (team == B_CURRENT_TEAM)
2309 		team = team_get_current_team_id();
2310 	else if (!team_is_valid(team))
2311 		return B_BAD_VALUE;
2312 
2313 	slot = *_cookie;
2314 
2315 	state = disable_interrupts();
2316 	GRAB_THREAD_LOCK();
2317 
2318 	lastThreadID = peek_next_thread_id();
2319 	if (slot >= lastThreadID)
2320 		goto err;
2321 
2322 	while (slot < lastThreadID
2323 		&& (!(thread = thread_get_thread_struct_locked(slot))
2324 			|| thread->team->id != team))
2325 		slot++;
2326 
2327 	if (thread != NULL && thread->team->id == team) {
2328 		fill_thread_info(thread, info, size);
2329 
2330 		*_cookie = slot + 1;
2331 		status = B_OK;
2332 	}
2333 
2334 err:
2335 	RELEASE_THREAD_LOCK();
2336 	restore_interrupts(state);
2337 
2338 	return status;
2339 }
2340 
2341 
2342 thread_id
2343 find_thread(const char *name)
2344 {
2345 	struct hash_iterator iterator;
2346 	struct thread *thread;
2347 	cpu_status state;
2348 
2349 	if (name == NULL)
2350 		return thread_get_current_thread_id();
2351 
2352 	state = disable_interrupts();
2353 	GRAB_THREAD_LOCK();
2354 
2355 	// ToDo: this might not be in the same order as find_thread() in BeOS
2356 	//		which could theoretically be problematic.
2357 	// ToDo: scanning the whole list with the thread lock held isn't exactly
2358 	//		cheap either - although this function is probably used very rarely.
2359 
2360 	hash_open(sThreadHash, &iterator);
2361 	while ((thread = (struct thread*)hash_next(sThreadHash, &iterator))
2362 			!= NULL) {
2363 		// Search through hash
2364 		if (thread->name != NULL && !strcmp(thread->name, name)) {
2365 			thread_id id = thread->id;
2366 
2367 			RELEASE_THREAD_LOCK();
2368 			restore_interrupts(state);
2369 			return id;
2370 		}
2371 	}
2372 
2373 	RELEASE_THREAD_LOCK();
2374 	restore_interrupts(state);
2375 
2376 	return B_NAME_NOT_FOUND;
2377 }
2378 
2379 
2380 status_t
2381 rename_thread(thread_id id, const char *name)
2382 {
2383 	struct thread *thread = thread_get_current_thread();
2384 	status_t status = B_BAD_THREAD_ID;
2385 	cpu_status state;
2386 
2387 	if (name == NULL)
2388 		return B_BAD_VALUE;
2389 
2390 	state = disable_interrupts();
2391 	GRAB_THREAD_LOCK();
2392 
2393 	if (thread->id != id)
2394 		thread = thread_get_thread_struct_locked(id);
2395 
2396 	if (thread != NULL) {
2397 		if (thread->team == thread_get_current_thread()->team) {
2398 			strlcpy(thread->name, name, B_OS_NAME_LENGTH);
2399 			status = B_OK;
2400 		} else
2401 			status = B_NOT_ALLOWED;
2402 	}
2403 
2404 	RELEASE_THREAD_LOCK();
2405 	restore_interrupts(state);
2406 
2407 	return status;
2408 }
2409 
2410 
2411 status_t
2412 set_thread_priority(thread_id id, int32 priority)
2413 {
2414 	struct thread *thread;
2415 	int32 oldPriority;
2416 
2417 	// make sure the passed in priority is within bounds
2418 	if (priority > B_MAX_PRIORITY)
2419 		priority = B_MAX_PRIORITY;
2420 	if (priority < B_MIN_PRIORITY)
2421 		priority = B_MIN_PRIORITY;
2422 
2423 	thread = thread_get_current_thread();
2424 	if (thread->id == id) {
2425 		// it's ourself, so we know we aren't in the run queue, and we can manipulate
2426 		// It's our own thread, so we know it isn't in the run queue and we
2427 		// can manipulate its structure directly.
2428 			// note that this might not return the correct value if we are preempted
2429 			// here, and another thread changes our priority before the next line is
2430 			// executed
2431 		thread->priority = thread->next_priority = priority;
2432 	} else {
2433 		cpu_status state = disable_interrupts();
2434 		GRAB_THREAD_LOCK();
2435 
2436 		thread = thread_get_thread_struct_locked(id);
2437 		if (thread) {
2438 			oldPriority = thread->priority;
2439 			thread->next_priority = priority;
2440 			if (thread->state == B_THREAD_READY && thread->priority != priority) {
2441 				// if the thread is in the run queue, we reinsert it at a new position
2442 				scheduler_remove_from_run_queue(thread);
2443 				thread->priority = priority;
2444 				scheduler_enqueue_in_run_queue(thread);
2445 			} else
2446 				thread->priority = priority;
2447 		} else
2448 			oldPriority = B_BAD_THREAD_ID;
2449 
2450 		RELEASE_THREAD_LOCK();
2451 		restore_interrupts(state);
2452 	}
2453 
2454 	return oldPriority;
2455 }
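
/* Illustrative sketch; "id" is a hypothetical thread_id:

	int32 oldPriority = set_thread_priority(id, B_NORMAL_PRIORITY);
		// returns the previous priority, or B_BAD_THREAD_ID if no thread
		// with the given ID exists
*/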
2456 
2457 
2458 status_t
2459 snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2460 {
2461 	status_t status;
2462 
2463 	if (timebase != B_SYSTEM_TIMEBASE)
2464 		return B_BAD_VALUE;
2465 
2466 	InterruptsSpinLocker _(thread_spinlock);
2467 	struct thread* thread = thread_get_current_thread();
2468 
2469 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, NULL);
2470 	status = thread_block_with_timeout_locked(flags, timeout);
2471 
2472 	if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
2473 		return B_OK;
2474 
2475 	return status;
2476 }
2477 
2478 
2479 /*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
2480 status_t
2481 snooze(bigtime_t timeout)
2482 {
2483 	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
2484 }
2485 
2486 
2487 /*!
2488 	snooze_until() for internal kernel use only; doesn't interrupt on
2489 	signals.
2490 */
2491 status_t
2492 snooze_until(bigtime_t timeout, int timebase)
2493 {
2494 	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
2495 }
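
/* Illustrative sketch; sleeps until an absolute point in time:

	snooze_until(system_time() + 1000000LL, B_SYSTEM_TIMEBASE);
		// wakes up one second from now, measured on the system time base
*/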
2496 
2497 
2498 status_t
2499 wait_for_thread(thread_id thread, status_t *_returnCode)
2500 {
2501 	return wait_for_thread_etc(thread, 0, 0, _returnCode);
2502 }
2503 
2504 
2505 status_t
2506 suspend_thread(thread_id id)
2507 {
2508 	if (id <= 0)
2509 		return B_BAD_VALUE;
2510 
2511 	return send_signal(id, SIGSTOP);
2512 }
2513 
2514 
2515 status_t
2516 resume_thread(thread_id id)
2517 {
2518 	if (id <= 0)
2519 		return B_BAD_VALUE;
2520 
2521 	return send_signal_etc(id, SIGCONT, SIGNAL_FLAG_DONT_RESTART_SYSCALL);
2522 		// This retains compatibility to BeOS which documents the
2523 		// combination of suspend_thread() and resume_thread() to
2524 		// interrupt threads waiting on semaphores.
2525 }
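
/* Illustrative sketch of the BeOS-documented combination mentioned above;
   "id" is a hypothetical thread currently blocked on a semaphore:

	suspend_thread(id);
	resume_thread(id);
		// the blocked thread's acquire_sem() call returns B_INTERRUPTED
*/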
2526 
2527 
2528 thread_id
2529 spawn_kernel_thread(thread_func function, const char *name, int32 priority,
2530 	void *arg)
2531 {
2532 	thread_creation_attributes attributes;
2533 	attributes.entry = (thread_entry_func)function;
2534 	attributes.name = name;
2535 	attributes.priority = priority;
2536 	attributes.args1 = arg;
2537 	attributes.args2 = NULL;
2538 	attributes.stack_address = NULL;
2539 	attributes.stack_size = 0;
2540 	attributes.team = team_get_kernel_team()->id;
2541 	attributes.thread = -1;
2542 
2543 	return create_thread(attributes, true);
2544 }
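
/* Illustrative sketch, modeled on the undertaker thread started in
   thread_init(); "my_worker" is a hypothetical thread_func:

	thread_id worker = spawn_kernel_thread(&my_worker, "my worker",
		B_NORMAL_PRIORITY, NULL);
	if (worker >= 0)
		resume_thread(worker);
			// kernel threads are created suspended and must be resumed
*/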
2545 
2546 
2547 int
2548 getrlimit(int resource, struct rlimit * rlp)
2549 {
2550 	status_t error = common_getrlimit(resource, rlp);
2551 	if (error != B_OK) {
2552 		errno = error;
2553 		return -1;
2554 	}
2555 
2556 	return 0;
2557 }
2558 
2559 
2560 int
2561 setrlimit(int resource, const struct rlimit * rlp)
2562 {
2563 	status_t error = common_setrlimit(resource, rlp);
2564 	if (error != B_OK) {
2565 		errno = error;
2566 		return -1;
2567 	}
2568 
2569 	return 0;
2570 }
2571 
2572 
2573 //	#pragma mark - syscalls
2574 
2575 
2576 void
2577 _user_exit_thread(status_t returnValue)
2578 {
2579 	exit_thread(returnValue);
2580 }
2581 
2582 
2583 status_t
2584 _user_kill_thread(thread_id thread)
2585 {
2586 	return kill_thread(thread);
2587 }
2588 
2589 
2590 status_t
2591 _user_resume_thread(thread_id thread)
2592 {
2593 	return resume_thread(thread);
2594 }
2595 
2596 
2597 status_t
2598 _user_suspend_thread(thread_id thread)
2599 {
2600 	return suspend_thread(thread);
2601 }
2602 
2603 
2604 status_t
2605 _user_rename_thread(thread_id thread, const char *userName)
2606 {
2607 	char name[B_OS_NAME_LENGTH];
2608 
2609 	if (!IS_USER_ADDRESS(userName)
2610 		|| userName == NULL
2611 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
2612 		return B_BAD_ADDRESS;
2613 
2614 	return rename_thread(thread, name);
2615 }
2616 
2617 
2618 int32
2619 _user_set_thread_priority(thread_id thread, int32 newPriority)
2620 {
2621 	return set_thread_priority(thread, newPriority);
2622 }
2623 
2624 
2625 thread_id
2626 _user_spawn_thread(thread_creation_attributes* userAttributes)
2627 {
2628 	thread_creation_attributes attributes;
2629 	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
2630 		|| user_memcpy(&attributes, userAttributes,
2631 				sizeof(attributes)) != B_OK) {
2632 		return B_BAD_ADDRESS;
2633 	}
2634 
2635 	if (attributes.stack_size != 0
2636 		&& (attributes.stack_size < MIN_USER_STACK_SIZE
2637 			|| attributes.stack_size > MAX_USER_STACK_SIZE)) {
2638 		return B_BAD_VALUE;
2639 	}
2640 
2641 	char name[B_OS_NAME_LENGTH];
2642 	thread_id threadID;
2643 
2644 	if (!IS_USER_ADDRESS(attributes.entry) || attributes.entry == NULL
2645 		|| (attributes.stack_address != NULL
2646 			&& !IS_USER_ADDRESS(attributes.stack_address))
2647 		|| (attributes.name != NULL && (!IS_USER_ADDRESS(attributes.name)
2648 			|| user_strlcpy(name, attributes.name, B_OS_NAME_LENGTH) < 0)))
2649 		return B_BAD_ADDRESS;
2650 
2651 	attributes.name = attributes.name != NULL ? name : "user thread";
2652 	attributes.team = thread_get_current_thread()->team->id;
2653 	attributes.thread = -1;
2654 
2655 	threadID = create_thread(attributes, false);
2656 
2657 	if (threadID >= 0)
2658 		user_debug_thread_created(threadID);
2659 
2660 	return threadID;
2661 }
2662 
2663 
2664 status_t
2665 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
2666 {
2667 	// NOTE: We only know the system timebase at the moment.
2668 	syscall_restart_handle_timeout_pre(flags, timeout);
2669 
2670 	status_t error = snooze_etc(timeout, timebase, flags | B_CAN_INTERRUPT);
2671 
2672 	return syscall_restart_handle_timeout_post(error, timeout);
2673 }
2674 
2675 
2676 void
2677 _user_thread_yield(void)
2678 {
2679 	thread_yield(true);
2680 }
2681 
2682 
2683 status_t
2684 _user_get_thread_info(thread_id id, thread_info *userInfo)
2685 {
2686 	thread_info info;
2687 	status_t status;
2688 
2689 	if (!IS_USER_ADDRESS(userInfo))
2690 		return B_BAD_ADDRESS;
2691 
2692 	status = _get_thread_info(id, &info, sizeof(thread_info));
2693 
2694 	if (status >= B_OK
2695 		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2696 		return B_BAD_ADDRESS;
2697 
2698 	return status;
2699 }
2700 
2701 
2702 status_t
2703 _user_get_next_thread_info(team_id team, int32 *userCookie,
2704 	thread_info *userInfo)
2705 {
2706 	status_t status;
2707 	thread_info info;
2708 	int32 cookie;
2709 
2710 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
2711 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
2712 		return B_BAD_ADDRESS;
2713 
2714 	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
2715 	if (status < B_OK)
2716 		return status;
2717 
2718 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
2719 		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
2720 		return B_BAD_ADDRESS;
2721 
2722 	return status;
2723 }
2724 
2725 
2726 thread_id
2727 _user_find_thread(const char *userName)
2728 {
2729 	char name[B_OS_NAME_LENGTH];
2730 
2731 	if (userName == NULL)
2732 		return find_thread(NULL);
2733 
2734 	if (!IS_USER_ADDRESS(userName)
2735 		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
2736 		return B_BAD_ADDRESS;
2737 
2738 	return find_thread(name);
2739 }
2740 
2741 
2742 status_t
2743 _user_wait_for_thread(thread_id id, status_t *userReturnCode)
2744 {
2745 	status_t returnCode;
2746 	status_t status;
2747 
2748 	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
2749 		return B_BAD_ADDRESS;
2750 
2751 	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
2752 
2753 	if (status == B_OK && userReturnCode != NULL
2754 		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
2755 		return B_BAD_ADDRESS;
2756 	}
2757 
2758 	return syscall_restart_handle_post(status);
2759 }
2760 
2761 
2762 bool
2763 _user_has_data(thread_id thread)
2764 {
2765 	return has_data(thread);
2766 }
2767 
2768 
2769 status_t
2770 _user_send_data(thread_id thread, int32 code, const void *buffer,
2771 	size_t bufferSize)
2772 {
2773 	if (!IS_USER_ADDRESS(buffer))
2774 		return B_BAD_ADDRESS;
2775 
2776 	return send_data_etc(thread, code, buffer, bufferSize,
2777 		B_KILL_CAN_INTERRUPT);
2778 		// supports userland buffers
2779 }
2780 
2781 
2782 status_t
2783 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
2784 {
2785 	thread_id sender;
2786 	status_t code;
2787 
2788 	if (!IS_USER_ADDRESS(_userSender)
2789 		|| !IS_USER_ADDRESS(buffer))
2790 		return B_BAD_ADDRESS;
2791 
2792 	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
2793 		// supports userland buffers
2794 
2795 	if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
2796 		return B_BAD_ADDRESS;
2797 
2798 	return code;
2799 }
2800 
2801 
2802 status_t
2803 _user_block_thread(uint32 flags, bigtime_t timeout)
2804 {
2805 	syscall_restart_handle_timeout_pre(flags, timeout);
2806 	flags |= B_CAN_INTERRUPT;
2807 
2808 	struct thread* thread = thread_get_current_thread();
2809 
2810 	InterruptsSpinLocker locker(thread_spinlock);
2811 
2812 	// check, if already done
2813 	// check if we're already done
2814 		return thread->user_thread->wait_status;
2815 
2816 	// nope, so wait
2817 	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");
2818 	status_t status = thread_block_with_timeout_locked(flags, timeout);
2819 	thread->user_thread->wait_status = status;
2820 
2821 	return syscall_restart_handle_timeout_post(status, timeout);
2822 }
2823 
2824 
2825 status_t
2826 _user_unblock_thread(thread_id threadID, status_t status)
2827 {
2828 	InterruptsSpinLocker locker(thread_spinlock);
2829 	return user_unblock_thread(threadID, status);
2830 }
2831 
2832 
2833 status_t
2834 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
2835 {
2836 	enum {
2837 		MAX_USER_THREADS_TO_UNBLOCK	= 128
2838 	};
2839 
2840 	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
2841 		return B_BAD_ADDRESS;
2842 	if (count > MAX_USER_THREADS_TO_UNBLOCK)
2843 		return B_BAD_VALUE;
2844 
2845 	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
2846 	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
2847 		return B_BAD_ADDRESS;
2848 
2849 	for (uint32 i = 0; i < count; i++)
2850 		user_unblock_thread(threads[i], status);
2851 
2852 	return B_OK;
2853 }
2854 
2855 
2856 // TODO: the following two functions don't belong here
2857 
2858 
2859 int
2860 _user_getrlimit(int resource, struct rlimit *urlp)
2861 {
2862 	struct rlimit rl;
2863 	int ret;
2864 
2865 	if (urlp == NULL)
2866 		return EINVAL;
2867 
2868 	if (!IS_USER_ADDRESS(urlp))
2869 		return B_BAD_ADDRESS;
2870 
2871 	ret = common_getrlimit(resource, &rl);
2872 
2873 	if (ret == 0) {
2874 		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
2875 		if (ret < 0)
2876 			return ret;
2877 
2878 		return 0;
2879 	}
2880 
2881 	return ret;
2882 }
2883 
2884 
2885 int
2886 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
2887 {
2888 	struct rlimit resourceLimit;
2889 
2890 	if (userResourceLimit == NULL)
2891 		return EINVAL;
2892 
2893 	if (!IS_USER_ADDRESS(userResourceLimit)
2894 		|| user_memcpy(&resourceLimit, userResourceLimit,
2895 			sizeof(struct rlimit)) < B_OK)
2896 		return B_BAD_ADDRESS;
2897 
2898 	return common_setrlimit(resource, &resourceLimit);
2899 }
2900