xref: /haiku/src/system/kernel/team.cpp (revision e9c4d47ad719d6fd67cd9b75b41ebbec563e7a79)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 /*!	Team functions */
11 
12 #include <stdio.h>
13 #include <stdlib.h>
14 #include <string.h>
15 #include <sys/wait.h>
16 
17 #include <OS.h>
18 
19 #include <AutoDeleter.h>
20 #include <FindDirectory.h>
21 
22 #include <boot_device.h>
23 #include <elf.h>
24 #include <file_cache.h>
25 #include <fs/KPath.h>
26 #include <heap.h>
27 #include <int.h>
28 #include <kernel.h>
29 #include <kimage.h>
30 #include <kscheduler.h>
31 #include <ksignal.h>
32 #include <port.h>
33 #include <posix/realtime_sem.h>
34 #include <posix/xsi_semaphore.h>
35 #include <sem.h>
36 #include <syscall_process_info.h>
37 #include <syscall_restart.h>
38 #include <syscalls.h>
39 #include <team.h>
40 #include <tls.h>
41 #include <tracing.h>
42 #include <user_runtime.h>
43 #include <user_thread.h>
44 #include <usergroup.h>
45 #include <vfs.h>
46 #include <vm.h>
47 #include <vm_address_space.h>
48 #include <util/AutoLock.h>
49 #include <util/khash.h>
50 
51 //#define TRACE_TEAM
52 #ifdef TRACE_TEAM
53 #	define TRACE(x) dprintf x
54 #else
55 #	define TRACE(x) ;
56 #endif
57 
58 
59 struct team_key {
60 	team_id id;
61 };
62 
63 struct team_arg {
64 	char	*path;
65 	char	**flat_args;
66 	size_t	flat_args_size;
67 	uint32	arg_count;
68 	uint32	env_count;
69 	port_id	error_port;
70 	uint32	error_token;
71 };
72 
73 struct fork_arg {
74 	area_id		user_stack_area;
75 	addr_t		user_stack_base;
76 	size_t		user_stack_size;
77 	addr_t		user_local_storage;
78 	sigset_t	sig_block_mask;
79 	struct user_thread* user_thread;
80 
81 	struct arch_fork_arg arch_info;
82 };
83 
84 
85 static hash_table *sTeamHash = NULL;
86 static hash_table *sGroupHash = NULL;
87 static struct team *sKernelTeam = NULL;
88 
89 // some arbitrarily chosen limits - should probably depend on the available
90 // memory (the limit is not yet enforced)
91 static int32 sMaxTeams = 2048;
92 static int32 sUsedTeams = 1;
93 
94 spinlock gTeamSpinlock = B_SPINLOCK_INITIALIZER;
95 
96 
97 // #pragma mark - Tracing
98 
99 
100 #if TEAM_TRACING
101 namespace TeamTracing {
102 
103 class TeamForked : public AbstractTraceEntry {
104 public:
105 	TeamForked(thread_id forkedThread)
106 		:
107 		fForkedThread(forkedThread)
108 	{
109 		Initialized();
110 	}
111 
112 	virtual void AddDump(TraceOutput& out)
113 	{
114 		out.Print("team forked, new thread %ld", fForkedThread);
115 	}
116 
117 private:
118 	thread_id			fForkedThread;
119 };
120 
121 
122 class ExecTeam : public AbstractTraceEntry {
123 public:
124 	ExecTeam(const char* path, int32 argCount, const char* const* args,
125 			int32 envCount, const char* const* env)
126 		:
127 		fArgCount(argCount),
128 		fArgs(NULL)
129 	{
130 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
131 			false);
132 
133 		// determine the buffer size we need for the args
134 		size_t argBufferSize = 0;
135 		for (int32 i = 0; i < argCount; i++)
136 			argBufferSize += strlen(args[i]) + 1;
137 
138 		// allocate a buffer
139 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
140 		if (fArgs) {
141 			char* buffer = fArgs;
142 			for (int32 i = 0; i < argCount; i++) {
143 				size_t argSize = strlen(args[i]) + 1;
144 				memcpy(buffer, args[i], argSize);
145 				buffer += argSize;
146 			}
147 		}
148 
149 		// ignore env for the time being
150 		(void)envCount;
151 		(void)env;
152 
153 		Initialized();
154 	}
155 
156 	virtual void AddDump(TraceOutput& out)
157 	{
158 		out.Print("team exec, \"%s\", args:", fPath);
159 
160 		if (fArgs != NULL) {
161 			char* args = fArgs;
162 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
163 				out.Print(" \"%s\"", args);
164 				args += strlen(args) + 1;
165 			}
166 		} else
167 			out.Print(" <too long>");
168 	}
169 
170 private:
171 	char*	fPath;
172 	int32	fArgCount;
173 	char*	fArgs;
174 };
175 
176 
177 static const char*
178 job_control_state_name(job_control_state state)
179 {
180 	switch (state) {
181 		case JOB_CONTROL_STATE_NONE:
182 			return "none";
183 		case JOB_CONTROL_STATE_STOPPED:
184 			return "stopped";
185 		case JOB_CONTROL_STATE_CONTINUED:
186 			return "continued";
187 		case JOB_CONTROL_STATE_DEAD:
188 			return "dead";
189 		default:
190 			return "invalid";
191 	}
192 }
193 
194 
195 class SetJobControlState : public AbstractTraceEntry {
196 public:
197 	SetJobControlState(team_id team, job_control_state newState, int signal)
198 		:
199 		fTeam(team),
200 		fNewState(newState),
201 		fSignal(signal)
202 	{
203 		Initialized();
204 	}
205 
206 	virtual void AddDump(TraceOutput& out)
207 	{
208 		out.Print("team set job control state, team %ld, "
209 			"new state: %s, signal: %d",
210 			fTeam, job_control_state_name(fNewState), fSignal);
211 	}
212 
213 private:
214 	team_id				fTeam;
215 	job_control_state	fNewState;
216 	int					fSignal;
217 };
218 
219 
220 class WaitForChild : public AbstractTraceEntry {
221 public:
222 	WaitForChild(pid_t child, uint32 flags)
223 		:
224 		fChild(child),
225 		fFlags(flags)
226 	{
227 		Initialized();
228 	}
229 
230 	virtual void AddDump(TraceOutput& out)
231 	{
232 		out.Print("team wait for child, child: %ld, "
233 			"flags: 0x%lx", fChild, fFlags);
234 	}
235 
236 private:
237 	pid_t	fChild;
238 	uint32	fFlags;
239 };
240 
241 
242 class WaitForChildDone : public AbstractTraceEntry {
243 public:
244 	WaitForChildDone(const job_control_entry& entry)
245 		:
246 		fState(entry.state),
247 		fTeam(entry.thread),
248 		fStatus(entry.status),
249 		fReason(entry.reason),
250 		fSignal(entry.signal)
251 	{
252 		Initialized();
253 	}
254 
255 	WaitForChildDone(status_t error)
256 		:
257 		fTeam(error)
258 	{
259 		Initialized();
260 	}
261 
262 	virtual void AddDump(TraceOutput& out)
263 	{
264 		if (fTeam >= 0) {
265 			out.Print("team wait for child done, team: %ld, "
266 				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
267 				fTeam, job_control_state_name(fState), fStatus, fReason,
268 				fSignal);
269 		} else {
270 			out.Print("team wait for child failed, error: "
271 				"0x%lx, ", fTeam);
272 		}
273 	}
274 
275 private:
276 	job_control_state	fState;
277 	team_id				fTeam;
278 	status_t			fStatus;
279 	uint16				fReason;
280 	uint16				fSignal;
281 };
282 
283 }	// namespace TeamTracing
284 
285 #	define T(x) new(std::nothrow) TeamTracing::x;
286 #else
287 #	define T(x) ;
288 #endif
289 
290 
291 
292 //	#pragma mark - Private functions
293 
294 
295 static void
296 _dump_team_info(struct team *team)
297 {
298 	kprintf("TEAM: %p\n", team);
299 	kprintf("id:          %ld (%#lx)\n", team->id, team->id);
300 	kprintf("name:        '%s'\n", team->name);
301 	kprintf("args:        '%s'\n", team->args);
302 	kprintf("next:        %p\n", team->next);
303 	kprintf("parent:      %p", team->parent);
304 	if (team->parent != NULL) {
305 		kprintf(" (id = %ld)\n", team->parent->id);
306 	} else
307 		kprintf("\n");
308 
309 	kprintf("children:    %p\n", team->children);
310 	kprintf("num_threads: %d\n", team->num_threads);
311 	kprintf("state:       %d\n", team->state);
312 	kprintf("flags:       0x%lx\n", team->flags);
313 	kprintf("io_context:  %p\n", team->io_context);
314 	if (team->address_space)
315 		kprintf("address_space: %p\n", team->address_space);
316 	kprintf("main_thread: %p\n", team->main_thread);
317 	kprintf("thread_list: %p\n", team->thread_list);
318 	kprintf("group_id:    %ld\n", team->group_id);
319 	kprintf("session_id:  %ld\n", team->session_id);
320 }
321 
322 
323 static int
324 dump_team_info(int argc, char **argv)
325 {
326 	struct hash_iterator iterator;
327 	struct team *team;
328 	team_id id = -1;
329 	bool found = false;
330 
331 	if (argc < 2) {
332 		struct thread* thread = thread_get_current_thread();
333 		if (thread != NULL && thread->team != NULL)
334 			_dump_team_info(thread->team);
335 		else
336 			kprintf("No current team!\n");
337 		return 0;
338 	}
339 
340 	id = strtoul(argv[1], NULL, 0);
341 	if (IS_KERNEL_ADDRESS(id)) {
342 		// semi-hack
343 		_dump_team_info((struct team *)id);
344 		return 0;
345 	}
346 
347 	// walk through the team list, trying to match name or id
348 	hash_open(sTeamHash, &iterator);
349 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
350 		if ((team->name && strcmp(argv[1], team->name) == 0) || team->id == id) {
351 			_dump_team_info(team);
352 			found = true;
353 			break;
354 		}
355 	}
356 	hash_close(sTeamHash, &iterator, false);
357 
358 	if (!found)
359 		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
360 	return 0;
361 }
362 
363 
364 static int
365 dump_teams(int argc, char **argv)
366 {
367 	struct hash_iterator iterator;
368 	struct team *team;
369 
370 	kprintf("team           id  parent      name\n");
371 	hash_open(sTeamHash, &iterator);
372 
373 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
374 		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->name);
375 	}
376 
377 	hash_close(sTeamHash, &iterator, false);
378 	return 0;
379 }
380 
381 
382 static int
383 team_struct_compare(void *_p, const void *_key)
384 {
385 	struct team *p = (struct team*)_p;
386 	const struct team_key *key = (const struct team_key*)_key;
387 
388 	if (p->id == key->id)
389 		return 0;
390 
391 	return 1;
392 }
393 
394 
395 static uint32
396 team_struct_hash(void *_p, const void *_key, uint32 range)
397 {
398 	struct team *p = (struct team*)_p;
399 	const struct team_key *key = (const struct team_key*)_key;
400 
401 	if (p != NULL)
402 		return p->id % range;
403 
404 	return (uint32)key->id % range;
405 }
406 
407 
408 static int
409 process_group_compare(void *_group, const void *_key)
410 {
411 	struct process_group *group = (struct process_group*)_group;
412 	const struct team_key *key = (const struct team_key*)_key;
413 
414 	if (group->id == key->id)
415 		return 0;
416 
417 	return 1;
418 }
419 
420 
421 static uint32
422 process_group_hash(void *_group, const void *_key, uint32 range)
423 {
424 	struct process_group *group = (struct process_group*)_group;
425 	const struct team_key *key = (const struct team_key*)_key;
426 
427 	if (group != NULL)
428 		return group->id % range;
429 
430 	return (uint32)key->id % range;
431 }
432 
433 
434 static void
435 insert_team_into_parent(struct team *parent, struct team *team)
436 {
437 	ASSERT(parent != NULL);
438 
439 	team->siblings_next = parent->children;
440 	parent->children = team;
441 	team->parent = parent;
442 }
443 
444 
445 /*!	Note: must have team lock held */
446 static void
447 remove_team_from_parent(struct team *parent, struct team *team)
448 {
449 	struct team *child, *last = NULL;
450 
451 	for (child = parent->children; child != NULL; child = child->siblings_next) {
452 		if (child == team) {
453 			if (last == NULL)
454 				parent->children = child->siblings_next;
455 			else
456 				last->siblings_next = child->siblings_next;
457 
458 			team->parent = NULL;
459 			break;
460 		}
461 		last = child;
462 	}
463 }
464 
465 
466 /*!	Reparents each of our children to the kernel team
467 	Note: must have team lock held
468 */
469 static void
470 reparent_children(struct team *team)
471 {
472 	struct team *child;
473 
474 	while ((child = team->children) != NULL) {
475 		// remove the child from the current team and add it to the kernel team
476 		remove_team_from_parent(team, child);
477 		insert_team_into_parent(sKernelTeam, child);
478 	}
479 
480 	// move job control entries too
481 	sKernelTeam->stopped_children->entries.MoveFrom(
482 		&team->stopped_children->entries);
483 	sKernelTeam->continued_children->entries.MoveFrom(
484 		&team->continued_children->entries);
485 
486 	// Note, we don't move the dead children entries. Those will be deleted
487 	// when the team structure is deleted.
488 }
489 
490 
491 static bool
492 is_session_leader(struct team *team)
493 {
494 	return team->session_id == team->id;
495 }
496 
497 
498 static bool
499 is_process_group_leader(struct team *team)
500 {
501 	return team->group_id == team->id;
502 }
503 
504 
505 static void
506 deferred_delete_process_group(struct process_group *group)
507 {
508 	if (group == NULL)
509 		return;
510 
511 	// remove_group_from_session() keeps this pointer around
512 	// only if the session can be freed as well
513 	if (group->session) {
514 		TRACE(("deferred_delete_process_group(): frees session %ld\n",
515 			group->session->id));
516 		deferred_free(group->session);
517 	}
518 
519 	deferred_free(group);
520 }
521 
522 
523 /*!	Removes a group from a session. If the session is no longer used, it
524 	will be freed together with the group (see deferred_delete_process_group()).
525 	You must hold the team lock when calling this function.
526 */
527 static void
528 remove_group_from_session(struct process_group *group)
529 {
530 	struct process_session *session = group->session;
531 
532 	// the group must be in a session for this function to have any effect
533 	if (session == NULL)
534 		return;
535 
536 	hash_remove(sGroupHash, group);
537 
538 	// we cannot free the session here, so if this was its last group, we keep
539 	// the session link around - deferred_delete_process_group() will free it
540 	if (--session->group_count > 0)
541 		group->session = NULL;
542 }
543 
544 
545 /*!	Team lock must be held.
546 */
547 static void
548 acquire_process_group_ref(pid_t groupID)
549 {
550 	process_group* group = team_get_process_group_locked(NULL, groupID);
551 	if (group == NULL) {
552 		panic("acquire_process_group_ref(): unknown group ID: %ld", groupID);
553 		return;
554 	}
555 
556 	group->refs++;
557 }
558 
559 
560 /*!	Team lock must be held.
561 */
562 static void
563 release_process_group_ref(pid_t groupID)
564 {
565 	process_group* group = team_get_process_group_locked(NULL, groupID);
566 	if (group == NULL) {
567 		panic("release_process_group_ref(): unknown group ID: %ld", groupID);
568 		return;
569 	}
570 
571 	if (group->refs <= 0) {
572 		panic("release_process_group_ref(%ld): ref count already 0", groupID);
573 		return;
574 	}
575 
576 	if (--group->refs > 0)
577 		return;
578 
579 	// group is no longer used
580 
581 	remove_group_from_session(group);
582 	deferred_delete_process_group(group);
583 }
584 
585 
586 /*!	You must hold the team lock when calling this function. */
587 static void
588 insert_group_into_session(struct process_session *session, struct process_group *group)
589 {
590 	if (group == NULL)
591 		return;
592 
593 	group->session = session;
594 	hash_insert(sGroupHash, group);
595 	session->group_count++;
596 }
597 
598 
599 /*!	You must hold the team lock when calling this function. */
600 static void
601 insert_team_into_group(struct process_group *group, struct team *team)
602 {
603 	team->group = group;
604 	team->group_id = group->id;
605 	team->session_id = group->session->id;
606 
607 	team->group_next = group->teams;
608 	group->teams = team;
609 	acquire_process_group_ref(group->id);
610 }
611 
612 
613 /*!	Removes the team from the group.
614 
615 	\param team the team that'll be removed from its group
616 */
617 static void
618 remove_team_from_group(struct team *team)
619 {
620 	struct process_group *group = team->group;
621 	struct team *current, *last = NULL;
622 
623 	// the team must be in any team to let this function have any effect
624 	if  (group == NULL)
625 		return;
626 
627 	for (current = group->teams; current != NULL; current = current->group_next) {
628 		if (current == team) {
629 			if (last == NULL)
630 				group->teams = current->group_next;
631 			else
632 				last->group_next = current->group_next;
633 
634 			team->group = NULL;
635 			break;
636 		}
637 		last = current;
638 	}
639 
640 	team->group = NULL;
641 	team->group_next = NULL;
642 
643 	release_process_group_ref(group->id);
644 }
645 
646 
647 static struct process_group *
648 create_process_group(pid_t id)
649 {
650 	struct process_group *group = (struct process_group *)malloc(sizeof(struct process_group));
651 	if (group == NULL)
652 		return NULL;
653 
654 	group->id = id;
655 	group->refs = 0;
656 	group->session = NULL;
657 	group->teams = NULL;
658 	group->orphaned = true;
659 	return group;
660 }
661 
662 
663 static struct process_session *
664 create_process_session(pid_t id)
665 {
666 	struct process_session *session
667 		= (struct process_session *)malloc(sizeof(struct process_session));
668 	if (session == NULL)
669 		return NULL;
670 
671 	session->id = id;
672 	session->group_count = 0;
673 	session->controlling_tty = -1;
674 	session->foreground_group = -1;
675 
676 	return session;
677 }
678 
679 
680 static void
681 set_team_name(struct team* team, const char* name)
682 {
683 	if (const char* lastSlash = strrchr(name, '/'))
684 		name = lastSlash + 1;
685 
686 	strlcpy(team->name, name, B_OS_NAME_LENGTH);
687 }
688 
689 
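/*!	Allocates and initializes a new team structure, including the bookkeeping
	for dead/stopped/continued children and the team's job control entry.
	Returns NULL if any allocation fails; on success the caller owns the
	structure and later releases it with delete_team_struct().
*/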
690 static struct team *
691 create_team_struct(const char *name, bool kernel)
692 {
693 	struct team *team = (struct team *)malloc(sizeof(struct team));
694 	if (team == NULL)
695 		return NULL;
696 	MemoryDeleter teamDeleter(team);
697 
698 	team->next = team->siblings_next = team->children = team->parent = NULL;
699 	team->id = allocate_thread_id();
700 	set_team_name(team, name);
701 	team->args[0] = '\0';
702 	team->num_threads = 0;
703 	team->io_context = NULL;
704 	team->address_space = NULL;
705 	team->realtime_sem_context = NULL;
706 	team->xsi_sem_context = NULL;
707 	team->thread_list = NULL;
708 	team->main_thread = NULL;
709 	team->loading_info = NULL;
710 	team->state = TEAM_STATE_BIRTH;
711 	team->flags = 0;
712 	team->death_sem = -1;
713 	team->user_data_area = -1;
714 	team->user_data = 0;
715 	team->used_user_data = 0;
716 	team->user_data_size = 0;
717 	team->free_user_threads = NULL;
718 
719 	team->supplementary_groups = NULL;
720 	team->supplementary_group_count = 0;
721 
722 	team->dead_threads_kernel_time = 0;
723 	team->dead_threads_user_time = 0;
724 
725 	// dead threads
726 	list_init(&team->dead_threads);
727 	team->dead_threads_count = 0;
728 
729 	// dead children
730 	team->dead_children = new(nothrow) team_dead_children;
731 	if (team->dead_children == NULL)
732 		return NULL;
733 	ObjectDeleter<team_dead_children> deadChildrenDeleter(team->dead_children);
734 
735 	team->dead_children->count = 0;
736 	team->dead_children->kernel_time = 0;
737 	team->dead_children->user_time = 0;
738 
739 	// stopped children
740 	team->stopped_children = new(nothrow) team_job_control_children;
741 	if (team->stopped_children == NULL)
742 		return NULL;
743 	ObjectDeleter<team_job_control_children> stoppedChildrenDeleter(
744 		team->stopped_children);
745 
746 	// continued children
747 	team->continued_children = new(nothrow) team_job_control_children;
748 	if (team->continued_children == NULL)
749 		return NULL;
750 	ObjectDeleter<team_job_control_children> continuedChildrenDeleter(
751 		team->continued_children);
752 
753 	// job control entry
754 	team->job_control_entry = new(nothrow) job_control_entry;
755 	if (team->job_control_entry == NULL)
756 		return NULL;
757 	ObjectDeleter<job_control_entry> jobControlEntryDeleter(
758 		team->job_control_entry);
759 	team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
760 	team->job_control_entry->thread = team->id;
761 	team->job_control_entry->team = team;
762 
763 	list_init(&team->image_list);
764 	list_init(&team->watcher_list);
765 
766 	clear_team_debug_info(&team->debug_info, true);
767 
768 	if (arch_team_init_team_struct(team, kernel) < 0)
769 		return NULL;
770 
771 	// publish dead/stopped/continued children condition vars
772 	team->dead_children->condition_variable.Init(team->dead_children,
773 		"team children");
774 
775 	// keep all allocated structures
776 	jobControlEntryDeleter.Detach();
777 	continuedChildrenDeleter.Detach();
778 	stoppedChildrenDeleter.Detach();
779 	deadChildrenDeleter.Detach();
780 	teamDeleter.Detach();
781 
782 	return team;
783 }
784 
785 
786 static void
787 delete_team_struct(struct team *team)
788 {
789 	while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
790 			&team->dead_threads)) {
791 		free(threadDeathEntry);
792 	}
793 
794 	while (job_control_entry* entry = team->dead_children->entries.RemoveHead())
795 		delete entry;
796 
797 	while (free_user_thread* entry = team->free_user_threads) {
798 		team->free_user_threads = entry->next;
799 		free(entry);
800 	}
801 
802 	malloc_referenced_release(team->supplementary_groups);
803 
804 	delete team->job_control_entry;
805 		// usually already NULL and transferred to the parent
806 	delete team->continued_children;
807 	delete team->stopped_children;
808 	delete team->dead_children;
809 	free(team);
810 }
811 
812 
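/*!	Creates the team's "user area" (a small user-space area at
	KERNEL_USER_DATA_BASE) and resets the related allocation bookkeeping
	(user_data, used_user_data, user_data_size, free_user_threads).
*/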
813 static status_t
814 create_team_user_data(struct team* team)
815 {
816 	void* address = (void*)KERNEL_USER_DATA_BASE;
817 	size_t size = 4 * B_PAGE_SIZE;
818 	team->user_data_area = create_area_etc(team->id, "user area", &address,
819 		B_BASE_ADDRESS, size, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0);
820 	if (team->user_data_area < 0)
821 		return team->user_data_area;
822 
823 	team->user_data = (addr_t)address;
824 	team->used_user_data = 0;
825 	team->user_data_size = size;
826 	team->free_user_threads = NULL;
827 
828 	return B_OK;
829 }
830 
831 
832 static void
833 delete_team_user_data(struct team* team)
834 {
835 	if (team->user_data_area >= 0) {
836 		vm_delete_area(team->id, team->user_data_area, true);
837 		team->user_data = 0;
838 		team->used_user_data = 0;
839 		team->user_data_size = 0;
840 		team->user_data_area = -1;
841 		while (free_user_thread* entry = team->free_user_threads) {
842 			team->free_user_threads = entry->next;
843 			free(entry);
844 		}
845 	}
846 }
847 
848 
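/*!	Copies the flattened argument/environment block from userland into a
	kernel buffer, checks the size limits and the NULL termination of the two
	pointer arrays, and relocates the string pointers into the kernel copy.
	On success *_flatArgs points to the malloc()ed copy, which the caller is
	responsible for freeing.
*/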
849 static status_t
850 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
851 	int32 argCount, int32 envCount, char**& _flatArgs)
852 {
853 	if (argCount < 0 || envCount < 0)
854 		return B_BAD_VALUE;
855 
856 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
857 		return B_TOO_MANY_ARGS;
858 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
859 		return B_BAD_VALUE;
860 
861 	if (!IS_USER_ADDRESS(userFlatArgs))
862 		return B_BAD_ADDRESS;
863 
864 	// allocate kernel memory
865 	char** flatArgs = (char**)malloc(flatArgsSize);
866 	if (flatArgs == NULL)
867 		return B_NO_MEMORY;
868 
869 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
870 		free(flatArgs);
871 		return B_BAD_ADDRESS;
872 	}
873 
874 	// check and relocate the array
875 	status_t error = B_OK;
876 	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
877 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
878 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
879 		if (i == argCount || i == argCount + envCount + 1) {
880 			// check array null termination
881 			if (flatArgs[i] != NULL) {
882 				error = B_BAD_VALUE;
883 				break;
884 			}
885 		} else {
886 			// check string
887 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
888 			size_t maxLen = stringEnd - arg;
889 			if (arg < stringBase || arg >= stringEnd
890 					|| strnlen(arg, maxLen) == maxLen) {
891 				error = B_BAD_VALUE;
892 				break;
893 			}
894 
895 			flatArgs[i] = arg;
896 		}
897 	}
898 
899 	if (error == B_OK)
900 		_flatArgs = flatArgs;
901 	else
902 		free(flatArgs);
903 
904 	return error;
905 }
906 
907 
908 static void
909 free_team_arg(struct team_arg *teamArg)
910 {
911 	if (teamArg != NULL) {
912 		free(teamArg->flat_args);
913 		free(teamArg->path);
914 		free(teamArg);
915 	}
916 }
917 
918 
919 static status_t
920 create_team_arg(struct team_arg **_teamArg, const char *path, char** flatArgs,
921 	size_t flatArgsSize, int32 argCount, int32 envCount, port_id port,
922 	uint32 token)
923 {
924 	struct team_arg *teamArg = (struct team_arg*)malloc(sizeof(team_arg));
925 	if (teamArg == NULL)
926 		return B_NO_MEMORY;
927 
928 	teamArg->path = strdup(path);
929 	if (teamArg->path == NULL) {
930 		free(teamArg);
931 		return B_NO_MEMORY;
932 	}
933 
934 	// copy the args over
935 
936 	teamArg->flat_args = flatArgs;
937 	teamArg->flat_args_size = flatArgsSize;
938 	teamArg->arg_count = argCount;
939 	teamArg->env_count = envCount;
940 	teamArg->error_port = port;
941 	teamArg->error_token = token;
942 
943 	*_teamArg = teamArg;
944 	return B_OK;
945 }
946 
947 
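/*!	Entry function of a newly loaded team's main thread. It creates the main
	user stack (including TLS and the user_space_program_args structure),
	copies the flat arguments onto it, loads runtime_loader into the team,
	and jumps to userland. Only returns in case of error.
*/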
948 static int32
949 team_create_thread_start(void *args)
950 {
951 	status_t err;
952 	struct thread *t;
953 	struct team *team;
954 	struct team_arg *teamArgs = (struct team_arg*)args;
955 	const char *path;
956 	addr_t entry;
957 	char ustack_name[128];
958 	uint32 sizeLeft;
959 	char **userArgs;
960 	char **userEnv;
961 	struct user_space_program_args *programArgs;
962 	uint32 argCount, envCount, i;
963 
964 	t = thread_get_current_thread();
965 	team = t->team;
966 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
967 
968 	TRACE(("team_create_thread_start: entry thread %ld\n", t->id));
969 
970 	// get a user thread for the main thread
971 	t->user_thread = team_allocate_user_thread(team);
972 
973 	// create an initial primary stack area
974 
975 	// Main stack area layout is currently as follows (starting from 0):
976 	//
977 	// size								| usage
978 	// ---------------------------------+--------------------------------
979 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
980 	// TLS_SIZE							| TLS data
981 	// sizeof(user_space_program_args)	| argument structure for the runtime
982 	//									| loader
983 	// flat arguments size				| flat process arguments and environment
984 
985 	// ToDo: ENV_SIZE is a) limited, and b) not used after libroot copied it to the heap
986 	// ToDo: we could reserve the whole USER_STACK_REGION upfront...
987 
988 	sizeLeft = PAGE_ALIGN(USER_MAIN_THREAD_STACK_SIZE
989 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE + TLS_SIZE
990 		+ sizeof(struct user_space_program_args) + teamArgs->flat_args_size);
991 	t->user_stack_base = USER_STACK_REGION + USER_STACK_REGION_SIZE - sizeLeft;
992 	t->user_stack_size = USER_MAIN_THREAD_STACK_SIZE
993 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
994 		// the exact location at the end of the user stack area
995 
996 	sprintf(ustack_name, "%s_main_stack", team->name);
997 	t->user_stack_area = create_area_etc(team->id, ustack_name,
998 		(void **)&t->user_stack_base, B_EXACT_ADDRESS, sizeLeft, B_NO_LOCK,
999 		B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0);
1000 	if (t->user_stack_area < 0) {
1001 		dprintf("team_create_thread_start: could not create default user stack region\n");
1002 
1003 		free_team_arg(teamArgs);
1004 		return t->user_stack_area;
1005 	}
1006 
1007 	// now that the TLS area is allocated, initialize TLS
1008 	arch_thread_init_tls(t);
1009 
1010 	argCount = teamArgs->arg_count;
1011 	envCount = teamArgs->env_count;
1012 
1013 	programArgs = (struct user_space_program_args *)(t->user_stack_base
1014 		+ t->user_stack_size + TLS_SIZE);
1015 
1016 	userArgs = (char**)(programArgs + 1);
1017 	userEnv = userArgs + argCount + 1;
1018 	path = teamArgs->path;
1019 
1020 	if (user_strlcpy(programArgs->program_path, path,
1021 				sizeof(programArgs->program_path)) < B_OK
1022 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1023 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char **)) < B_OK
1024 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1025 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char **)) < B_OK
1026 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1027 				sizeof(port_id)) < B_OK
1028 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1029 				sizeof(uint32)) < B_OK
1030 		|| user_memcpy(userArgs, teamArgs->flat_args,
1031 				teamArgs->flat_args_size) < B_OK) {
1032 		// the team deletion process will clean this mess
1033 		return B_BAD_ADDRESS;
1034 	}
1035 
1036 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1037 
1038 	// add args to info member
1039 	team->args[0] = 0;
1040 	strlcpy(team->args, path, sizeof(team->args));
1041 	for (i = 1; i < argCount; i++) {
1042 		strlcat(team->args, " ", sizeof(team->args));
1043 		strlcat(team->args, teamArgs->flat_args[i], sizeof(team->args));
1044 	}
1045 
1046 	free_team_arg(teamArgs);
1047 		// the arguments are already on the user stack, we no longer need
1048 		// them in this form
1049 
1050 	// NOTE: Normally arch_thread_enter_userspace() never returns, that is,
1051 	// automatic variables with function scope will never be destroyed.
1052 	{
1053 		// find runtime_loader path
1054 		KPath runtimeLoaderPath;
1055 		err = find_directory(B_BEOS_SYSTEM_DIRECTORY, gBootDevice, false,
1056 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1057 		if (err < B_OK) {
1058 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1059 				strerror(err)));
1060 			return err;
1061 		}
1062 		runtimeLoaderPath.UnlockBuffer();
1063 		err = runtimeLoaderPath.Append("runtime_loader");
1064 
1065 		if (err == B_OK)
1066 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0, &entry);
1067 	}
1068 
1069 	if (err < B_OK) {
1070 		// Luckily, we don't have to clean up the mess we created - that's
1071 		// done for us by the normal team deletion process
1072 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1073 			"%s\n", strerror(err)));
1074 		return err;
1075 	}
1076 
1077 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1078 
1079 	team->state = TEAM_STATE_NORMAL;
1080 
1081 	// jump to the entry point in user space
1082 	return arch_thread_enter_userspace(t, entry, programArgs, NULL);
1083 		// only returns in case of error
1084 }
1085 
1086 
1087 /*!	The BeOS kernel exports a function with this name, but most probably with
1088 	different parameters; we should not make it public.
1089 */
1090 static thread_id
1091 load_image_etc(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1092 	int32 envCount, int32 priority, uint32 flags, port_id errorPort,
1093 	uint32 errorToken)
1094 {
1095 	char** flatArgs = _flatArgs;
1096 	struct team *team, *parent;
1097 	const char *threadName;
1098 	thread_id thread;
1099 	status_t status;
1100 	cpu_status state;
1101 	struct team_arg *teamArgs;
1102 	struct team_loading_info loadingInfo;
1103 
1104 	if (flatArgs == NULL || argCount == 0)
1105 		return B_BAD_VALUE;
1106 
1107 	const char* path = flatArgs[0];
1108 
1109 	TRACE(("load_image_etc: name '%s', args = %p, argCount = %ld\n",
1110 		path, flatArgs, argCount));
1111 
1112 	team = create_team_struct(path, false);
1113 	if (team == NULL)
1114 		return B_NO_MEMORY;
1115 
1116 	parent = thread_get_current_thread()->team;
1117 
1118 	if (flags & B_WAIT_TILL_LOADED) {
1119 		loadingInfo.thread = thread_get_current_thread();
1120 		loadingInfo.result = B_ERROR;
1121 		loadingInfo.done = false;
1122 		team->loading_info = &loadingInfo;
1123 	}
1124 
1125 	// Inherit the parent's user/group, but also check the executable's
1126 	// set-user/group-id permission
1127 	inherit_parent_user_and_group(team, parent);
1128 	update_set_id_user_and_group(team, path);
1129 
1130 	state = disable_interrupts();
1131 	GRAB_TEAM_LOCK();
1132 
1133 	hash_insert(sTeamHash, team);
1134 	insert_team_into_parent(parent, team);
1135 	insert_team_into_group(parent->group, team);
1136 	sUsedTeams++;
1137 
1138 	RELEASE_TEAM_LOCK();
1139 	restore_interrupts(state);
1140 
1141 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1142 		envCount, errorPort, errorToken);
1143 
1144 	if (status != B_OK)
1145 		goto err1;
1146 
1147 	_flatArgs = NULL;
1148 		// args are owned by the team_arg structure now
1149 
1150 	// create a new io_context for this team
1151 	team->io_context = vfs_new_io_context(parent->io_context);
1152 	if (!team->io_context) {
1153 		status = B_NO_MEMORY;
1154 		goto err2;
1155 	}
1156 
1157 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1158 	vfs_exec_io_context(team->io_context);
1159 
1160 	// create an address space for this team
1161 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1162 		&team->address_space);
1163 	if (status < B_OK)
1164 		goto err3;
1165 
1166 	// cut the path from the main thread name
1167 	threadName = strrchr(path, '/');
1168 	if (threadName != NULL)
1169 		threadName++;
1170 	else
1171 		threadName = path;
1172 
1173 	// create the user data area
1174 	status = create_team_user_data(team);
1175 	if (status != B_OK)
1176 		goto err4;
1177 
1178 	// Create a kernel thread, but under the context of the new team
1179 	// The new thread will take over ownership of teamArgs
1180 	thread = spawn_kernel_thread_etc(team_create_thread_start, threadName,
1181 		B_NORMAL_PRIORITY, teamArgs, team->id, team->id);
1182 	if (thread < 0) {
1183 		status = thread;
1184 		goto err5;
1185 	}
1186 
1187 	// wait for the loader of the new team to finish its work
1188 	if (flags & B_WAIT_TILL_LOADED) {
1189 		struct thread *mainThread;
1190 
1191 		state = disable_interrupts();
1192 		GRAB_THREAD_LOCK();
1193 
1194 		mainThread = thread_get_thread_struct_locked(thread);
1195 		if (mainThread) {
1196 			// resume the team's main thread
1197 			if (mainThread->state == B_THREAD_SUSPENDED)
1198 				scheduler_enqueue_in_run_queue(mainThread);
1199 
1200 			// Now suspend ourselves until loading is finished.
1201 			// We will be woken either by the thread, when it has finished or
1202 			// aborted loading, or when the team is going to die (e.g. is
1203 			// killed). In either case, the one setting `loadingInfo.done' is
1204 			// responsible for removing the info from the team structure.
1205 			while (!loadingInfo.done) {
1206 				thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
1207 				scheduler_reschedule();
1208 			}
1209 		} else {
1210 			// Impressive! Someone managed to kill the thread in this short
1211 			// time.
1212 		}
1213 
1214 		RELEASE_THREAD_LOCK();
1215 		restore_interrupts(state);
1216 
1217 		if (loadingInfo.result < B_OK)
1218 			return loadingInfo.result;
1219 	}
1220 
1221 	// notify the debugger
1222 	user_debug_team_created(team->id);
1223 
1224 	return thread;
1225 
1226 err5:
1227 	delete_team_user_data(team);
1228 err4:
1229 	vm_put_address_space(team->address_space);
1230 err3:
1231 	vfs_free_io_context(team->io_context);
1232 err2:
1233 	free_team_arg(teamArgs);
1234 err1:
1235 	// remove the team from the team hash table and delete the team structure
1236 	state = disable_interrupts();
1237 	GRAB_TEAM_LOCK();
1238 
1239 	remove_team_from_group(team);
1240 	remove_team_from_parent(parent, team);
1241 	hash_remove(sTeamHash, team);
1242 
1243 	RELEASE_TEAM_LOCK();
1244 	restore_interrupts(state);
1245 
1246 	delete_team_struct(team);
1247 
1248 	return status;
1249 }
1250 
1251 
1252 /*!	Almost shuts down the current team and loads a new image into it.
1253 	If successful, this function does not return and will take over ownership of
1254 	the arguments provided.
1255 	This function may only be called from user space.
1256 */
1257 static status_t
1258 exec_team(const char *path, char**& _flatArgs, size_t flatArgsSize,
1259 	int32 argCount, int32 envCount)
1260 {
1261 	// NOTE: Since this function normally doesn't return, don't use automatic
1262 	// variables that need destruction in the function scope.
1263 	char** flatArgs = _flatArgs;
1264 	struct team *team = thread_get_current_thread()->team;
1265 	struct team_arg *teamArgs;
1266 	const char *threadName;
1267 	status_t status = B_OK;
1268 	cpu_status state;
1269 	struct thread *thread;
1270 	thread_id nubThreadID = -1;
1271 
1272 	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
1273 		path, argCount, envCount, team->id));
1274 
1275 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1276 
1277 	// switching the kernel at run time is probably not a good idea :)
1278 	if (team == team_get_kernel_team())
1279 		return B_NOT_ALLOWED;
1280 
1281 	// we currently need to be single threaded here
1282 	// ToDo: maybe we should just kill all other threads and
1283 	//	make the current thread the team's main thread?
1284 	if (team->main_thread != thread_get_current_thread())
1285 		return B_NOT_ALLOWED;
1286 
1287 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1288 	// We iterate through the thread list to make sure that there's no other
1289 	// thread.
1290 	state = disable_interrupts();
1291 	GRAB_TEAM_LOCK();
1292 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1293 
1294 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1295 		nubThreadID = team->debug_info.nub_thread;
1296 
1297 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1298 
1299 	for (thread = team->thread_list; thread; thread = thread->team_next) {
1300 		if (thread != team->main_thread && thread->id != nubThreadID) {
1301 			status = B_NOT_ALLOWED;
1302 			break;
1303 		}
1304 	}
1305 
1306 	RELEASE_TEAM_LOCK();
1307 	restore_interrupts(state);
1308 
1309 	if (status != B_OK)
1310 		return status;
1311 
1312 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1313 		envCount, -1, 0);
1314 
1315 	if (status != B_OK)
1316 		return status;
1317 
1318 	_flatArgs = NULL;
1319 		// args are owned by the team_arg structure now
1320 
1321 	// ToDo: remove team resources if there are any left
1322 	// thread_atkernel_exit() might not be called at all
1323 
1324 	thread_reset_for_exec();
1325 
1326 	user_debug_prepare_for_exec();
1327 
1328 	delete_team_user_data(team);
1329 	vm_delete_areas(team->address_space);
1330 	xsi_sem_undo(team);
1331 	delete_owned_ports(team->id);
1332 	sem_delete_owned_sems(team->id);
1333 	remove_images(team);
1334 	vfs_exec_io_context(team->io_context);
1335 	delete_realtime_sem_context(team->realtime_sem_context);
1336 	team->realtime_sem_context = NULL;
1337 
1338 	status = create_team_user_data(team);
1339 	if (status != B_OK) {
1340 		// creating the user data failed -- we're toast
1341 		// TODO: We should rather keep the old user area in the first place.
1342 		exit_thread(status);
1343 		return status;
1344 	}
1345 
1346 	user_debug_finish_after_exec();
1347 
1348 	// rename the team
1349 
1350 	set_team_name(team, path);
1351 
1352 	// cut the path from the team name and rename the main thread, too
1353 	threadName = strrchr(path, '/');
1354 	if (threadName != NULL)
1355 		threadName++;
1356 	else
1357 		threadName = path;
1358 	rename_thread(thread_get_current_thread_id(), threadName);
1359 
1360 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1361 
1362 	// Update user/group according to the executable's set-user/group-id
1363 	// permission.
1364 	update_set_id_user_and_group(team, path);
1365 
1366 	user_debug_team_exec();
1367 
1368 	status = team_create_thread_start(teamArgs);
1369 		// this one usually doesn't return...
1370 
1371 	// sorry, we have to kill ourselves, there is no way out anymore
1372 	// (without any areas left and all that)
1373 	exit_thread(status);
1374 
1375 	// we return a status here since the signal that is sent by the
1376 	// call above is not immediately handled
1377 	return B_ERROR;
1378 }
1379 
1380 
1381 /*! This is the first function to be called from the newly created
1382 	main child thread.
1383 	It will fill in everything that's left to do from fork_arg, and
1384 	return from the parent's fork() syscall to the child.
1385 */
1386 static int32
1387 fork_team_thread_start(void *_args)
1388 {
1389 	struct thread *thread = thread_get_current_thread();
1390 	struct fork_arg *forkArgs = (struct fork_arg *)_args;
1391 
1392 	struct arch_fork_arg archArgs = forkArgs->arch_info;
1393 		// we need a local copy of the arch dependent part
1394 
1395 	thread->user_stack_area = forkArgs->user_stack_area;
1396 	thread->user_stack_base = forkArgs->user_stack_base;
1397 	thread->user_stack_size = forkArgs->user_stack_size;
1398 	thread->user_local_storage = forkArgs->user_local_storage;
1399 	thread->sig_block_mask = forkArgs->sig_block_mask;
1400 	thread->user_thread = forkArgs->user_thread;
1401 
1402 	arch_thread_init_tls(thread);
1403 
1404 	free(forkArgs);
1405 
1406 	// restore the frame the parent thread saved, so we return from fork() as the child
1407 
1408 	arch_restore_fork_frame(&archArgs);
1409 		// This one won't return here
1410 
1411 	return 0;
1412 }
1413 
1414 
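/*!	Kernel part of fork(): creates a child team as a copy of the current one
	(areas are duplicated with vm_copy_area(), the io_context and realtime sem
	context are cloned, the image list is copied) and spawns its main thread,
	which continues in fork_team_thread_start().
	Returns the ID of the child's main thread, or an error code.
*/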
1415 static thread_id
1416 fork_team(void)
1417 {
1418 	struct thread *parentThread = thread_get_current_thread();
1419 	struct team *parentTeam = parentThread->team, *team;
1420 	struct fork_arg *forkArgs;
1421 	struct area_info info;
1422 	thread_id threadID;
1423 	cpu_status state;
1424 	status_t status;
1425 	int32 cookie;
1426 
1427 	TRACE(("fork_team(): team %ld\n", parentTeam->id));
1428 
1429 	if (parentTeam == team_get_kernel_team())
1430 		return B_NOT_ALLOWED;
1431 
1432 	// create a new team
1433 	// ToDo: this is very similar to team_create_team() - maybe we can do something about it :)
1434 
1435 	team = create_team_struct(parentTeam->name, false);
1436 	if (team == NULL)
1437 		return B_NO_MEMORY;
1438 
1439 	strlcpy(team->args, parentTeam->args, sizeof(team->args));
1440 
1441 	// Inherit the parent's user/group.
1442 	inherit_parent_user_and_group(team, parentTeam);
1443 
1444 	state = disable_interrupts();
1445 	GRAB_TEAM_LOCK();
1446 
1447 	hash_insert(sTeamHash, team);
1448 	insert_team_into_parent(parentTeam, team);
1449 	insert_team_into_group(parentTeam->group, team);
1450 	sUsedTeams++;
1451 
1452 	RELEASE_TEAM_LOCK();
1453 	restore_interrupts(state);
1454 
1455 	forkArgs = (struct fork_arg *)malloc(sizeof(struct fork_arg));
1456 	if (forkArgs == NULL) {
1457 		status = B_NO_MEMORY;
1458 		goto err1;
1459 	}
1460 
1461 	// create a new io_context for this team
1462 	team->io_context = vfs_new_io_context(parentTeam->io_context);
1463 	if (!team->io_context) {
1464 		status = B_NO_MEMORY;
1465 		goto err2;
1466 	}
1467 
1468 	// duplicate the realtime sem context
1469 	if (parentTeam->realtime_sem_context) {
1470 		team->realtime_sem_context = clone_realtime_sem_context(
1471 			parentTeam->realtime_sem_context);
1472 		if (team->realtime_sem_context == NULL) {
1473 			status = B_NO_MEMORY;
1474 			goto err25;
1475 		}
1476 	}
1477 
1478 	// create an address space for this team
1479 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1480 		&team->address_space);
1481 	if (status < B_OK)
1482 		goto err3;
1483 
1484 	// copy all areas of the team
1485 	// ToDo: should be able to handle stack areas differently (ie. don't have them copy-on-write)
1486 	// ToDo: all stacks of other threads than the current one could be left out
1487 
1488 	forkArgs->user_thread = NULL;
1489 
1490 	cookie = 0;
1491 	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
1492 		if (info.area == parentTeam->user_data_area) {
1493 			// don't clone the user area; just create a new one
1494 			status = create_team_user_data(team);
1495 			if (status != B_OK)
1496 				break;
1497 
1498 			forkArgs->user_thread = team_allocate_user_thread(team);
1499 		} else {
1500 			void *address;
1501 			area_id area = vm_copy_area(team->address_space->id, info.name,
1502 				&address, B_CLONE_ADDRESS, info.protection, info.area);
1503 			if (area < B_OK) {
1504 				status = area;
1505 				break;
1506 			}
1507 
1508 			if (info.area == parentThread->user_stack_area)
1509 				forkArgs->user_stack_area = area;
1510 		}
1511 	}
1512 
1513 	if (status < B_OK)
1514 		goto err4;
1515 
1516 	if (forkArgs->user_thread == NULL) {
1517 #if KDEBUG
1518 		panic("user data area not found, parent area is %ld",
1519 			parentTeam->user_data_area);
1520 #endif
1521 		status = B_ERROR;
1522 		goto err4;
1523 	}
1524 
1525 	forkArgs->user_stack_base = parentThread->user_stack_base;
1526 	forkArgs->user_stack_size = parentThread->user_stack_size;
1527 	forkArgs->user_local_storage = parentThread->user_local_storage;
1528 	forkArgs->sig_block_mask = parentThread->sig_block_mask;
1529 	arch_store_fork_frame(&forkArgs->arch_info);
1530 
1531 	// copy image list
1532 	image_info imageInfo;
1533 	cookie = 0;
1534 	while (get_next_image_info(parentTeam->id, &cookie, &imageInfo) == B_OK) {
1535 		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
1536 		if (image < 0)
1537 			goto err5;
1538 	}
1539 
1540 	// create a kernel thread under the context of the new team
1541 	threadID = spawn_kernel_thread_etc(fork_team_thread_start,
1542 		parentThread->name, parentThread->priority, forkArgs,
1543 		team->id, team->id);
1544 	if (threadID < 0) {
1545 		status = threadID;
1546 		goto err5;
1547 	}
1548 
1549 	// notify the debugger
1550 	user_debug_team_created(team->id);
1551 
1552 	T(TeamForked(threadID));
1553 
1554 	resume_thread(threadID);
1555 	return threadID;
1556 
1557 err5:
1558 	remove_images(team);
1559 err4:
1560 	vm_delete_address_space(team->address_space);
1561 err3:
1562 	delete_realtime_sem_context(team->realtime_sem_context);
1563 err25:
1564 	vfs_free_io_context(team->io_context);
1565 err2:
1566 	free(forkArgs);
1567 err1:
1568 	// remove the team from the team hash table and delete the team structure
1569 	state = disable_interrupts();
1570 	GRAB_TEAM_LOCK();
1571 
1572 	remove_team_from_group(team);
1573 	remove_team_from_parent(parentTeam, team);
1574 	hash_remove(sTeamHash, team);
1575 
1576 	RELEASE_TEAM_LOCK();
1577 	restore_interrupts(state);
1578 
1579 	delete_team_struct(team);
1580 
1581 	return status;
1582 }
1583 
1584 
1585 /*!	Returns whether the specified \a team has any children belonging to the
1586 	process group specified by \a groupID.
1587 	Must be called with the team lock held.
1588 */
1589 static bool
1590 has_children_in_group(struct team *parent, pid_t groupID)
1591 {
1592 	struct team *team;
1593 
1594 	struct process_group *group = team_get_process_group_locked(
1595 		parent->group->session, groupID);
1596 	if (group == NULL)
1597 		return false;
1598 
1599 	for (team = group->teams; team; team = team->group_next) {
1600 		if (team->parent == parent)
1601 			return true;
1602 	}
1603 
1604 	return false;
1605 }
1606 
1607 
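/*!	Returns the first entry in \a children that matches \a id: a positive
	\a id matches the entry of that thread, -1 matches any entry, and any
	other negative value matches entries whose process group ID equals -id.
*/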
1608 static job_control_entry*
1609 get_job_control_entry(team_job_control_children* children, pid_t id)
1610 {
1611 	for (JobControlEntryList::Iterator it = children->entries.GetIterator();
1612 		 job_control_entry* entry = it.Next();) {
1613 
1614 		if (id > 0) {
1615 			if (entry->thread == id)
1616 				return entry;
1617 		} else if (id == -1) {
1618 			return entry;
1619 		} else {
1620 			pid_t processGroup
1621 				= (entry->team ? entry->team->group_id : entry->group_id);
1622 			if (processGroup == -id)
1623 				return entry;
1624 		}
1625 	}
1626 
1627 	return NULL;
1628 }
1629 
1630 
1631 static job_control_entry*
1632 get_job_control_entry(struct team* team, pid_t id, uint32 flags)
1633 {
1634 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
1635 
1636 	if (entry == NULL && (flags & WCONTINUED) != 0)
1637 		entry = get_job_control_entry(team->continued_children, id);
1638 
1639 	if (entry == NULL && (flags & WUNTRACED) != 0)
1640 		entry = get_job_control_entry(team->stopped_children, id);
1641 
1642 	return entry;
1643 }
1644 
1645 
1646 job_control_entry::job_control_entry()
1647 	:
1648 	has_group_ref(false)
1649 {
1650 }
1651 
1652 
1653 job_control_entry::~job_control_entry()
1654 {
1655 	if (has_group_ref) {
1656 		InterruptsSpinLocker locker(gTeamSpinlock);
1657 		release_process_group_ref(group_id);
1658 	}
1659 }
1660 
1661 
1662 /*!	Team and thread lock must be held.
1663 */
1664 void
1665 job_control_entry::InitDeadState()
1666 {
1667 	if (team != NULL) {
1668 		struct thread* thread = team->main_thread;
1669 		group_id = team->group_id;
1670 		this->thread = thread->id;
1671 		status = thread->exit.status;
1672 		reason = thread->exit.reason;
1673 		signal = thread->exit.signal;
1674 		team = NULL;
1675 		acquire_process_group_ref(group_id);
1676 		has_group_ref = true;
1677 	}
1678 }
1679 
1680 
1681 job_control_entry&
1682 job_control_entry::operator=(const job_control_entry& other)
1683 {
1684 	state = other.state;
1685 	thread = other.thread;
1686 	has_group_ref = false;
1687 	team = other.team;
1688 	group_id = other.group_id;
1689 	status = other.status;
1690 	reason = other.reason;
1691 	signal = other.signal;
1692 
1693 	return *this;
1694 }
1695 
1696 
1697 /*! This is the kernel backend for waitpid(). It is a bit more powerful than
1698 	waitpid() when it comes to reporting the reason why a thread has died.
1699 */
1700 static thread_id
1701 wait_for_child(pid_t child, uint32 flags, int32 *_reason,
1702 	status_t *_returnCode)
1703 {
1704 	struct thread* thread = thread_get_current_thread();
1705 	struct team* team = thread->team;
1706 	struct job_control_entry foundEntry;
1707 	struct job_control_entry* freeDeathEntry = NULL;
1708 	status_t status = B_OK;
1709 
1710 	TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));
1711 
1712 	T(WaitForChild(child, flags));
1713 
1714 	if (child == 0) {
1715 		// wait for all children in the process group of the calling team
1716 		child = -team->group_id;
1717 	}
1718 
1719 	bool ignoreFoundEntries = false;
1720 	bool ignoreFoundEntriesChecked = false;
1721 
1722 	while (true) {
1723 		InterruptsSpinLocker locker(gTeamSpinlock);
1724 
1725 		// check whether any condition holds
1726 		job_control_entry* entry = get_job_control_entry(team, child, flags);
1727 
1728 		// If we don't have an entry yet, check whether there are any children
1729 		// complying with the process group specification at all.
1730 		if (entry == NULL) {
1731 			// No success yet -- check whether there are any children we could
1732 			// wait for.
1733 			bool childrenExist = false;
1734 			if (child == -1) {
1735 				childrenExist = team->children != NULL;
1736 			} else if (child < -1) {
1737 				childrenExist = has_children_in_group(team, -child);
1738 			} else {
1739 				if (struct team* childTeam = team_get_team_struct_locked(child))
1740 					childrenExist = childTeam->parent == team;
1741 			}
1742 
1743 			if (!childrenExist) {
1744 				// there is no child we could wait for
1745 				status = ECHILD;
1746 			} else {
1747 				// the children we're waiting for are still running
1748 				status = B_WOULD_BLOCK;
1749 			}
1750 		} else {
1751 			// got something
1752 			foundEntry = *entry;
1753 			if (entry->state == JOB_CONTROL_STATE_DEAD) {
1754 				// The child is dead. Reap its death entry.
1755 				freeDeathEntry = entry;
1756 				team->dead_children->entries.Remove(entry);
1757 				team->dead_children->count--;
1758 			} else {
1759 				// The child is well. Reset its job control state.
1760 				team_set_job_control_state(entry->team,
1761 					JOB_CONTROL_STATE_NONE, 0, false);
1762 			}
1763 		}
1764 
1765 		// If we haven't got anything yet, prepare to wait on the
1766 		// condition variable.
1767 		ConditionVariableEntry deadWaitEntry;
1768 
1769 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
1770 			team->dead_children->condition_variable.Add(&deadWaitEntry);
1771 
1772 		locker.Unlock();
1773 
1774 		// we got our entry and can return to our caller
1775 		if (status == B_OK) {
1776 			if (ignoreFoundEntries) {
1777 				// ... unless we shall ignore found entries
1778 				delete freeDeathEntry;
1779 				freeDeathEntry = NULL;
1780 				continue;
1781 			}
1782 
1783 			break;
1784 		}
1785 
1786 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
1787 			T(WaitForChildDone(status));
1788 			return status;
1789 		}
1790 
1791 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
1792 		if (status == B_INTERRUPTED) {
1793 			T(WaitForChildDone(status));
1794 			return status;
1795 		}
1796 
1797 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
1798 		// all our children are dead and fail with ECHILD. We check the
1799 		// condition at this point.
1800 		if (!ignoreFoundEntriesChecked) {
1801 			struct sigaction& handler = thread->sig_action[SIGCHLD - 1];
1802 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
1803 				|| handler.sa_handler == SIG_IGN) {
1804 				ignoreFoundEntries = true;
1805 			}
1806 
1807 			ignoreFoundEntriesChecked = true;
1808 		}
1809 	}
1810 
1811 	delete freeDeathEntry;
1812 
1813 	// when we get here, we have a valid death entry, and it has already
1814 	// been unregistered from the team or group
1815 	int reason = 0;
1816 	switch (foundEntry.state) {
1817 		case JOB_CONTROL_STATE_DEAD:
1818 			reason = foundEntry.reason;
1819 			break;
1820 		case JOB_CONTROL_STATE_STOPPED:
1821 			reason = THREAD_STOPPED;
1822 			break;
1823 		case JOB_CONTROL_STATE_CONTINUED:
1824 			reason = THREAD_CONTINUED;
1825 			break;
1826 		case JOB_CONTROL_STATE_NONE:
1827 			// can't happen
1828 			break;
1829 	}
1830 
1831 	*_returnCode = foundEntry.status;
1832 	*_reason = (foundEntry.signal << 16) | reason;
1833 
1834 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
1835 	// status is available.
1836 	if (is_signal_blocked(SIGCHLD)) {
1837 		InterruptsSpinLocker locker(gTeamSpinlock);
1838 
1839 		if (get_job_control_entry(team, child, flags) == NULL)
1840 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));
1841 	}
1842 
1843 	// When the team is dead, the main thread continues to live in the kernel
1844 	// team for a very short time. To avoid surprises for the caller, we wait
1845 	// until the thread is really gone.
1846 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
1847 		wait_for_thread(foundEntry.thread, NULL);
1848 
1849 	T(WaitForChildDone(foundEntry));
1850 
1851 	return foundEntry.thread;
1852 }
1853 
1854 
1855 /*! Fills the team_info structure with information from the specified
1856 	team.
1857 	The team lock must be held when called.
1858 */
1859 static status_t
1860 fill_team_info(struct team *team, team_info *info, size_t size)
1861 {
1862 	if (size != sizeof(team_info))
1863 		return B_BAD_VALUE;
1864 
1865 	// ToDo: Set more information for team_info
1866 	memset(info, 0, size);
1867 
1868 	info->team = team->id;
1869 	info->thread_count = team->num_threads;
1870 	info->image_count = count_images(team);
1871 	//info->area_count =
1872 	info->debugger_nub_thread = team->debug_info.nub_thread;
1873 	info->debugger_nub_port = team->debug_info.nub_port;
1874 	//info->uid =
1875 	//info->gid =
1876 
1877 	strlcpy(info->args, team->args, sizeof(info->args));
1878 	info->argc = 1;
1879 
1880 	return B_OK;
1881 }
1882 
1883 
1884 /*!	Updates the \c orphaned field of a process_group and returns its new value.
1885 	Interrupts must be disabled and the team lock held.
1886 */
1887 static bool
1888 update_orphaned_process_group(process_group* group, pid_t dyingProcess)
1889 {
1890 	// Orphaned Process Group: "A process group in which the parent of every
1891 	// member is either itself a member of the group or is not a member of the
1892 	// group's session." (Open Group Base Specs Issue 6)
1893 
1894 	// once orphaned, things won't change (exception: cf. setpgid())
1895 	if (group->orphaned)
1896 		return true;
1897 
1898 	struct team* team = group->teams;
1899 	while (team != NULL) {
1900 		struct team* parent = team->parent;
1901 		if (team->id != dyingProcess && parent != NULL
1902 			&& parent->id != dyingProcess
1903 			&& parent->group_id != group->id
1904 			&& parent->session_id == group->session->id) {
1905 			return false;
1906 		}
1907 
1908 		team = team->group_next;
1909 	}
1910 
1911 	group->orphaned = true;
1912 	return true;
1913 }
1914 
1915 
1916 /*!	Returns whether the process group contains stopped processes.
1917 	Interrupts must be disabled and the team lock held.
1918 */
1919 static bool
1920 process_group_has_stopped_processes(process_group* group)
1921 {
1922 	SpinLocker _(gThreadSpinlock);
1923 
1924 	struct team* team = group->teams;
1925 	while (team != NULL) {
1926 		if (team->main_thread->state == B_THREAD_SUSPENDED)
1927 			return true;
1928 
1929 		team = team->group_next;
1930 	}
1931 
1932 	return false;
1933 }
1934 
1935 
1936 //	#pragma mark - Private kernel API
1937 
1938 
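/*!	Initializes the team subsystem: creates the team and process group hash
	tables, the initial session and process group, and the kernel team, and
	registers the "team" and "teams" debugger commands.
*/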
1939 status_t
1940 team_init(kernel_args *args)
1941 {
1942 	struct process_session *session;
1943 	struct process_group *group;
1944 
1945 	// create the team hash table
1946 	sTeamHash = hash_init(16, offsetof(struct team, next),
1947 		&team_struct_compare, &team_struct_hash);
1948 
1949 	sGroupHash = hash_init(16, offsetof(struct process_group, next),
1950 		&process_group_compare, &process_group_hash);
1951 
1952 	// create initial session and process groups
1953 
1954 	session = create_process_session(1);
1955 	if (session == NULL)
1956 		panic("Could not create initial session.\n");
1957 
1958 	group = create_process_group(1);
1959 	if (group == NULL)
1960 		panic("Could not create initial process group.\n");
1961 
1962 	insert_group_into_session(session, group);
1963 
1964 	// create the kernel team
1965 	sKernelTeam = create_team_struct("kernel_team", true);
1966 	if (sKernelTeam == NULL)
1967 		panic("could not create kernel team!\n");
1968 	strcpy(sKernelTeam->args, sKernelTeam->name);
1969 	sKernelTeam->state = TEAM_STATE_NORMAL;
1970 
1971 	sKernelTeam->saved_set_uid = 0;
1972 	sKernelTeam->real_uid = 0;
1973 	sKernelTeam->effective_uid = 0;
1974 	sKernelTeam->saved_set_gid = 0;
1975 	sKernelTeam->real_gid = 0;
1976 	sKernelTeam->effective_gid = 0;
1977 	sKernelTeam->supplementary_groups = NULL;
1978 	sKernelTeam->supplementary_group_count = 0;
1979 
1980 	insert_team_into_group(group, sKernelTeam);
1981 
1982 	sKernelTeam->io_context = vfs_new_io_context(NULL);
1983 	if (sKernelTeam->io_context == NULL)
1984 		panic("could not create io_context for kernel team!\n");
1985 
1986 	// stick it in the team hash
1987 	hash_insert(sTeamHash, sKernelTeam);
1988 
1989 	add_debugger_command_etc("team", &dump_team_info,
1990 		"Dump info about a particular team",
1991 		"[ <id> | <address> | <name> ]\n"
1992 		"Prints information about the specified team. If no argument is given\n"
1993 		"the current team is selected.\n"
1994 		"  <id>       - The ID of the team.\n"
1995 		"  <address>  - The address of the team structure.\n"
1996 		"  <name>     - The team's name.\n", 0);
1997 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
1998 		"\n"
1999 		"Prints a list of all existing teams.\n", 0);
2000 	return B_OK;
2001 }
2002 
2003 
2004 int32
2005 team_max_teams(void)
2006 {
2007 	return sMaxTeams;
2008 }
2009 
2010 
2011 int32
2012 team_used_teams(void)
2013 {
2014 	return sUsedTeams;
2015 }
2016 
2017 
2018 /*! Returns the team's death entry for the given \a child, if there is one.
2019 	If the caller is the parent of the found team, the entry is removed from the
2020 	list and \a _deleteEntry is set to \c true. The team lock must be held. */
2021 job_control_entry*
2022 team_get_death_entry(struct team *team, thread_id child, bool* _deleteEntry)
2023 {
2024 	if (child <= 0)
2025 		return NULL;
2026 
2027 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2028 		child);
2029 	if (entry) {
2030 		// only remove the entry if the caller is the parent of the found team
2031 		if (team_get_current_team_id() == entry->thread) {
2032 			team->dead_children->entries.Remove(entry);
2033 			team->dead_children->count--;
2034 			*_deleteEntry = true;
2035 		} else {
2036 			*_deleteEntry = false;
2037 		}
2038 	}
2039 
2040 	return entry;
2041 }
2042 
2043 
2044 /*! Quick check to see if we have a valid team ID. */
2045 bool
2046 team_is_valid(team_id id)
2047 {
2048 	struct team *team;
2049 	cpu_status state;
2050 
2051 	if (id <= 0)
2052 		return false;
2053 
2054 	state = disable_interrupts();
2055 	GRAB_TEAM_LOCK();
2056 
2057 	team = team_get_team_struct_locked(id);
2058 
2059 	RELEASE_TEAM_LOCK();
2060 	restore_interrupts(state);
2061 
2062 	return team != NULL;
2063 }
2064 
2065 
2066 struct team *
2067 team_get_team_struct_locked(team_id id)
2068 {
2069 	struct team_key key;
2070 	key.id = id;
2071 
2072 	return (struct team*)hash_lookup(sTeamHash, &key);
2073 }
2074 
2075 
2076 /*! Looks up the process group with the specified ID. If a \a session is
2077 	given, the group must belong to it. You must hold the team lock when calling.
2078 */
2079 struct process_group *
2080 team_get_process_group_locked(struct process_session *session, pid_t id)
2081 {
2082 	struct process_group *group;
2083 	struct team_key key;
2084 	key.id = id;
2085 
2086 	group = (struct process_group *)hash_lookup(sGroupHash, &key);
2087 	if (group != NULL && (session == NULL || session == group->session))
2088 		return group;
2089 
2090 	return NULL;
2091 }
2092 
2093 
2094 void
2095 team_delete_process_group(struct process_group *group)
2096 {
2097 	if (group == NULL)
2098 		return;
2099 
2100 	TRACE(("team_delete_process_group(id = %ld)\n", group->id));
2101 
2102 	// remove_group_from_session() keeps this pointer around
2103 	// only if the session can be freed as well
2104 	if (group->session) {
2105 		TRACE(("team_delete_process_group(): frees session %ld\n", group->session->id));
2106 		free(group->session);
2107 	}
2108 
2109 	free(group);
2110 }
2111 
2112 
2113 void
2114 team_set_controlling_tty(int32 ttyIndex)
2115 {
2116 	struct team* team = thread_get_current_thread()->team;
2117 
2118 	InterruptsSpinLocker _(gTeamSpinlock);
2119 
2120 	team->group->session->controlling_tty = ttyIndex;
2121 	team->group->session->foreground_group = -1;
2122 }
2123 
2124 
2125 int32
2126 team_get_controlling_tty()
2127 {
2128 	struct team* team = thread_get_current_thread()->team;
2129 
2130 	InterruptsSpinLocker _(gTeamSpinlock);
2131 
2132 	return team->group->session->controlling_tty;
2133 }
2134 
2135 
2136 status_t
2137 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2138 {
2139 	struct thread* thread = thread_get_current_thread();
2140 	struct team* team = thread->team;
2141 
2142 	InterruptsSpinLocker locker(gTeamSpinlock);
2143 
2144 	process_session* session = team->group->session;
2145 
2146 	// must be the controlling tty of the calling process
2147 	if (session->controlling_tty != ttyIndex)
2148 		return ENOTTY;
2149 
2150 	// check process group -- must belong to our session
2151 	process_group* group = team_get_process_group_locked(session,
2152 		processGroupID);
2153 	if (group == NULL)
2154 		return B_BAD_VALUE;
2155 
2156 	// If we are a background group, we can only do that unharmed if we
2157 	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU signal.
2158 	if (session->foreground_group != -1
2159 		&& session->foreground_group != team->group_id
2160 		&& thread->sig_action[SIGTTOU - 1].sa_handler != SIG_IGN
2161 		&& !is_signal_blocked(SIGTTOU)) {
2162 		pid_t groupID = team->group->id;
2163 		locker.Unlock();
2164 		send_signal(-groupID, SIGTTOU);
2165 		return B_INTERRUPTED;
2166 	}
2167 
2168 	team->group->session->foreground_group = processGroupID;
2169 
2170 	return B_OK;
2171 }
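
/*	Illustrative userland perspective (a sketch only, assuming this function
	backs the tcsetpgrp() path; nothing below is part of this file): a shell
	handing the terminal to a job would do roughly:

	if (tcsetpgrp(STDIN_FILENO, jobProcessGroupID) != 0)
		perror("tcsetpgrp");
*/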
2172 
2173 
2174 /*!	Removes the specified team from the global team hash, and from its parent.
2175 	It also moves all of its children up to the parent.
2176 	You must hold the team lock when you call this function.
2177 */
2178 void
2179 team_remove_team(struct team *team)
2180 {
2181 	struct team *parent = team->parent;
2182 
2183 	// remember how long this team lasted
2184 	parent->dead_children->kernel_time += team->dead_threads_kernel_time
2185 		+ team->dead_children->kernel_time;
2186 	parent->dead_children->user_time += team->dead_threads_user_time
2187 		+ team->dead_children->user_time;
2188 
2189 	// Also grab the thread spinlock while removing the team from the hash.
2190 	// This makes the following sequence safe: grab teams lock, lookup team,
2191 	// grab threads lock, unlock teams lock,
2192 	// mutex_lock_threads_lock(<team related lock>), as used in the VFS code to
2193 	// lock another team's IO context.
2194 	GRAB_THREAD_LOCK();
2195 	hash_remove(sTeamHash, team);
2196 	RELEASE_THREAD_LOCK();
2197 	sUsedTeams--;
2198 
2199 	team->state = TEAM_STATE_DEATH;
2200 
2201 	// If we're a controlling process (i.e. a session leader with controlling
2202 	// terminal), there's a bit of signalling we have to do.
2203 	if (team->session_id == team->id
2204 		&& team->group->session->controlling_tty >= 0) {
2205 		process_session* session = team->group->session;
2206 
2207 		session->controlling_tty = -1;
2208 
2209 		// send SIGHUP to the foreground
2210 		if (session->foreground_group >= 0) {
2211 			send_signal_etc(-session->foreground_group, SIGHUP,
2212 				SIGNAL_FLAG_TEAMS_LOCKED);
2213 		}
2214 
2215 		// send SIGHUP + SIGCONT to all newly-orphaned process groups with
2216 		// stopped processes
2217 		struct team* child = team->children;
2218 		while (child != NULL) {
2219 			process_group* childGroup = child->group;
2220 			if (!childGroup->orphaned
2221 				&& update_orphaned_process_group(childGroup, team->id)
2222 				&& process_group_has_stopped_processes(childGroup)) {
2223 				send_signal_etc(-childGroup->id, SIGHUP,
2224 					SIGNAL_FLAG_TEAMS_LOCKED);
2225 				send_signal_etc(-childGroup->id, SIGCONT,
2226 					SIGNAL_FLAG_TEAMS_LOCKED);
2227 			}
2228 
2229 			child = child->siblings_next;
2230 		}
2231 	} else {
2232 		// update "orphaned" flags of all children's process groups
2233 		struct team* child = team->children;
2234 		while (child != NULL) {
2235 			process_group* childGroup = child->group;
2236 			if (!childGroup->orphaned)
2237 				update_orphaned_process_group(childGroup, team->id);
2238 
2239 			child = child->siblings_next;
2240 		}
2241 
2242 		// update "orphaned" flag of this team's process group
2243 		update_orphaned_process_group(team->group, team->id);
2244 	}
2245 
2246 	// reparent each of the team's children
2247 	reparent_children(team);
2248 
2249 	// remove us from our process group
2250 	remove_team_from_group(team);
2251 
2252 	// remove us from our parent
2253 	remove_team_from_parent(parent, team);
2254 }
2255 
2256 
2257 void
2258 team_delete_team(struct team *team)
2259 {
2260 	team_id teamID = team->id;
2261 	port_id debuggerPort = -1;
2262 	cpu_status state;
2263 
2264 	if (team->num_threads > 0) {
2265 		// there are other threads still in this team,
2266 		// cycle through and signal kill on each of the threads
2267 		// ToDo: this can be optimized. There's got to be a better solution.
2268 		struct thread *temp_thread;
2269 		char death_sem_name[B_OS_NAME_LENGTH];
2270 		sem_id deathSem;
2271 		int32 threadCount;
2272 
2273 		sprintf(death_sem_name, "team %ld death sem", teamID);
2274 		deathSem = create_sem(0, death_sem_name);
2275 		if (deathSem < 0)
2276 			panic("team_delete_team: cannot init death sem for team %ld\n", teamID);
2277 
2278 		state = disable_interrupts();
2279 		GRAB_TEAM_LOCK();
2280 
2281 		team->death_sem = deathSem;
2282 		threadCount = team->num_threads;
2283 
2284 		// If the team was being debugged, that will stop with the termination
2285 		// of the nub thread. The team structure has already been removed from
2286 		// the team hash table at this point, so no one can install a debugger
2287 		// anymore. We fetch the debugger's port to send it a message at the
2288 		// bitter end.
2289 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2290 
2291 		if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2292 			debuggerPort = team->debug_info.debugger_port;
2293 
2294 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2295 
2296 		// We can safely walk the list because of the lock. No new threads can be
2297 		// created because the team's state is TEAM_STATE_DEATH.
2298 		temp_thread = team->thread_list;
2299 		while (temp_thread) {
2300 			struct thread *next = temp_thread->team_next;
2301 
2302 			send_signal_etc(temp_thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2303 			temp_thread = next;
2304 		}
2305 
2306 		RELEASE_TEAM_LOCK();
2307 		restore_interrupts(state);
2308 
2309 		// wait until all threads in the team are dead.
2310 		acquire_sem_etc(team->death_sem, threadCount, 0, 0);
2311 		delete_sem(team->death_sem);
2312 	}
2313 
2314 	// If someone is waiting for this team to be loaded, but it dies
2315 	// unexpectedly before being done, we need to notify the waiting
2316 	// thread now.
2317 
2318 	state = disable_interrupts();
2319 	GRAB_TEAM_LOCK();
2320 
2321 	if (team->loading_info) {
2322 		// there's indeed someone waiting
2323 		struct team_loading_info *loadingInfo = team->loading_info;
2324 		team->loading_info = NULL;
2325 
2326 		loadingInfo->result = B_ERROR;
2327 		loadingInfo->done = true;
2328 
2329 		GRAB_THREAD_LOCK();
2330 
2331 		// wake up the waiting thread
2332 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
2333 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
2334 
2335 		RELEASE_THREAD_LOCK();
2336 	}
2337 
2338 	RELEASE_TEAM_LOCK();
2339 	restore_interrupts(state);
2340 
2341 	// notify team watchers
2342 
2343 	{
2344 		// we're not reachable from anyone anymore at this point, so we
2345 		// can safely access the list without any locking
2346 		struct team_watcher *watcher;
2347 		while ((watcher = (struct team_watcher*)list_remove_head_item(
2348 				&team->watcher_list)) != NULL) {
2349 			watcher->hook(teamID, watcher->data);
2350 			free(watcher);
2351 		}
2352 	}
2353 
2354 	// free team resources
2355 
2356 	vfs_free_io_context(team->io_context);
2357 	delete_realtime_sem_context(team->realtime_sem_context);
2358 	xsi_sem_undo(team);
2359 	delete_owned_ports(teamID);
2360 	sem_delete_owned_sems(teamID);
2361 	remove_images(team);
2362 	vm_delete_address_space(team->address_space);
2363 
2364 	delete_team_struct(team);
2365 
2366 	// notify the debugger that the team is gone
2367 	user_debug_team_deleted(teamID, debuggerPort);
2368 }
2369 
2370 
2371 struct team *
2372 team_get_kernel_team(void)
2373 {
2374 	return sKernelTeam;
2375 }
2376 
2377 
2378 team_id
2379 team_get_kernel_team_id(void)
2380 {
2381 	if (!sKernelTeam)
2382 		return 0;
2383 
2384 	return sKernelTeam->id;
2385 }
2386 
2387 
2388 team_id
2389 team_get_current_team_id(void)
2390 {
2391 	return thread_get_current_thread()->team->id;
2392 }
2393 
2394 
2395 status_t
2396 team_get_address_space(team_id id, vm_address_space **_addressSpace)
2397 {
2398 	cpu_status state;
2399 	struct team *team;
2400 	status_t status;
2401 
2402 	// ToDo: we need to do something about B_SYSTEM_TEAM vs. its real ID (1)
2403 	if (id == 1) {
2404 		// we're the kernel team, so we don't have to go through all
2405 		// the hassle (locking and hash lookup)
2406 		*_addressSpace = vm_get_kernel_address_space();
2407 		return B_OK;
2408 	}
2409 
2410 	state = disable_interrupts();
2411 	GRAB_TEAM_LOCK();
2412 
2413 	team = team_get_team_struct_locked(id);
2414 	if (team != NULL) {
2415 		atomic_add(&team->address_space->ref_count, 1);
2416 		*_addressSpace = team->address_space;
2417 		status = B_OK;
2418 	} else
2419 		status = B_BAD_VALUE;
2420 
2421 	RELEASE_TEAM_LOCK();
2422 	restore_interrupts(state);
2423 
2424 	return status;
2425 }
2426 
2427 
2428 /*!	Sets the team's job control state.
2429 	Interrupts must be disabled and the team lock be held.
2430 	\a threadsLocked indicates whether the thread lock is being held, too.
2431 */
2432 void
2433 team_set_job_control_state(struct team* team, job_control_state newState,
2434 	int signal, bool threadsLocked)
2435 {
2436 	if (team == NULL || team->job_control_entry == NULL)
2437 		return;
2438 
2439 	// don't touch anything if the state stays the same or the team is already
2440 	// dead
2441 	job_control_entry* entry = team->job_control_entry;
2442 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
2443 		return;
2444 
2445 	T(SetJobControlState(team->id, newState, signal));
2446 
2447 	// remove from the old list
2448 	switch (entry->state) {
2449 		case JOB_CONTROL_STATE_NONE:
2450 			// entry is in no list ATM
2451 			break;
2452 		case JOB_CONTROL_STATE_DEAD:
2453 			// can't get here
2454 			break;
2455 		case JOB_CONTROL_STATE_STOPPED:
2456 			team->parent->stopped_children->entries.Remove(entry);
2457 			break;
2458 		case JOB_CONTROL_STATE_CONTINUED:
2459 			team->parent->continued_children->entries.Remove(entry);
2460 			break;
2461 	}
2462 
2463 	entry->state = newState;
2464 	entry->signal = signal;
2465 
2466 	// add to new list
2467 	team_job_control_children* childList = NULL;
2468 	switch (entry->state) {
2469 		case JOB_CONTROL_STATE_NONE:
2470 			// entry doesn't get into any list
2471 			break;
2472 		case JOB_CONTROL_STATE_DEAD:
2473 			childList = team->parent->dead_children;
2474 			team->parent->dead_children->count++;
2475 			break;
2476 		case JOB_CONTROL_STATE_STOPPED:
2477 			childList = team->parent->stopped_children;
2478 			break;
2479 		case JOB_CONTROL_STATE_CONTINUED:
2480 			childList = team->parent->continued_children;
2481 			break;
2482 	}
2483 
2484 	if (childList != NULL) {
2485 		childList->entries.Add(entry);
2486 		team->parent->dead_children->condition_variable.NotifyAll(
2487 			threadsLocked);
2488 	}
2489 }
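
/*	Summary of the mapping above (descriptive only): an entry in
	JOB_CONTROL_STATE_NONE sits in no list; JOB_CONTROL_STATE_STOPPED and
	JOB_CONTROL_STATE_CONTINUED entries live in the parent's stopped_children
	resp. continued_children lists; JOB_CONTROL_STATE_DEAD entries are added
	to the parent's dead_children list and bump its count. Whenever an entry
	is added to a list, the parent's dead_children condition variable is
	notified, so a parent blocked in wait_for_child() can re-evaluate.
*/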
2490 
2491 
2492 /*! Adds a hook to the team that is called as soon as this
2493 	team goes away.
2494 	This call might become public in the future.
2495 */
2496 status_t
2497 start_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2498 {
2499 	struct team_watcher *watcher;
2500 	struct team *team;
2501 	cpu_status state;
2502 
2503 	if (hook == NULL || teamID < B_OK)
2504 		return B_BAD_VALUE;
2505 
2506 	watcher = (struct team_watcher*)malloc(sizeof(struct team_watcher));
2507 	if (watcher == NULL)
2508 		return B_NO_MEMORY;
2509 
2510 	watcher->hook = hook;
2511 	watcher->data = data;
2512 
2513 	// find team and add watcher
2514 
2515 	state = disable_interrupts();
2516 	GRAB_TEAM_LOCK();
2517 
2518 	team = team_get_team_struct_locked(teamID);
2519 	if (team != NULL)
2520 		list_add_item(&team->watcher_list, watcher);
2521 
2522 	RELEASE_TEAM_LOCK();
2523 	restore_interrupts(state);
2524 
2525 	if (team == NULL) {
2526 		free(watcher);
2527 		return B_BAD_TEAM_ID;
2528 	}
2529 
2530 	return B_OK;
2531 }
2532 
2533 
2534 status_t
2535 stop_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2536 {
2537 	struct team_watcher *watcher = NULL;
2538 	struct team *team;
2539 	cpu_status state;
2540 
2541 	if (hook == NULL || teamID < B_OK)
2542 		return B_BAD_VALUE;
2543 
2544 	// find team and remove watcher (if present)
2545 
2546 	state = disable_interrupts();
2547 	GRAB_TEAM_LOCK();
2548 
2549 	team = team_get_team_struct_locked(teamID);
2550 	if (team != NULL) {
2551 		// search for watcher
2552 		while ((watcher = (struct team_watcher*)list_get_next_item(
2553 				&team->watcher_list, watcher)) != NULL) {
2554 			if (watcher->hook == hook && watcher->data == data) {
2555 				// got it!
2556 				list_remove_item(&team->watcher_list, watcher);
2557 				break;
2558 			}
2559 		}
2560 	}
2561 
2562 	RELEASE_TEAM_LOCK();
2563 	restore_interrupts(state);
2564 
2565 	if (watcher == NULL)
2566 		return B_ENTRY_NOT_FOUND;
2567 
2568 	free(watcher);
2569 	return B_OK;
2570 }
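
/*	Illustrative usage (a minimal sketch; the hook and the watched team ID
	are hypothetical and not part of this file):

	static void
	my_team_gone_hook(team_id team, void* data)
	{
		dprintf("watched team %ld is gone\n", team);
	}

	...
	status_t error = start_watching_team(watchedTeamID, &my_team_gone_hook,
		NULL);
	...
	if (error == B_OK)
		stop_watching_team(watchedTeamID, &my_team_gone_hook, NULL);
*/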
2571 
2572 
2573 /*!	The team lock must be held or the team must still be single threaded.
2574 */
2575 struct user_thread*
2576 team_allocate_user_thread(struct team* team)
2577 {
2578 	if (team->user_data == 0)
2579 		return NULL;
2580 
2581 	user_thread* thread = NULL;
2582 
2583 	// take an entry from the free list, if any
2584 	if (struct free_user_thread* entry = team->free_user_threads) {
2585 		thread = entry->thread;
2586 		team->free_user_threads = entry->next;
2587 		deferred_free(entry);
2588 		return thread;
2589 	} else {
2590 		// enough space left?
2591 		size_t needed = _ALIGN(sizeof(user_thread));
2592 		if (team->user_data_size - team->used_user_data < needed)
2593 			return NULL;
2594 		// TODO: This imposes a per team thread limit! We should resize the
2595 		// area, if necessary. That's problematic at this point, though, since
2596 		// we've got the team lock.
2597 
2598 		thread = (user_thread*)(team->user_data + team->used_user_data);
2599 		team->used_user_data += needed;
2600 	}
2601 
2602 	thread->defer_signals = 0;
2603 	thread->pending_signals = 0;
2604 	thread->wait_status = B_OK;
2605 
2606 	return thread;
2607 }
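
/*	Note (descriptive only): user_thread structures are carved out of the
	team's user_data area by a simple bump allocation (used_user_data only
	grows); slots released via team_free_user_thread() below are recycled
	through the free_user_threads list, so the area is reused but never
	shrinks.
*/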
2608 
2609 
2610 /*!	The team lock must not be held. \a thread must be the current thread.
2611 */
2612 void
2613 team_free_user_thread(struct thread* thread)
2614 {
2615 	user_thread* userThread = thread->user_thread;
2616 	if (userThread == NULL)
2617 		return;
2618 
2619 	// create a free list entry
2620 	free_user_thread* entry
2621 		= (free_user_thread*)malloc(sizeof(free_user_thread));
2622 	if (entry == NULL) {
2623 		// we have to leak the user thread :-/
2624 		return;
2625 	}
2626 
2627 	InterruptsSpinLocker _(gTeamSpinlock);
2628 
2629 	entry->thread = userThread;
2630 	entry->next = thread->team->free_user_threads;
2631 	thread->team->free_user_threads = entry;
2632 }
2633 
2634 
2635 //	#pragma mark - Public kernel API
2636 
2637 
2638 thread_id
2639 load_image(int32 argCount, const char **args, const char **env)
2640 {
2641 	// we need to flatten the args and environment
2642 
2643 	if (args == NULL)
2644 		return B_BAD_VALUE;
2645 
2646 	// determine total needed size
2647 	int32 argSize = 0;
2648 	for (int32 i = 0; i < argCount; i++)
2649 		argSize += strlen(args[i]) + 1;
2650 
2651 	int32 envCount = 0;
2652 	int32 envSize = 0;
2653 	while (env != NULL && env[envCount] != NULL)
2654 		envSize += strlen(env[envCount++]) + 1;
2655 
2656 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
2657 	if (size > MAX_PROCESS_ARGS_SIZE)
2658 		return B_TOO_MANY_ARGS;
2659 
2660 	// allocate space
2661 	char** flatArgs = (char**)malloc(size);
2662 	if (flatArgs == NULL)
2663 		return B_NO_MEMORY;
2664 
2665 	char** slot = flatArgs;
2666 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
2667 
2668 	// copy arguments and environment
2669 	for (int32 i = 0; i < argCount; i++) {
2670 		int32 argSize = strlen(args[i]) + 1;
2671 		memcpy(stringSpace, args[i], argSize);
2672 		*slot++ = stringSpace;
2673 		stringSpace += argSize;
2674 	}
2675 
2676 	*slot++ = NULL;
2677 
2678 	for (int32 i = 0; i < envCount; i++) {
2679 		int32 envSize = strlen(env[i]) + 1;
2680 		memcpy(stringSpace, env[i], envSize);
2681 		*slot++ = stringSpace;
2682 		stringSpace += envSize;
2683 	}
2684 
2685 	*slot++ = NULL;
2686 
2687 	thread_id thread = load_image_etc(flatArgs, size, argCount, envCount,
2688 		B_NORMAL_PRIORITY, B_WAIT_TILL_LOADED, -1, 0);
2689 
2690 	free(flatArgs);
2691 		// load_image_etc() unset our variable if it took over ownership
2692 
2693 	return thread;
2694 }
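
/*	Illustrative in-kernel usage (a sketch only; as with the public
	load_image() API, the returned main thread is assumed to still be
	suspended and in need of a resume_thread()):

	const char* args[] = { "/bin/cat", "/etc/passwd" };
	const char* env[] = { "HOME=/boot/home", NULL };

	thread_id team = load_image(2, args, env);
	if (team >= B_OK)
		resume_thread(team);
*/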
2695 
2696 
2697 status_t
2698 wait_for_team(team_id id, status_t *_returnCode)
2699 {
2700 	struct team *team;
2701 	thread_id thread;
2702 	cpu_status state;
2703 
2704 	// find main thread and wait for that
2705 
2706 	state = disable_interrupts();
2707 	GRAB_TEAM_LOCK();
2708 
2709 	team = team_get_team_struct_locked(id);
2710 	if (team != NULL && team->main_thread != NULL)
2711 		thread = team->main_thread->id;
2712 	else
2713 		thread = B_BAD_THREAD_ID;
2714 
2715 	RELEASE_TEAM_LOCK();
2716 	restore_interrupts(state);
2717 
2718 	if (thread < 0)
2719 		return thread;
2720 
2721 	return wait_for_thread(thread, _returnCode);
2722 }
2723 
2724 
2725 status_t
2726 kill_team(team_id id)
2727 {
2728 	status_t status = B_OK;
2729 	thread_id threadID = -1;
2730 	struct team *team;
2731 	cpu_status state;
2732 
2733 	state = disable_interrupts();
2734 	GRAB_TEAM_LOCK();
2735 
2736 	team = team_get_team_struct_locked(id);
2737 	if (team != NULL) {
2738 		if (team != sKernelTeam) {
2739 			threadID = team->id;
2740 				// the team ID is the same as the ID of its main thread
2741 		} else
2742 			status = B_NOT_ALLOWED;
2743 	} else
2744 		status = B_BAD_THREAD_ID;
2745 
2746 	RELEASE_TEAM_LOCK();
2747 	restore_interrupts(state);
2748 
2749 	if (status < B_OK)
2750 		return status;
2751 
2752 	// just kill the main thread in the team. The cleanup code there will
2753 	// take care of the team
2754 	return kill_thread(threadID);
2755 }
2756 
2757 
2758 status_t
2759 _get_team_info(team_id id, team_info *info, size_t size)
2760 {
2761 	cpu_status state;
2762 	status_t status = B_OK;
2763 	struct team *team;
2764 
2765 	state = disable_interrupts();
2766 	GRAB_TEAM_LOCK();
2767 
2768 	if (id == B_CURRENT_TEAM)
2769 		team = thread_get_current_thread()->team;
2770 	else
2771 		team = team_get_team_struct_locked(id);
2772 
2773 	if (team == NULL) {
2774 		status = B_BAD_TEAM_ID;
2775 		goto err;
2776 	}
2777 
2778 	status = fill_team_info(team, info, size);
2779 
2780 err:
2781 	RELEASE_TEAM_LOCK();
2782 	restore_interrupts(state);
2783 
2784 	return status;
2785 }
2786 
2787 
2788 status_t
2789 _get_next_team_info(int32 *cookie, team_info *info, size_t size)
2790 {
2791 	status_t status = B_BAD_TEAM_ID;
2792 	struct team *team = NULL;
2793 	int32 slot = *cookie;
2794 	team_id lastTeamID;
2795 	cpu_status state;
2796 
2797 	if (slot < 1)
2798 		slot = 1;
2799 
2800 	state = disable_interrupts();
2801 	GRAB_TEAM_LOCK();
2802 
2803 	lastTeamID = peek_next_thread_id();
2804 	if (slot >= lastTeamID)
2805 		goto err;
2806 
2807 	// get next valid team
2808 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
2809 		slot++;
2810 
2811 	if (team) {
2812 		status = fill_team_info(team, info, size);
2813 		*cookie = ++slot;
2814 	}
2815 
2816 err:
2817 	RELEASE_TEAM_LOCK();
2818 	restore_interrupts(state);
2819 
2820 	return status;
2821 }
2822 
2823 
2824 status_t
2825 _get_team_usage_info(team_id id, int32 who, team_usage_info *info, size_t size)
2826 {
2827 	bigtime_t kernelTime = 0, userTime = 0;
2828 	status_t status = B_OK;
2829 	struct team *team;
2830 	cpu_status state;
2831 
2832 	if (size != sizeof(team_usage_info)
2833 		|| (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN))
2834 		return B_BAD_VALUE;
2835 
2836 	state = disable_interrupts();
2837 	GRAB_TEAM_LOCK();
2838 
2839 	if (id == B_CURRENT_TEAM)
2840 		team = thread_get_current_thread()->team;
2841 	else
2842 		team = team_get_team_struct_locked(id);
2843 
2844 	if (team == NULL) {
2845 		status = B_BAD_TEAM_ID;
2846 		goto out;
2847 	}
2848 
2849 	switch (who) {
2850 		case B_TEAM_USAGE_SELF:
2851 		{
2852 			struct thread *thread = team->thread_list;
2853 
2854 			for (; thread != NULL; thread = thread->team_next) {
2855 				kernelTime += thread->kernel_time;
2856 				userTime += thread->user_time;
2857 			}
2858 
2859 			kernelTime += team->dead_threads_kernel_time;
2860 			userTime += team->dead_threads_user_time;
2861 			break;
2862 		}
2863 
2864 		case B_TEAM_USAGE_CHILDREN:
2865 		{
2866 			struct team *child = team->children;
2867 			for (; child != NULL; child = child->siblings_next) {
2868 				struct thread *thread = child->thread_list;
2869 
2870 				for (; thread != NULL; thread = thread->team_next) {
2871 					kernelTime += thread->kernel_time;
2872 					userTime += thread->user_time;
2873 				}
2874 
2875 				kernelTime += child->dead_threads_kernel_time;
2876 				userTime += child->dead_threads_user_time;
2877 			}
2878 
2879 			kernelTime += team->dead_children->kernel_time;
2880 			userTime += team->dead_children->user_time;
2881 			break;
2882 		}
2883 	}
2884 
2885 out:
2886 	RELEASE_TEAM_LOCK();
2887 	restore_interrupts(state);
2888 
2889 	if (status == B_OK) {
2890 		info->kernel_time = kernelTime;
2891 		info->user_time = userTime;
2892 	}
2893 
2894 	return status;
2895 }
2896 
2897 
2898 pid_t
2899 getpid(void)
2900 {
2901 	return thread_get_current_thread()->team->id;
2902 }
2903 
2904 
2905 pid_t
2906 getppid(void)
2907 {
2908 	struct team *team = thread_get_current_thread()->team;
2909 	cpu_status state;
2910 	pid_t parent;
2911 
2912 	state = disable_interrupts();
2913 	GRAB_TEAM_LOCK();
2914 
2915 	parent = team->parent->id;
2916 
2917 	RELEASE_TEAM_LOCK();
2918 	restore_interrupts(state);
2919 
2920 	return parent;
2921 }
2922 
2923 
2924 pid_t
2925 getpgid(pid_t process)
2926 {
2927 	struct thread *thread;
2928 	pid_t result = -1;
2929 	cpu_status state;
2930 
2931 	if (process == 0)
2932 		process = thread_get_current_thread()->team->id;
2933 
2934 	state = disable_interrupts();
2935 	GRAB_THREAD_LOCK();
2936 
2937 	thread = thread_get_thread_struct_locked(process);
2938 	if (thread != NULL)
2939 		result = thread->team->group_id;
2940 
2941 	RELEASE_THREAD_LOCK();
2942 	restore_interrupts(state);
2943 
2944 	return thread != NULL ? result : B_BAD_VALUE;
2945 }
2946 
2947 
2948 pid_t
2949 getsid(pid_t process)
2950 {
2951 	struct thread *thread;
2952 	pid_t result = -1;
2953 	cpu_status state;
2954 
2955 	if (process == 0)
2956 		process = thread_get_current_thread()->team->id;
2957 
2958 	state = disable_interrupts();
2959 	GRAB_THREAD_LOCK();
2960 
2961 	thread = thread_get_thread_struct_locked(process);
2962 	if (thread != NULL)
2963 		result = thread->team->session_id;
2964 
2965 	RELEASE_THREAD_LOCK();
2966 	restore_interrupts(state);
2967 
2968 	return thread != NULL ? result : B_BAD_VALUE;
2969 }
2970 
2971 
2972 //	#pragma mark - User syscalls
2973 
2974 
2975 status_t
2976 _user_exec(const char *userPath, const char* const* userFlatArgs,
2977 	size_t flatArgsSize, int32 argCount, int32 envCount)
2978 {
2979 	// NOTE: Since this function normally doesn't return, don't use automatic
2980 	// variables that need destruction in the function scope.
2981 	char path[B_PATH_NAME_LENGTH];
2982 
2983 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
2984 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
2985 		return B_BAD_ADDRESS;
2986 
2987 	// copy and relocate the flat arguments
2988 	char** flatArgs;
2989 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
2990 		argCount, envCount, flatArgs);
2991 
2992 	if (error == B_OK) {
2993 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
2994 			envCount);
2995 			// this one only returns in case of error
2996 	}
2997 
2998 	free(flatArgs);
2999 	return error;
3000 }
3001 
3002 
3003 thread_id
3004 _user_fork(void)
3005 {
3006 	return fork_team();
3007 }
3008 
3009 
3010 thread_id
3011 _user_wait_for_child(thread_id child, uint32 flags, int32 *_userReason, status_t *_userReturnCode)
3012 {
3013 	status_t returnCode;
3014 	int32 reason;
3015 	thread_id deadChild;
3016 
3017 	if ((_userReason != NULL && !IS_USER_ADDRESS(_userReason))
3018 		|| (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode)))
3019 		return B_BAD_ADDRESS;
3020 
3021 	deadChild = wait_for_child(child, flags, &reason, &returnCode);
3022 
3023 	if (deadChild >= B_OK) {
3024 		// copy result data on successful completion
3025 		if ((_userReason != NULL
3026 				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
3027 			|| (_userReturnCode != NULL
3028 				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
3029 					< B_OK)) {
3030 			return B_BAD_ADDRESS;
3031 		}
3032 
3033 		return deadChild;
3034 	}
3035 
3036 	return syscall_restart_handle_post(deadChild);
3037 }
3038 
3039 
3040 pid_t
3041 _user_process_info(pid_t process, int32 which)
3042 {
3043 	// we only allow returning the parent of the current process
3044 	if (which == PARENT_ID
3045 		&& process != 0 && process != thread_get_current_thread()->team->id)
3046 		return B_BAD_VALUE;
3047 
3048 	switch (which) {
3049 		case SESSION_ID:
3050 			return getsid(process);
3051 		case GROUP_ID:
3052 			return getpgid(process);
3053 		case PARENT_ID:
3054 			return getppid();
3055 	}
3056 
3057 	return B_BAD_VALUE;
3058 }
3059 
3060 
3061 pid_t
3062 _user_setpgid(pid_t processID, pid_t groupID)
3063 {
3064 	struct thread *thread = thread_get_current_thread();
3065 	struct team *currentTeam = thread->team;
3066 	struct team *team;
3067 
3068 	if (groupID < 0)
3069 		return B_BAD_VALUE;
3070 
3071 	if (processID == 0)
3072 		processID = currentTeam->id;
3073 
3074 	// if the group ID is not specified, use the target process' ID
3075 	if (groupID == 0)
3076 		groupID = processID;
3077 
3078 	if (processID == currentTeam->id) {
3079 		// we set our own group
3080 
3081 		// we must not change our process group ID if we're a session leader
3082 		if (is_session_leader(currentTeam))
3083 			return B_NOT_ALLOWED;
3084 	} else {
3085 		// another team is the target of the call -- check it out
3086 		InterruptsSpinLocker _(gTeamSpinlock);
3087 
3088 		team = team_get_team_struct_locked(processID);
3089 		if (team == NULL)
3090 			return ESRCH;
3091 
3092 		// The team must be a child of the calling team and in the same session.
3093 		// (If that's the case it isn't a session leader either.)
3094 		if (team->parent != currentTeam
3095 			|| team->session_id != currentTeam->session_id) {
3096 			return B_NOT_ALLOWED;
3097 		}
3098 
3099 		if (team->group_id == groupID)
3100 			return groupID;
3101 
3102 		// The call is also supposed to fail on a child that has already
3103 		// executed exec*() [EACCES].
3104 		if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
3105 			return EACCES;
3106 	}
3107 
3108 	struct process_group *group = NULL;
3109 	if (groupID == processID) {
3110 		// A new process group might be needed.
3111 		group = create_process_group(groupID);
3112 		if (group == NULL)
3113 			return B_NO_MEMORY;
3114 
3115 		// Assume orphaned. We consider the situation of the team's parent
3116 		// below.
3117 		group->orphaned = true;
3118 	}
3119 
3120 	status_t status = B_OK;
3121 	struct process_group *freeGroup = NULL;
3122 
3123 	InterruptsSpinLocker locker(gTeamSpinlock);
3124 
3125 	team = team_get_team_struct_locked(processID);
3126 	if (team != NULL) {
3127 		// check the conditions again -- they might have changed in the meantime
3128 		if (is_session_leader(team)
3129 			|| team->session_id != currentTeam->session_id) {
3130 			status = B_NOT_ALLOWED;
3131 		} else if (team != currentTeam
3132 				&& (team->flags & TEAM_FLAG_EXEC_DONE) != 0) {
3133 			status = EACCES;
3134 		} else if (team->group_id == groupID) {
3135 			// the team is already in the desired process group
3136 			freeGroup = group;
3137 		} else {
3138 			// Check if a process group with the requested ID already exists.
3139 			struct process_group *targetGroup
3140 				= team_get_process_group_locked(team->group->session, groupID);
3141 			if (targetGroup != NULL) {
3142 				// In case of processID == groupID we have to free the
3143 				// allocated group.
3144 				freeGroup = group;
3145 			} else if (processID == groupID) {
3146 				// We created a new process group, let us insert it into the
3147 				// team's session.
3148 				insert_group_into_session(team->group->session, group);
3149 				targetGroup = group;
3150 			}
3151 
3152 			if (targetGroup != NULL) {
3153 				// we got a group, let's move the team there
3154 				process_group* oldGroup = team->group;
3155 
3156 				remove_team_from_group(team);
3157 				insert_team_into_group(targetGroup, team);
3158 
3159 				// Update the "orphaned" flag of all potentially affected
3160 				// groups.
3161 
3162 				// the team's old group
3163 				if (oldGroup->teams != NULL) {
3164 					oldGroup->orphaned = false;
3165 					update_orphaned_process_group(oldGroup, -1);
3166 				}
3167 
3168 				// the team's new group
3169 				struct team* parent = team->parent;
3170 				targetGroup->orphaned &= parent == NULL
3171 					|| parent->group == targetGroup
3172 					|| team->parent->session_id != team->session_id;
3173 
3174 				// children's groups
3175 				struct team* child = team->children;
3176 				while (child != NULL) {
3177 					child->group->orphaned = false;
3178 					update_orphaned_process_group(child->group, -1);
3179 
3180 					child = child->siblings_next;
3181 				}
3182 			} else
3183 				status = B_NOT_ALLOWED;
3184 		}
3185 	} else
3186 		status = B_NOT_ALLOWED;
3187 
3188 	// Changing the process group might have changed the situation for a parent
3189 	// waiting in wait_for_child(). Hence we notify it.
3190 	if (status == B_OK)
3191 		team->parent->dead_children->condition_variable.NotifyAll(false);
3192 
3193 	locker.Unlock();
3194 
3195 	if (status != B_OK) {
3196 		// in case of error, the group hasn't been added into the hash
3197 		team_delete_process_group(group);
3198 	}
3199 
3200 	team_delete_process_group(freeGroup);
3201 
3202 	return status == B_OK ? groupID : status;
3203 }
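
/*	Illustrative userland usage (a sketch of the POSIX job-control pattern
	this syscall serves; not part of the kernel): a shell typically moves a
	freshly forked child into its own process group from both sides to avoid
	a race:

	pid_t child = fork();
	if (child == 0) {
		setpgid(0, 0);
			// child: become leader of a new group with its own ID
		execl("/bin/cat", "cat", "/etc/passwd", (char*)NULL);
		_exit(1);
	} else if (child > 0) {
		setpgid(child, child);
			// parent: same call; whichever side runs first wins
	}
*/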
3204 
3205 
3206 pid_t
3207 _user_setsid(void)
3208 {
3209 	struct team *team = thread_get_current_thread()->team;
3210 	struct process_session *session;
3211 	struct process_group *group;
3212 	cpu_status state;
3213 	bool failed = false;
3214 
3215 	// the team must not already be a process group leader
3216 	if (is_process_group_leader(team))
3217 		return B_NOT_ALLOWED;
3218 
3219 	group = create_process_group(team->id);
3220 	if (group == NULL)
3221 		return B_NO_MEMORY;
3222 
3223 	session = create_process_session(group->id);
3224 	if (session == NULL) {
3225 		team_delete_process_group(group);
3226 		return B_NO_MEMORY;
3227 	}
3228 
3229 	state = disable_interrupts();
3230 	GRAB_TEAM_LOCK();
3231 
3232 	// this may have changed since the check above
3233 	if (!is_process_group_leader(team)) {
3234 		remove_team_from_group(team);
3235 
3236 		insert_group_into_session(session, group);
3237 		insert_team_into_group(group, team);
3238 	} else
3239 		failed = true;
3240 
3241 	RELEASE_TEAM_LOCK();
3242 	restore_interrupts(state);
3243 
3244 	if (failed) {
3245 		team_delete_process_group(group);
3246 		free(session);
3247 		return B_NOT_ALLOWED;
3248 	}
3249 
3250 	return team->group_id;
3251 }
3252 
3253 
3254 status_t
3255 _user_wait_for_team(team_id id, status_t *_userReturnCode)
3256 {
3257 	status_t returnCode;
3258 	status_t status;
3259 
3260 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
3261 		return B_BAD_ADDRESS;
3262 
3263 	status = wait_for_team(id, &returnCode);
3264 	if (status >= B_OK && _userReturnCode != NULL) {
3265 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode)) < B_OK)
3266 			return B_BAD_ADDRESS;
3267 		return B_OK;
3268 	}
3269 
3270 	return syscall_restart_handle_post(status);
3271 }
3272 
3273 
3274 thread_id
3275 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
3276 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
3277 	port_id errorPort, uint32 errorToken)
3278 {
3279 	TRACE(("_user_load_image_etc: argc = %ld\n", argCount));
3280 
3281 	if (argCount < 1)
3282 		return B_BAD_VALUE;
3283 
3284 	// copy and relocate the flat arguments
3285 	char** flatArgs;
3286 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3287 		argCount, envCount, flatArgs);
3288 	if (error != B_OK)
3289 		return error;
3290 
3291 	thread_id thread = load_image_etc(flatArgs, _ALIGN(flatArgsSize), argCount,
3292 		envCount, priority, flags, errorPort, errorToken);
3293 
3294 	free(flatArgs);
3295 		// load_image_etc() unset our variable if it took over ownership
3296 
3297 	return thread;
3298 }
3299 
3300 
3301 void
3302 _user_exit_team(status_t returnValue)
3303 {
3304 	struct thread *thread = thread_get_current_thread();
3305 
3306 	thread->exit.status = returnValue;
3307 	thread->exit.reason = THREAD_RETURN_EXIT;
3308 
3309 	send_signal(thread->id, SIGKILL);
3310 }
3311 
3312 
3313 status_t
3314 _user_kill_team(team_id team)
3315 {
3316 	return kill_team(team);
3317 }
3318 
3319 
3320 status_t
3321 _user_get_team_info(team_id id, team_info *userInfo)
3322 {
3323 	status_t status;
3324 	team_info info;
3325 
3326 	if (!IS_USER_ADDRESS(userInfo))
3327 		return B_BAD_ADDRESS;
3328 
3329 	status = _get_team_info(id, &info, sizeof(team_info));
3330 	if (status == B_OK) {
3331 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3332 			return B_BAD_ADDRESS;
3333 	}
3334 
3335 	return status;
3336 }
3337 
3338 
3339 status_t
3340 _user_get_next_team_info(int32 *userCookie, team_info *userInfo)
3341 {
3342 	status_t status;
3343 	team_info info;
3344 	int32 cookie;
3345 
3346 	if (!IS_USER_ADDRESS(userCookie)
3347 		|| !IS_USER_ADDRESS(userInfo)
3348 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3349 		return B_BAD_ADDRESS;
3350 
3351 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
3352 	if (status != B_OK)
3353 		return status;
3354 
3355 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3356 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3357 		return B_BAD_ADDRESS;
3358 
3359 	return status;
3360 }
3361 
3362 
3363 team_id
3364 _user_get_current_team(void)
3365 {
3366 	return team_get_current_team_id();
3367 }
3368 
3369 
3370 status_t
3371 _user_get_team_usage_info(team_id team, int32 who, team_usage_info *userInfo, size_t size)
3372 {
3373 	team_usage_info info;
3374 	status_t status;
3375 
3376 	if (!IS_USER_ADDRESS(userInfo))
3377 		return B_BAD_ADDRESS;
3378 
3379 	status = _get_team_usage_info(team, who, &info, size);
3380 	if (status != B_OK)
3381 		return status;
3382 
3383 	if (user_memcpy(userInfo, &info, size) < B_OK)
3384 		return B_BAD_ADDRESS;
3385 
3386 	return status;
3387 }
3388 
3389