xref: /haiku/src/system/kernel/team.cpp (revision 959ff00ddee8411dabb09211f3bfbd52d87229da)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 /*!	Team functions */
11 
12 #include <stdio.h>
13 #include <stdlib.h>
14 #include <string.h>
15 #include <sys/wait.h>
16 
17 #include <OS.h>
18 
19 #include <AutoDeleter.h>
20 #include <FindDirectory.h>
21 
22 #include <boot_device.h>
23 #include <elf.h>
24 #include <file_cache.h>
25 #include <fs/KPath.h>
26 #include <heap.h>
27 #include <int.h>
28 #include <kernel.h>
29 #include <kimage.h>
30 #include <kscheduler.h>
31 #include <ksignal.h>
32 #include <port.h>
33 #include <posix/realtime_sem.h>
34 #include <posix/xsi_semaphore.h>
35 #include <sem.h>
36 #include <syscall_process_info.h>
37 #include <syscall_restart.h>
38 #include <syscalls.h>
39 #include <team.h>
40 #include <tls.h>
41 #include <tracing.h>
42 #include <user_runtime.h>
43 #include <user_thread.h>
44 #include <usergroup.h>
45 #include <vfs.h>
46 #include <vm.h>
47 #include <vm_address_space.h>
48 #include <util/AutoLock.h>
49 #include <util/khash.h>
50 
51 //#define TRACE_TEAM
52 #ifdef TRACE_TEAM
53 #	define TRACE(x) dprintf x
54 #else
55 #	define TRACE(x) ;
56 #endif
57 
58 
59 struct team_key {
60 	team_id id;
61 };
62 
63 struct team_arg {
64 	char	*path;
65 	char	**flat_args;
66 	size_t	flat_args_size;
67 	uint32	arg_count;
68 	uint32	env_count;
69 	port_id	error_port;
70 	uint32	error_token;
71 };
72 
73 struct fork_arg {
74 	area_id		user_stack_area;
75 	addr_t		user_stack_base;
76 	size_t		user_stack_size;
77 	addr_t		user_local_storage;
78 	sigset_t	sig_block_mask;
79 	struct user_thread* user_thread;
80 
81 	struct arch_fork_arg arch_info;
82 };
83 
84 
85 static hash_table *sTeamHash = NULL;
86 static hash_table *sGroupHash = NULL;
87 static struct team *sKernelTeam = NULL;
88 
89 // some arbitrarily chosen limits - should probably depend on the available
90 // memory (the limit is not yet enforced)
91 static int32 sMaxTeams = 2048;
92 static int32 sUsedTeams = 1;
93 
94 spinlock gTeamSpinlock = B_SPINLOCK_INITIALIZER;
95 
96 
97 // #pragma mark - Tracing
98 
99 
100 #if TEAM_TRACING
101 namespace TeamTracing {
102 
103 class TeamForked : public AbstractTraceEntry {
104 public:
105 	TeamForked(thread_id forkedThread)
106 		:
107 		fForkedThread(forkedThread)
108 	{
109 		Initialized();
110 	}
111 
112 	virtual void AddDump(TraceOutput& out)
113 	{
114 		out.Print("team forked, new thread %ld", fForkedThread);
115 	}
116 
117 private:
118 	thread_id			fForkedThread;
119 };
120 
121 
122 class ExecTeam : public AbstractTraceEntry {
123 public:
124 	ExecTeam(const char* path, int32 argCount, const char* const* args,
125 			int32 envCount, const char* const* env)
126 		:
127 		fArgCount(argCount),
128 		fArgs(NULL)
129 	{
130 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
131 			false);
132 
133 		// determine the buffer size we need for the args
134 		size_t argBufferSize = 0;
135 		for (int32 i = 0; i < argCount; i++)
136 			argBufferSize += strlen(args[i]) + 1;
137 
138 		// allocate a buffer
139 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
140 		if (fArgs) {
141 			char* buffer = fArgs;
142 			for (int32 i = 0; i < argCount; i++) {
143 				size_t argSize = strlen(args[i]) + 1;
144 				memcpy(buffer, args[i], argSize);
145 				buffer += argSize;
146 			}
147 		}
148 
149 		// ignore env for the time being
150 		(void)envCount;
151 		(void)env;
152 
153 		Initialized();
154 	}
155 
156 	virtual void AddDump(TraceOutput& out)
157 	{
158 		out.Print("team exec, \"%p\", args:", fPath);
159 
160 		if (fArgs != NULL) {
161 			char* args = fArgs;
162 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
163 				out.Print(" \"%s\"", args);
164 				args += strlen(args) + 1;
165 			}
166 		} else
167 			out.Print(" <too long>");
168 	}
169 
170 private:
171 	char*	fPath;
172 	int32	fArgCount;
173 	char*	fArgs;
174 };
175 
176 
177 static const char*
178 job_control_state_name(job_control_state state)
179 {
180 	switch (state) {
181 		case JOB_CONTROL_STATE_NONE:
182 			return "none";
183 		case JOB_CONTROL_STATE_STOPPED:
184 			return "stopped";
185 		case JOB_CONTROL_STATE_CONTINUED:
186 			return "continued";
187 		case JOB_CONTROL_STATE_DEAD:
188 			return "dead";
189 		default:
190 			return "invalid";
191 	}
192 }
193 
194 
195 class SetJobControlState : public AbstractTraceEntry {
196 public:
197 	SetJobControlState(team_id team, job_control_state newState, int signal)
198 		:
199 		fTeam(team),
200 		fNewState(newState),
201 		fSignal(signal)
202 	{
203 		Initialized();
204 	}
205 
206 	virtual void AddDump(TraceOutput& out)
207 	{
208 		out.Print("team set job control state, team %ld, "
209 			"new state: %s, signal: %d",
210 			fTeam, job_control_state_name(fNewState), fSignal);
211 	}
212 
213 private:
214 	team_id				fTeam;
215 	job_control_state	fNewState;
216 	int					fSignal;
217 };
218 
219 
220 class WaitForChild : public AbstractTraceEntry {
221 public:
222 	WaitForChild(pid_t child, uint32 flags)
223 		:
224 		fChild(child),
225 		fFlags(flags)
226 	{
227 		Initialized();
228 	}
229 
230 	virtual void AddDump(TraceOutput& out)
231 	{
232 		out.Print("team wait for child, child: %ld, "
233 			"flags: 0x%lx", fChild, fFlags);
234 	}
235 
236 private:
237 	pid_t	fChild;
238 	uint32	fFlags;
239 };
240 
241 
242 class WaitForChildDone : public AbstractTraceEntry {
243 public:
244 	WaitForChildDone(const job_control_entry& entry)
245 		:
246 		fState(entry.state),
247 		fTeam(entry.thread),
248 		fStatus(entry.status),
249 		fReason(entry.reason),
250 		fSignal(entry.signal)
251 	{
252 		Initialized();
253 	}
254 
255 	WaitForChildDone(status_t error)
256 		:
257 		fTeam(error)
258 	{
259 		Initialized();
260 	}
261 
262 	virtual void AddDump(TraceOutput& out)
263 	{
264 		if (fTeam >= 0) {
265 			out.Print("team wait for child done, team: %ld, "
266 				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
267 				fTeam, job_control_state_name(fState), fStatus, fReason,
268 				fSignal);
269 		} else {
270 			out.Print("team wait for child failed, error: "
271 				"0x%lx, ", fTeam);
272 		}
273 	}
274 
275 private:
276 	job_control_state	fState;
277 	team_id				fTeam;
278 	status_t			fStatus;
279 	uint16				fReason;
280 	uint16				fSignal;
281 };
282 
283 }	// namespace TeamTracing
284 
285 #	define T(x) new(std::nothrow) TeamTracing::x;
286 #else
287 #	define T(x) ;
288 #endif
289 
290 
291 
292 //	#pragma mark - Private functions
293 
294 
295 static void
296 _dump_team_info(struct team *team)
297 {
298 	kprintf("TEAM: %p\n", team);
299 	kprintf("id:          %ld (%#lx)\n", team->id, team->id);
300 	kprintf("name:        '%s'\n", team->name);
301 	kprintf("args:        '%s'\n", team->args);
302 	kprintf("next:        %p\n", team->next);
303 	kprintf("parent:      %p", team->parent);
304 	if (team->parent != NULL) {
305 		kprintf(" (id = %ld)\n", team->parent->id);
306 	} else
307 		kprintf("\n");
308 
309 	kprintf("children:    %p\n", team->children);
310 	kprintf("num_threads: %d\n", team->num_threads);
311 	kprintf("state:       %d\n", team->state);
312 	kprintf("flags:       0x%lx\n", team->flags);
313 	kprintf("io_context:  %p\n", team->io_context);
314 	if (team->address_space)
315 		kprintf("address_space: %p\n", team->address_space);
316 	kprintf("main_thread: %p\n", team->main_thread);
317 	kprintf("thread_list: %p\n", team->thread_list);
318 	kprintf("group_id:    %ld\n", team->group_id);
319 	kprintf("session_id:  %ld\n", team->session_id);
320 }
321 
322 
323 static int
324 dump_team_info(int argc, char **argv)
325 {
326 	struct hash_iterator iterator;
327 	struct team *team;
328 	team_id id = -1;
329 	bool found = false;
330 
331 	if (argc < 2) {
332 		struct thread* thread = thread_get_current_thread();
333 		if (thread != NULL && thread->team != NULL)
334 			_dump_team_info(thread->team);
335 		else
336 			kprintf("No current team!\n");
337 		return 0;
338 	}
339 
340 	id = strtoul(argv[1], NULL, 0);
341 	if (IS_KERNEL_ADDRESS(id)) {
342 		// semi-hack
343 		_dump_team_info((struct team *)id);
344 		return 0;
345 	}
346 
347 	// walk through the team list, trying to match name or id
348 	hash_open(sTeamHash, &iterator);
349 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
350 		if ((team->name && strcmp(argv[1], team->name) == 0) || team->id == id) {
351 			_dump_team_info(team);
352 			found = true;
353 			break;
354 		}
355 	}
356 	hash_close(sTeamHash, &iterator, false);
357 
358 	if (!found)
359 		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
360 	return 0;
361 }
362 
363 
364 static int
365 dump_teams(int argc, char **argv)
366 {
367 	struct hash_iterator iterator;
368 	struct team *team;
369 
370 	kprintf("team           id  parent      name\n");
371 	hash_open(sTeamHash, &iterator);
372 
373 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
374 		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->name);
375 	}
376 
377 	hash_close(sTeamHash, &iterator, false);
378 	return 0;
379 }
380 
381 
382 static int
383 team_struct_compare(void *_p, const void *_key)
384 {
385 	struct team *p = (struct team*)_p;
386 	const struct team_key *key = (const struct team_key*)_key;
387 
388 	if (p->id == key->id)
389 		return 0;
390 
391 	return 1;
392 }
393 
394 
395 static uint32
396 team_struct_hash(void *_p, const void *_key, uint32 range)
397 {
398 	struct team *p = (struct team*)_p;
399 	const struct team_key *key = (const struct team_key*)_key;
400 
401 	if (p != NULL)
402 		return p->id % range;
403 
404 	return (uint32)key->id % range;
405 }
406 
407 
408 static int
409 process_group_compare(void *_group, const void *_key)
410 {
411 	struct process_group *group = (struct process_group*)_group;
412 	const struct team_key *key = (const struct team_key*)_key;
413 
414 	if (group->id == key->id)
415 		return 0;
416 
417 	return 1;
418 }
419 
420 
421 static uint32
422 process_group_hash(void *_group, const void *_key, uint32 range)
423 {
424 	struct process_group *group = (struct process_group*)_group;
425 	const struct team_key *key = (const struct team_key*)_key;
426 
427 	if (group != NULL)
428 		return group->id % range;
429 
430 	return (uint32)key->id % range;
431 }
432 
433 
434 static void
435 insert_team_into_parent(struct team *parent, struct team *team)
436 {
437 	ASSERT(parent != NULL);
438 
439 	team->siblings_next = parent->children;
440 	parent->children = team;
441 	team->parent = parent;
442 }
443 
444 
445 /*!	Note: must have team lock held */
446 static void
447 remove_team_from_parent(struct team *parent, struct team *team)
448 {
449 	struct team *child, *last = NULL;
450 
451 	for (child = parent->children; child != NULL; child = child->siblings_next) {
452 		if (child == team) {
453 			if (last == NULL)
454 				parent->children = child->siblings_next;
455 			else
456 				last->siblings_next = child->siblings_next;
457 
458 			team->parent = NULL;
459 			break;
460 		}
461 		last = child;
462 	}
463 }
464 
465 
466 /*!	Reparents each of our children to the kernel team.
467 	Note: must have team lock held
468 */
469 static void
470 reparent_children(struct team *team)
471 {
472 	struct team *child;
473 
474 	while ((child = team->children) != NULL) {
475 		// remove the child from the current proc and add to the parent
476 		remove_team_from_parent(team, child);
477 		insert_team_into_parent(sKernelTeam, child);
478 	}
479 
480 	// move job control entries too
481 	sKernelTeam->stopped_children->entries.MoveFrom(
482 		&team->stopped_children->entries);
483 	sKernelTeam->continued_children->entries.MoveFrom(
484 		&team->continued_children->entries);
485 
486 	// Note that we don't move the dead children entries. Those will be deleted
487 	// when the team structure is deleted.
488 }
489 
490 
491 static bool
492 is_session_leader(struct team *team)
493 {
494 	return team->session_id == team->id;
495 }
496 
497 
498 static bool
499 is_process_group_leader(struct team *team)
500 {
501 	return team->group_id == team->id;
502 }
503 
504 
505 static void
506 deferred_delete_process_group(struct process_group *group)
507 {
508 	if (group == NULL)
509 		return;
510 
511 	// remove_group_from_session() keeps this pointer around
512 	// only if the session can be freed as well
513 	if (group->session) {
514 		TRACE(("deferred_delete_process_group(): frees session %ld\n",
515 			group->session->id));
516 		deferred_free(group->session);
517 	}
518 
519 	deferred_free(group);
520 }
521 
522 
523 /*!	Removes a group from a session, and puts the session object
524 	back into the session cache, if it's not used anymore.
525 	You must hold the team lock when calling this function.
526 */
527 static void
528 remove_group_from_session(struct process_group *group)
529 {
530 	struct process_session *session = group->session;
531 
532 	// the group must be in a session for this function to have any effect
533 	if (session == NULL)
534 		return;
535 
536 	hash_remove(sGroupHash, group);
537 
538 	// we cannot free the resource here, so we're keeping the group link
539 	// around - this way it'll be freed by free_process_group()
540 	if (--session->group_count > 0)
541 		group->session = NULL;
542 }
543 
544 
545 /*!	Team lock must be held.
546 */
547 static void
548 acquire_process_group_ref(pid_t groupID)
549 {
550 	process_group* group = team_get_process_group_locked(NULL, groupID);
551 	if (group == NULL) {
552 		panic("acquire_process_group_ref(): unknown group ID: %ld", groupID);
553 		return;
554 	}
555 
556 	group->refs++;
557 }
558 
559 
560 /*!	Team lock must be held.
561 */
562 static void
563 release_process_group_ref(pid_t groupID)
564 {
565 	process_group* group = team_get_process_group_locked(NULL, groupID);
566 	if (group == NULL) {
567 		panic("release_process_group_ref(): unknown group ID: %ld", groupID);
568 		return;
569 	}
570 
571 	if (group->refs <= 0) {
572 		panic("release_process_group_ref(%ld): ref count already 0", groupID);
573 		return;
574 	}
575 
576 	if (--group->refs > 0)
577 		return;
578 
579 	// group is no longer used
580 
581 	remove_group_from_session(group);
582 	deferred_delete_process_group(group);
583 }
584 
585 
586 /*!	You must hold the team lock when calling this function. */
587 static void
588 insert_group_into_session(struct process_session *session, struct process_group *group)
589 {
590 	if (group == NULL)
591 		return;
592 
593 	group->session = session;
594 	hash_insert(sGroupHash, group);
595 	session->group_count++;
596 }
597 
598 
599 /*!	You must hold the team lock when calling this function. */
600 static void
601 insert_team_into_group(struct process_group *group, struct team *team)
602 {
603 	team->group = group;
604 	team->group_id = group->id;
605 	team->session_id = group->session->id;
606 
607 	team->group_next = group->teams;
608 	group->teams = team;
609 	acquire_process_group_ref(group->id);
610 }
611 
612 
613 /*!	Removes the team from the group.
614 
615 	\param team the team that'll be removed from its group
616 */
617 static void
618 remove_team_from_group(struct team *team)
619 {
620 	struct process_group *group = team->group;
621 	struct team *current, *last = NULL;
622 
623 	// the team must be in a group for this function to have any effect
624 	if (group == NULL)
625 		return;
626 
627 	for (current = group->teams; current != NULL; current = current->group_next) {
628 		if (current == team) {
629 			if (last == NULL)
630 				group->teams = current->group_next;
631 			else
632 				last->group_next = current->group_next;
633 
634 			team->group = NULL;
635 			break;
636 		}
637 		last = current;
638 	}
639 
640 	team->group = NULL;
641 	team->group_next = NULL;
642 
643 	release_process_group_ref(group->id);
644 }
645 
646 
647 static struct process_group *
648 create_process_group(pid_t id)
649 {
650 	struct process_group *group = (struct process_group *)malloc(sizeof(struct process_group));
651 	if (group == NULL)
652 		return NULL;
653 
654 	group->id = id;
655 	group->refs = 0;
656 	group->session = NULL;
657 	group->teams = NULL;
658 	group->orphaned = true;
659 	return group;
660 }
661 
662 
663 static struct process_session *
664 create_process_session(pid_t id)
665 {
666 	struct process_session *session
667 		= (struct process_session *)malloc(sizeof(struct process_session));
668 	if (session == NULL)
669 		return NULL;
670 
671 	session->id = id;
672 	session->group_count = 0;
673 	session->controlling_tty = -1;
674 	session->foreground_group = -1;
675 
676 	return session;
677 }
678 
679 
680 static void
681 set_team_name(struct team* team, const char* name)
682 {
683 	if (const char* lastSlash = strrchr(name, '/'))
684 		name = lastSlash + 1;
685 
686 	strlcpy(team->name, name, B_OS_NAME_LENGTH);
687 }
688 
689 
690 static struct team *
691 create_team_struct(const char *name, bool kernel)
692 {
693 	struct team *team = (struct team *)malloc(sizeof(struct team));
694 	if (team == NULL)
695 		return NULL;
696 	MemoryDeleter teamDeleter(team);
697 
698 	team->next = team->siblings_next = team->children = team->parent = NULL;
699 	team->id = allocate_thread_id();
700 	set_team_name(team, name);
701 	team->args[0] = '\0';
702 	team->num_threads = 0;
703 	team->io_context = NULL;
704 	team->address_space = NULL;
705 	team->realtime_sem_context = NULL;
706 	team->xsi_sem_context = NULL;
707 	team->thread_list = NULL;
708 	team->main_thread = NULL;
709 	team->loading_info = NULL;
710 	team->state = TEAM_STATE_BIRTH;
711 	team->flags = 0;
712 	team->death_sem = -1;
713 	team->user_data_area = -1;
714 	team->user_data = 0;
715 	team->used_user_data = 0;
716 	team->user_data_size = 0;
717 	team->free_user_threads = NULL;
718 
719 	team->supplementary_groups = NULL;
720 	team->supplementary_group_count = 0;
721 
722 	team->dead_threads_kernel_time = 0;
723 	team->dead_threads_user_time = 0;
724 
725 	// dead threads
726 	list_init(&team->dead_threads);
727 	team->dead_threads_count = 0;
728 
729 	// dead children
730 	team->dead_children = new(nothrow) team_dead_children;
731 	if (team->dead_children == NULL)
732 		return NULL;
733 	ObjectDeleter<team_dead_children> deadChildrenDeleter(team->dead_children);
734 
735 	team->dead_children->count = 0;
736 	team->dead_children->kernel_time = 0;
737 	team->dead_children->user_time = 0;
738 
739 	// stopped children
740 	team->stopped_children = new(nothrow) team_job_control_children;
741 	if (team->stopped_children == NULL)
742 		return NULL;
743 	ObjectDeleter<team_job_control_children> stoppedChildrenDeleter(
744 		team->stopped_children);
745 
746 	// continued children
747 	team->continued_children = new(nothrow) team_job_control_children;
748 	if (team->continued_children == NULL)
749 		return NULL;
750 	ObjectDeleter<team_job_control_children> continuedChildrenDeleter(
751 		team->continued_children);
752 
753 	// job control entry
754 	team->job_control_entry = new(nothrow) job_control_entry;
755 	if (team->job_control_entry == NULL)
756 		return NULL;
757 	ObjectDeleter<job_control_entry> jobControlEntryDeleter(
758 		team->job_control_entry);
759 	team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
760 	team->job_control_entry->thread = team->id;
761 	team->job_control_entry->team = team;
762 
763 	list_init(&team->image_list);
764 	list_init(&team->watcher_list);
765 
766 	clear_team_debug_info(&team->debug_info, true);
767 
768 	if (arch_team_init_team_struct(team, kernel) < 0)
769 		return NULL;
770 
771 	// publish dead/stopped/continued children condition vars
772 	team->dead_children->condition_variable.Init(team->dead_children,
773 		"team children");
774 
775 	// keep all allocated structures
776 	jobControlEntryDeleter.Detach();
777 	continuedChildrenDeleter.Detach();
778 	stoppedChildrenDeleter.Detach();
779 	deadChildrenDeleter.Detach();
780 	teamDeleter.Detach();
781 
782 	return team;
783 }
784 
785 
786 static void
787 delete_team_struct(struct team *team)
788 {
789 	while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
790 			&team->dead_threads)) {
791 		free(threadDeathEntry);
792 	}
793 
794 	while (job_control_entry* entry = team->dead_children->entries.RemoveHead())
795 		delete entry;
796 
797 	while (free_user_thread* entry = team->free_user_threads) {
798 		team->free_user_threads = entry->next;
799 		free(entry);
800 	}
801 
802 	malloc_referenced_release(team->supplementary_groups);
803 
804 	delete team->job_control_entry;
805 		// usually already NULL and transferred to the parent
806 	delete team->continued_children;
807 	delete team->stopped_children;
808 	delete team->dead_children;
809 	free(team);
810 }
811 
812 
813 static status_t
814 create_team_user_data(struct team* team)
815 {
816 	void* address = (void*)KERNEL_USER_DATA_BASE;
817 	size_t size = 4 * B_PAGE_SIZE;
818 	team->user_data_area = create_area_etc(team->id, "user area", &address,
819 		B_BASE_ADDRESS, size, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0);
820 	if (team->user_data_area < 0)
821 		return team->user_data_area;
822 
823 	team->user_data = (addr_t)address;
824 	team->used_user_data = 0;
825 	team->user_data_size = size;
826 	team->free_user_threads = NULL;
827 
828 	return B_OK;
829 }
830 
831 
832 static void
833 delete_team_user_data(struct team* team)
834 {
835 	if (team->user_data_area >= 0) {
836 		vm_delete_area(team->id, team->user_data_area, true);
837 		team->user_data = 0;
838 		team->used_user_data = 0;
839 		team->user_data_size = 0;
840 		team->user_data_area = -1;
841 		while (free_user_thread* entry = team->free_user_threads) {
842 			team->free_user_threads = entry->next;
843 			free(entry);
844 		}
845 	}
846 }
847 
848 
849 static status_t
850 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
851 	int32 argCount, int32 envCount, char**& _flatArgs)
852 {
853 	if (argCount < 0 || envCount < 0)
854 		return B_BAD_VALUE;
855 
856 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
857 		return B_TOO_MANY_ARGS;
858 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
859 		return B_BAD_VALUE;
860 
861 	if (!IS_USER_ADDRESS(userFlatArgs))
862 		return B_BAD_ADDRESS;
863 
864 	// allocate kernel memory
865 	char** flatArgs = (char**)malloc(flatArgsSize);
866 	if (flatArgs == NULL)
867 		return B_NO_MEMORY;
868 
869 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
870 		free(flatArgs);
871 		return B_BAD_ADDRESS;
872 	}
873 
874 	// check and relocate the array
875 	status_t error = B_OK;
876 	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
877 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
878 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
879 		if (i == argCount || i == argCount + envCount + 1) {
880 			// check array null termination
881 			if (flatArgs[i] != NULL) {
882 				error = B_BAD_VALUE;
883 				break;
884 			}
885 		} else {
886 			// check string
887 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
888 			size_t maxLen = stringEnd - arg;
889 			if (arg < stringBase || arg >= stringEnd
890 					|| strnlen(arg, maxLen) == maxLen) {
891 				error = B_BAD_VALUE;
892 				break;
893 			}
894 
895 			flatArgs[i] = arg;
896 		}
897 	}
898 
899 	if (error == B_OK)
900 		_flatArgs = flatArgs;
901 	else
902 		free(flatArgs);
903 
904 	return error;
905 }
906 
907 
908 static void
909 free_team_arg(struct team_arg *teamArg)
910 {
911 	if (teamArg != NULL) {
912 		free(teamArg->flat_args);
913 		free(teamArg->path);
914 		free(teamArg);
915 	}
916 }
917 
918 
919 static status_t
920 create_team_arg(struct team_arg **_teamArg, const char *path, char** flatArgs,
921 	size_t flatArgsSize, int32 argCount, int32 envCount, port_id port,
922 	uint32 token)
923 {
924 	struct team_arg *teamArg = (struct team_arg*)malloc(sizeof(team_arg));
925 	if (teamArg == NULL)
926 		return B_NO_MEMORY;
927 
928 	teamArg->path = strdup(path);
929 	if (teamArg->path == NULL) {
930 		free(teamArg);
931 		return B_NO_MEMORY;
932 	}
933 
934 	// copy the args over
935 
936 	teamArg->flat_args = flatArgs;
937 	teamArg->flat_args_size = flatArgsSize;
938 	teamArg->arg_count = argCount;
939 	teamArg->env_count = envCount;
940 	teamArg->error_port = port;
941 	teamArg->error_token = token;
942 
943 	*_teamArg = teamArg;
944 	return B_OK;
945 }
946 
947 
948 static int32
949 team_create_thread_start(void *args)
950 {
951 	status_t err;
952 	struct thread *t;
953 	struct team *team;
954 	struct team_arg *teamArgs = (struct team_arg*)args;
955 	const char *path;
956 	addr_t entry;
957 	char ustack_name[128];
958 	uint32 sizeLeft;
959 	char **userArgs;
960 	char **userEnv;
961 	struct user_space_program_args *programArgs;
962 	uint32 argCount, envCount, i;
963 
964 	t = thread_get_current_thread();
965 	team = t->team;
966 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
967 
968 	TRACE(("team_create_thread_start: entry thread %ld\n", t->id));
969 
970 	// get a user thread for the main thread
971 	t->user_thread = team_allocate_user_thread(team);
972 
973 	// create an initial primary stack area
974 
975 	// Main stack area layout is currently as follows (starting from 0):
976 	//
977 	// size								| usage
978 	// ---------------------------------+--------------------------------
979 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
980 	// TLS_SIZE							| TLS data
981 	// sizeof(user_space_program_args)	| argument structure for the runtime
982 	//									| loader
983 	// flat arguments size				| flat process arguments and environment
984 
985 	// ToDo: ENV_SIZE is a) limited, and b) not used after libroot copied it to the heap
986 	// ToDo: we could reserve the whole USER_STACK_REGION upfront...
987 
988 	sizeLeft = PAGE_ALIGN(USER_MAIN_THREAD_STACK_SIZE
989 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE + TLS_SIZE
990 		+ sizeof(struct user_space_program_args) + teamArgs->flat_args_size);
991 	t->user_stack_base = USER_STACK_REGION + USER_STACK_REGION_SIZE - sizeLeft;
992 	t->user_stack_size = USER_MAIN_THREAD_STACK_SIZE
993 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
994 		// the exact location at the end of the user stack area
995 
996 	sprintf(ustack_name, "%s_main_stack", team->name);
997 	t->user_stack_area = create_area_etc(team->id, ustack_name,
998 		(void **)&t->user_stack_base, B_EXACT_ADDRESS, sizeLeft, B_NO_LOCK,
999 		B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0);
1000 	if (t->user_stack_area < 0) {
1001 		dprintf("team_create_thread_start: could not create default user stack region\n");
1002 
1003 		free_team_arg(teamArgs);
1004 		return t->user_stack_area;
1005 	}
1006 
1007 	// now that the TLS area is allocated, initialize TLS
1008 	arch_thread_init_tls(t);
1009 
1010 	argCount = teamArgs->arg_count;
1011 	envCount = teamArgs->env_count;
1012 
1013 	programArgs = (struct user_space_program_args *)(t->user_stack_base
1014 		+ t->user_stack_size + TLS_SIZE);
1015 
1016 	userArgs = (char**)(programArgs + 1);
1017 	userEnv = userArgs + argCount + 1;
1018 	path = teamArgs->path;
1019 
1020 	if (user_strlcpy(programArgs->program_path, path,
1021 				sizeof(programArgs->program_path)) < B_OK
1022 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1023 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char **)) < B_OK
1024 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1025 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char **)) < B_OK
1026 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1027 				sizeof(port_id)) < B_OK
1028 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1029 				sizeof(uint32)) < B_OK
1030 		|| user_memcpy(userArgs, teamArgs->flat_args,
1031 				teamArgs->flat_args_size) < B_OK) {
1032 		// the team deletion process will clean this mess
1033 		return B_BAD_ADDRESS;
1034 	}
1035 
1036 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1037 
1038 	// add args to info member
1039 	team->args[0] = 0;
1040 	strlcpy(team->args, path, sizeof(team->args));
1041 	for (i = 1; i < argCount; i++) {
1042 		strlcat(team->args, " ", sizeof(team->args));
1043 		strlcat(team->args, teamArgs->flat_args[i], sizeof(team->args));
1044 	}
1045 
1046 	free_team_arg(teamArgs);
1047 		// the arguments are already on the user stack, we no longer need
1048 		// them in this form
1049 
1050 	// NOTE: Normally arch_thread_enter_userspace() never returns, that is,
1051 	// automatic variables with function scope will never be destroyed.
1052 	{
1053 		// find runtime_loader path
1054 		KPath runtimeLoaderPath;
1055 		err = find_directory(B_BEOS_SYSTEM_DIRECTORY, gBootDevice, false,
1056 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1057 		if (err < B_OK) {
1058 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1059 				strerror(err)));
1060 			return err;
1061 		}
1062 		runtimeLoaderPath.UnlockBuffer();
1063 		err = runtimeLoaderPath.Append("runtime_loader");
1064 
1065 		if (err == B_OK)
1066 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0, &entry);
1067 	}
1068 
1069 	if (err < B_OK) {
1070 		// Luckily, we don't have to clean up the mess we created - that's
1071 		// done for us by the normal team deletion process
1072 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1073 			"%s\n", strerror(err)));
1074 		return err;
1075 	}
1076 
1077 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1078 
1079 	team->state = TEAM_STATE_NORMAL;
1080 
1081 	// jump to the entry point in user space
1082 	return arch_thread_enter_userspace(t, entry, programArgs, NULL);
1083 		// only returns in case of error
1084 }
1085 
1086 
1087 /*!	The BeOS kernel exports a function with this name, but most probably with
1088 	different parameters; we should not make it public.
1089 */
1090 static thread_id
1091 load_image_etc(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1092 	int32 envCount, int32 priority, uint32 flags, port_id errorPort,
1093 	uint32 errorToken)
1094 {
1095 	char** flatArgs = _flatArgs;
1096 	struct team *team, *parent;
1097 	const char *threadName;
1098 	thread_id thread;
1099 	status_t status;
1100 	cpu_status state;
1101 	struct team_arg *teamArgs;
1102 	struct team_loading_info loadingInfo;
1103 
1104 	if (flatArgs == NULL || argCount == 0)
1105 		return B_BAD_VALUE;
1106 
1107 	const char* path = flatArgs[0];
1108 
1109 	TRACE(("load_image_etc: name '%s', args = %p, argCount = %ld\n",
1110 		path, flatArgs, argCount));
1111 
1112 	team = create_team_struct(path, false);
1113 	if (team == NULL)
1114 		return B_NO_MEMORY;
1115 
1116 	parent = thread_get_current_thread()->team;
1117 
1118 	if (flags & B_WAIT_TILL_LOADED) {
1119 		loadingInfo.thread = thread_get_current_thread();
1120 		loadingInfo.result = B_ERROR;
1121 		loadingInfo.done = false;
1122 		team->loading_info = &loadingInfo;
1123 	}
1124 
1125 	// Inherit the parent's user/group, but also check the executable's
1126 	// set-user/group-id permission
1127 	inherit_parent_user_and_group(team, parent);
1128 	update_set_id_user_and_group(team, path);
1129 
1130 	state = disable_interrupts();
1131 	GRAB_TEAM_LOCK();
1132 
1133 	hash_insert(sTeamHash, team);
1134 	insert_team_into_parent(parent, team);
1135 	insert_team_into_group(parent->group, team);
1136 	sUsedTeams++;
1137 
1138 	RELEASE_TEAM_LOCK();
1139 	restore_interrupts(state);
1140 
1141 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1142 		envCount, errorPort, errorToken);
1143 
1144 	if (status != B_OK)
1145 		goto err1;
1146 
1147 	_flatArgs = NULL;
1148 		// args are owned by the team_arg structure now
1149 
1150 	// create a new io_context for this team
1151 	team->io_context = vfs_new_io_context(parent->io_context);
1152 	if (!team->io_context) {
1153 		status = B_NO_MEMORY;
1154 		goto err2;
1155 	}
1156 
1157 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1158 	vfs_exec_io_context(team->io_context);
1159 
1160 	// create an address space for this team
1161 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1162 		&team->address_space);
1163 	if (status < B_OK)
1164 		goto err3;
1165 
1166 	// cut the path from the main thread name
1167 	threadName = strrchr(path, '/');
1168 	if (threadName != NULL)
1169 		threadName++;
1170 	else
1171 		threadName = path;
1172 
1173 	// create the user data area
1174 	status = create_team_user_data(team);
1175 	if (status != B_OK)
1176 		goto err4;
1177 
1178 	// Create a kernel thread, but under the context of the new team
1179 	// The new thread will take over ownership of teamArgs
1180 	thread = spawn_kernel_thread_etc(team_create_thread_start, threadName,
1181 		B_NORMAL_PRIORITY, teamArgs, team->id, team->id);
1182 	if (thread < 0) {
1183 		status = thread;
1184 		goto err5;
1185 	}
1186 
1187 	// wait for the loader of the new team to finish its work
1188 	if (flags & B_WAIT_TILL_LOADED) {
1189 		struct thread *mainThread;
1190 
1191 		state = disable_interrupts();
1192 		GRAB_THREAD_LOCK();
1193 
1194 		mainThread = thread_get_thread_struct_locked(thread);
1195 		if (mainThread) {
1196 			// resume the team's main thread
1197 			if (mainThread->state == B_THREAD_SUSPENDED)
1198 				scheduler_enqueue_in_run_queue(mainThread);
1199 
1200 			// Now suspend ourselves until loading is finished.
1201 			// We will be woken either by the thread, when it finished or
1202 			// aborted loading, or when the team is going to die (e.g. is
1203 			// killed). In either case the one setting `loadingInfo.done' is
1204 			// responsible for removing the info from the team structure.
1205 			while (!loadingInfo.done) {
1206 				thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
1207 				scheduler_reschedule();
1208 			}
1209 		} else {
1210 			// Impressive! Someone managed to kill the thread in this short
1211 			// time.
1212 		}
1213 
1214 		RELEASE_THREAD_LOCK();
1215 		restore_interrupts(state);
1216 
1217 		if (loadingInfo.result < B_OK)
1218 			return loadingInfo.result;
1219 	}
1220 
1221 	// notify the debugger
1222 	user_debug_team_created(team->id);
1223 
1224 	return thread;
1225 
1226 err5:
1227 	delete_team_user_data(team);
1228 err4:
1229 	vm_put_address_space(team->address_space);
1230 err3:
1231 	vfs_free_io_context(team->io_context);
1232 err2:
1233 	free_team_arg(teamArgs);
1234 err1:
1235 	// remove the team from the team hash table and delete the team structure
1236 	state = disable_interrupts();
1237 	GRAB_TEAM_LOCK();
1238 
1239 	remove_team_from_group(team);
1240 	remove_team_from_parent(parent, team);
1241 	hash_remove(sTeamHash, team);
1242 
1243 	RELEASE_TEAM_LOCK();
1244 	restore_interrupts(state);
1245 
1246 	delete_team_struct(team);
1247 
1248 	return status;
1249 }
1250 
1251 
1252 /*!	Almost shuts down the current team and loads a new image into it.
1253 	If successful, this function does not return and takes over ownership of
1254 	the arguments provided.
1255 	This function may only be called from user space.
1256 */
1257 static status_t
1258 exec_team(const char *path, char**& _flatArgs, size_t flatArgsSize,
1259 	int32 argCount, int32 envCount)
1260 {
1261 	// NOTE: Since this function normally doesn't return, don't use automatic
1262 	// variables that need destruction in the function scope.
1263 	char** flatArgs = _flatArgs;
1264 	struct team *team = thread_get_current_thread()->team;
1265 	struct team_arg *teamArgs;
1266 	const char *threadName;
1267 	status_t status = B_OK;
1268 	cpu_status state;
1269 	struct thread *thread;
1270 	thread_id nubThreadID = -1;
1271 
1272 	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
1273 		path, argCount, envCount, team->id));
1274 
1275 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1276 
1277 	// switching the kernel at run time is probably not a good idea :)
1278 	if (team == team_get_kernel_team())
1279 		return B_NOT_ALLOWED;
1280 
1281 	// we currently need to be single threaded here
1282 	// ToDo: maybe we should just kill all other threads and
1283 	//	make the current thread the team's main thread?
1284 	if (team->main_thread != thread_get_current_thread())
1285 		return B_NOT_ALLOWED;
1286 
1287 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1288 	// We iterate through the thread list to make sure that there's no other
1289 	// thread.
1290 	state = disable_interrupts();
1291 	GRAB_TEAM_LOCK();
1292 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1293 
1294 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1295 		nubThreadID = team->debug_info.nub_thread;
1296 
1297 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1298 
1299 	for (thread = team->thread_list; thread; thread = thread->team_next) {
1300 		if (thread != team->main_thread && thread->id != nubThreadID) {
1301 			status = B_NOT_ALLOWED;
1302 			break;
1303 		}
1304 	}
1305 
1306 	RELEASE_TEAM_LOCK();
1307 	restore_interrupts(state);
1308 
1309 	if (status != B_OK)
1310 		return status;
1311 
1312 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1313 		envCount, -1, 0);
1314 
1315 	if (status != B_OK)
1316 		return status;
1317 
1318 	_flatArgs = NULL;
1319 		// args are owned by the team_arg structure now
1320 
1321 	// ToDo: remove team resources if there are any left
1322 	// thread_atkernel_exit() might not be called at all
1323 
1324 	thread_reset_for_exec();
1325 
1326 	user_debug_prepare_for_exec();
1327 
1328 	delete_team_user_data(team);
1329 	vm_delete_areas(team->address_space);
1330 	xsi_sem_undo(team);
1331 	delete_owned_ports(team->id);
1332 	sem_delete_owned_sems(team->id);
1333 	remove_images(team);
1334 	vfs_exec_io_context(team->io_context);
1335 	delete_realtime_sem_context(team->realtime_sem_context);
1336 	team->realtime_sem_context = NULL;
1337 
1338 	status = create_team_user_data(team);
1339 	if (status != B_OK) {
1340 		// creating the user data failed -- we're toast
1341 		// TODO: We should rather keep the old user area in the first place.
1342 		exit_thread(status);
1343 		return status;
1344 	}
1345 
1346 	user_debug_finish_after_exec();
1347 
1348 	// rename the team
1349 
1350 	set_team_name(team, path);
1351 
1352 	// cut the path from the team name and rename the main thread, too
1353 	threadName = strrchr(path, '/');
1354 	if (threadName != NULL)
1355 		threadName++;
1356 	else
1357 		threadName = path;
1358 	rename_thread(thread_get_current_thread_id(), threadName);
1359 
1360 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1361 
1362 	// Update user/group according to the executable's set-user/group-id
1363 	// permission.
1364 	update_set_id_user_and_group(team, path);
1365 
1366 	status = team_create_thread_start(teamArgs);
1367 		// this one usually doesn't return...
1368 
1369 	// sorry, we have to kill us, there is no way out anymore
1370 	// (without any areas left and all that)
1371 	exit_thread(status);
1372 
1373 	// we return a status here since the signal that is sent by the
1374 	// call above is not immediately handled
1375 	return B_ERROR;
1376 }
1377 
1378 
1379 /*! This is the first function to be called from the newly created
1380 	main child thread.
1381 	It will fill in everything that's left to do from fork_arg, and
1382 	return from the parent's fork() syscall to the child.
1383 */
1384 static int32
1385 fork_team_thread_start(void *_args)
1386 {
1387 	struct thread *thread = thread_get_current_thread();
1388 	struct fork_arg *forkArgs = (struct fork_arg *)_args;
1389 
1390 	struct arch_fork_arg archArgs = forkArgs->arch_info;
1391 		// we need a local copy of the arch dependent part
1392 
1393 	thread->user_stack_area = forkArgs->user_stack_area;
1394 	thread->user_stack_base = forkArgs->user_stack_base;
1395 	thread->user_stack_size = forkArgs->user_stack_size;
1396 	thread->user_local_storage = forkArgs->user_local_storage;
1397 	thread->sig_block_mask = forkArgs->sig_block_mask;
1398 	thread->user_thread = forkArgs->user_thread;
1399 
1400 	arch_thread_init_tls(thread);
1401 
1402 	free(forkArgs);
1403 
1404 	// set frame of the parent thread to this one, too
1405 
1406 	arch_restore_fork_frame(&archArgs);
1407 		// This one won't return here
1408 
1409 	return 0;
1410 }
1411 
1412 
1413 static thread_id
1414 fork_team(void)
1415 {
1416 	struct thread *parentThread = thread_get_current_thread();
1417 	struct team *parentTeam = parentThread->team, *team;
1418 	struct fork_arg *forkArgs;
1419 	struct area_info info;
1420 	thread_id threadID;
1421 	cpu_status state;
1422 	status_t status;
1423 	int32 cookie;
1424 
1425 	TRACE(("fork_team(): team %ld\n", parentTeam->id));
1426 
1427 	if (parentTeam == team_get_kernel_team())
1428 		return B_NOT_ALLOWED;
1429 
1430 	// create a new team
1431 	// ToDo: this is very similar to team_create_team() - maybe we can do something about it :)
1432 
1433 	team = create_team_struct(parentTeam->name, false);
1434 	if (team == NULL)
1435 		return B_NO_MEMORY;
1436 
1437 	strlcpy(team->args, parentTeam->args, sizeof(team->args));
1438 
1439 	// Inherit the parent's user/group.
1440 	inherit_parent_user_and_group(team, parentTeam);
1441 
1442 	state = disable_interrupts();
1443 	GRAB_TEAM_LOCK();
1444 
1445 	hash_insert(sTeamHash, team);
1446 	insert_team_into_parent(parentTeam, team);
1447 	insert_team_into_group(parentTeam->group, team);
1448 	sUsedTeams++;
1449 
1450 	RELEASE_TEAM_LOCK();
1451 	restore_interrupts(state);
1452 
1453 	forkArgs = (struct fork_arg *)malloc(sizeof(struct fork_arg));
1454 	if (forkArgs == NULL) {
1455 		status = B_NO_MEMORY;
1456 		goto err1;
1457 	}
1458 
1459 	// create a new io_context for this team
1460 	team->io_context = vfs_new_io_context(parentTeam->io_context);
1461 	if (!team->io_context) {
1462 		status = B_NO_MEMORY;
1463 		goto err2;
1464 	}
1465 
1466 	// duplicate the realtime sem context
1467 	if (parentTeam->realtime_sem_context) {
1468 		team->realtime_sem_context = clone_realtime_sem_context(
1469 			parentTeam->realtime_sem_context);
1470 		if (team->realtime_sem_context == NULL) {
1471 			status = B_NO_MEMORY;
1472 			goto err25;
1473 		}
1474 	}
1475 
1476 	// create an address space for this team
1477 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1478 		&team->address_space);
1479 	if (status < B_OK)
1480 		goto err3;
1481 
1482 	// copy all areas of the team
1483 	// ToDo: should be able to handle stack areas differently (ie. don't have them copy-on-write)
1484 	// ToDo: all stacks of other threads than the current one could be left out
1485 
1486 	forkArgs->user_thread = NULL;
1487 
1488 	cookie = 0;
1489 	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
1490 		if (info.area == parentTeam->user_data_area) {
1491 			// don't clone the user area; just create a new one
1492 			status = create_team_user_data(team);
1493 			if (status != B_OK)
1494 				break;
1495 
1496 			forkArgs->user_thread = team_allocate_user_thread(team);
1497 		} else {
1498 			void *address;
1499 			area_id area = vm_copy_area(team->address_space->id, info.name,
1500 				&address, B_CLONE_ADDRESS, info.protection, info.area);
1501 			if (area < B_OK) {
1502 				status = area;
1503 				break;
1504 			}
1505 
1506 			if (info.area == parentThread->user_stack_area)
1507 				forkArgs->user_stack_area = area;
1508 		}
1509 	}
1510 
1511 	if (status < B_OK)
1512 		goto err4;
1513 
1514 	if (forkArgs->user_thread == NULL) {
1515 #if KDEBUG
1516 		panic("user data area not found, parent area is %ld",
1517 			parentTeam->user_data_area);
1518 #endif
1519 		status = B_ERROR;
1520 		goto err4;
1521 	}
1522 
1523 	forkArgs->user_stack_base = parentThread->user_stack_base;
1524 	forkArgs->user_stack_size = parentThread->user_stack_size;
1525 	forkArgs->user_local_storage = parentThread->user_local_storage;
1526 	forkArgs->sig_block_mask = parentThread->sig_block_mask;
1527 	arch_store_fork_frame(&forkArgs->arch_info);
1528 
1529 	// copy image list
1530 	image_info imageInfo;
1531 	cookie = 0;
1532 	while (get_next_image_info(parentTeam->id, &cookie, &imageInfo) == B_OK) {
1533 		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
1534 		if (image < 0)
1535 			goto err5;
1536 	}
1537 
1538 	// create a kernel thread under the context of the new team
1539 	threadID = spawn_kernel_thread_etc(fork_team_thread_start,
1540 		parentThread->name, parentThread->priority, forkArgs,
1541 		team->id, team->id);
1542 	if (threadID < 0) {
1543 		status = threadID;
1544 		goto err5;
1545 	}
1546 
1547 	// notify the debugger
1548 	user_debug_team_created(team->id);
1549 
1550 	T(TeamForked(threadID));
1551 
1552 	resume_thread(threadID);
1553 	return threadID;
1554 
1555 err5:
1556 	remove_images(team);
1557 err4:
1558 	vm_delete_address_space(team->address_space);
1559 err3:
1560 	delete_realtime_sem_context(team->realtime_sem_context);
1561 err25:
1562 	vfs_free_io_context(team->io_context);
1563 err2:
1564 	free(forkArgs);
1565 err1:
1566 	// remove the team from the team hash table and delete the team structure
1567 	state = disable_interrupts();
1568 	GRAB_TEAM_LOCK();
1569 
1570 	remove_team_from_group(team);
1571 	remove_team_from_parent(parentTeam, team);
1572 	hash_remove(sTeamHash, team);
1573 
1574 	RELEASE_TEAM_LOCK();
1575 	restore_interrupts(state);
1576 
1577 	delete_team_struct(team);
1578 
1579 	return status;
1580 }
1581 
1582 
1583 /*!	Returns whether the specified \a team has any children belonging to the
1584 	specified \a group.
1585 	Must be called with the team lock held.
1586 */
1587 static bool
1588 has_children_in_group(struct team *parent, pid_t groupID)
1589 {
1590 	struct team *team;
1591 
1592 	struct process_group *group = team_get_process_group_locked(
1593 		parent->group->session, groupID);
1594 	if (group == NULL)
1595 		return false;
1596 
1597 	for (team = group->teams; team; team = team->group_next) {
1598 		if (team->parent == parent)
1599 			return true;
1600 	}
1601 
1602 	return false;
1603 }
1604 
1605 
1606 static job_control_entry*
1607 get_job_control_entry(team_job_control_children* children, pid_t id)
1608 {
1609 	for (JobControlEntryList::Iterator it = children->entries.GetIterator();
1610 		 job_control_entry* entry = it.Next();) {
1611 
1612 		if (id > 0) {
1613 			if (entry->thread == id)
1614 				return entry;
1615 		} else if (id == -1) {
1616 			return entry;
1617 		} else {
1618 			pid_t processGroup
1619 				= (entry->team ? entry->team->group_id : entry->group_id);
1620 			if (processGroup == -id)
1621 				return entry;
1622 		}
1623 	}
1624 
1625 	return NULL;
1626 }
1627 
1628 
1629 static job_control_entry*
1630 get_job_control_entry(struct team* team, pid_t id, uint32 flags)
1631 {
1632 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
1633 
1634 	if (entry == NULL && (flags & WCONTINUED) != 0)
1635 		entry = get_job_control_entry(team->continued_children, id);
1636 
1637 	if (entry == NULL && (flags & WUNTRACED) != 0)
1638 		entry = get_job_control_entry(team->stopped_children, id);
1639 
1640 	return entry;
1641 }
1642 
1643 
1644 job_control_entry::job_control_entry()
1645 	:
1646 	has_group_ref(false)
1647 {
1648 }
1649 
1650 
1651 job_control_entry::~job_control_entry()
1652 {
1653 	if (has_group_ref) {
1654 		InterruptsSpinLocker locker(gTeamSpinlock);
1655 		release_process_group_ref(group_id);
1656 	}
1657 }
1658 
1659 
1660 /*!	Team and thread lock must be held.
1661 */
1662 void
1663 job_control_entry::InitDeadState()
1664 {
1665 	if (team != NULL) {
1666 		struct thread* thread = team->main_thread;
1667 		group_id = team->group_id;
1668 		this->thread = thread->id;
1669 		status = thread->exit.status;
1670 		reason = thread->exit.reason;
1671 		signal = thread->exit.signal;
1672 		team = NULL;
1673 		acquire_process_group_ref(group_id);
1674 		has_group_ref = true;
1675 	}
1676 }
1677 
1678 
1679 job_control_entry&
1680 job_control_entry::operator=(const job_control_entry& other)
1681 {
1682 	state = other.state;
1683 	thread = other.thread;
1684 	has_group_ref = false;
1685 	team = other.team;
1686 	group_id = other.group_id;
1687 	status = other.status;
1688 	reason = other.reason;
1689 	signal = other.signal;
1690 
1691 	return *this;
1692 }
1693 
1694 
1695 /*! This is the kernel backend for waitpid(). It is a bit more powerful than
1696 	waitpid() when it comes to reporting the reason why a thread has died.
1697 */
1698 static thread_id
1699 wait_for_child(pid_t child, uint32 flags, int32 *_reason,
1700 	status_t *_returnCode)
1701 {
1702 	struct thread* thread = thread_get_current_thread();
1703 	struct team* team = thread->team;
1704 	struct job_control_entry foundEntry;
1705 	struct job_control_entry* freeDeathEntry = NULL;
1706 	status_t status = B_OK;
1707 
1708 	TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));
1709 
1710 	T(WaitForChild(child, flags));
1711 
1712 	if (child == 0) {
1713 		// wait for all children in the process group of the calling team
1714 		child = -team->group_id;
1715 	}
1716 
1717 	bool ignoreFoundEntries = false;
1718 	bool ignoreFoundEntriesChecked = false;
1719 
1720 	while (true) {
1721 		InterruptsSpinLocker locker(gTeamSpinlock);
1722 
1723 		// check whether any condition holds
1724 		job_control_entry* entry = get_job_control_entry(team, child, flags);
1725 
1726 		// If we don't have an entry yet, check whether there are any children
1727 		// complying with the process group specification at all.
1728 		if (entry == NULL) {
1729 			// No success yet -- check whether there are any children we could
1730 			// wait for.
1731 			bool childrenExist = false;
1732 			if (child == -1) {
1733 				childrenExist = team->children != NULL;
1734 			} else if (child < -1) {
1735 				childrenExist = has_children_in_group(team, -child);
1736 			} else {
1737 				if (struct team* childTeam = team_get_team_struct_locked(child))
1738 					childrenExist = childTeam->parent == team;
1739 			}
1740 
1741 			if (!childrenExist) {
1742 				// there is no child we could wait for
1743 				status = ECHILD;
1744 			} else {
1745 				// the children we're waiting for are still running
1746 				status = B_WOULD_BLOCK;
1747 			}
1748 		} else {
1749 			// got something
1750 			foundEntry = *entry;
1751 			if (entry->state == JOB_CONTROL_STATE_DEAD) {
1752 				// The child is dead. Reap its death entry.
1753 				freeDeathEntry = entry;
1754 				team->dead_children->entries.Remove(entry);
1755 				team->dead_children->count--;
1756 			} else {
1757 				// The child is well. Reset its job control state.
1758 				team_set_job_control_state(entry->team,
1759 					JOB_CONTROL_STATE_NONE, 0, false);
1760 			}
1761 		}
1762 
1763 		// If we haven't got anything yet, prepare to wait on the
1764 		// condition variable.
1765 		ConditionVariableEntry deadWaitEntry;
1766 
1767 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
1768 			team->dead_children->condition_variable.Add(&deadWaitEntry);
1769 
1770 		locker.Unlock();
1771 
1772 		// we got our entry and can return to our caller
1773 		if (status == B_OK) {
1774 			if (ignoreFoundEntries) {
1775 				// ... unless we shall ignore found entries
1776 				delete freeDeathEntry;
1777 				freeDeathEntry = NULL;
1778 				continue;
1779 			}
1780 
1781 			break;
1782 		}
1783 
1784 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
1785 			T(WaitForChildDone(status));
1786 			return status;
1787 		}
1788 
1789 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
1790 		if (status == B_INTERRUPTED) {
1791 			T(WaitForChildDone(status));
1792 			return status;
1793 		}
1794 
1795 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
1796 		// all our children are dead and fail with ECHILD. We check the
1797 		// condition at this point.
1798 		if (!ignoreFoundEntriesChecked) {
1799 			struct sigaction& handler = thread->sig_action[SIGCHLD - 1];
1800 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
1801 				|| handler.sa_handler == SIG_IGN) {
1802 				ignoreFoundEntries = true;
1803 			}
1804 
1805 			ignoreFoundEntriesChecked = true;
1806 		}
1807 	}
1808 
1809 	delete freeDeathEntry;
1810 
1811 	// when we got here, we have a valid death entry, and
1812 	// already got unregistered from the team or group
1813 	int reason = 0;
1814 	switch (foundEntry.state) {
1815 		case JOB_CONTROL_STATE_DEAD:
1816 			reason = foundEntry.reason;
1817 			break;
1818 		case JOB_CONTROL_STATE_STOPPED:
1819 			reason = THREAD_STOPPED;
1820 			break;
1821 		case JOB_CONTROL_STATE_CONTINUED:
1822 			reason = THREAD_CONTINUED;
1823 			break;
1824 		case JOB_CONTROL_STATE_NONE:
1825 			// can't happen
1826 			break;
1827 	}
1828 
1829 	*_returnCode = foundEntry.status;
1830 	*_reason = (foundEntry.signal << 16) | reason;
1831 
1832 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs if no other child
1833 	// status is available.
1834 	if (is_signal_blocked(SIGCHLD)) {
1835 		InterruptsSpinLocker locker(gTeamSpinlock);
1836 
1837 		if (get_job_control_entry(team, child, flags) == NULL)
1838 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));
1839 	}
1840 
1841 	// When the team is dead, the main thread continues to live in the kernel
1842 	// team for a very short time. To avoid surprises for the caller we rather
1843 	// wait until the thread is really gone.
1844 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
1845 		wait_for_thread(foundEntry.thread, NULL);
1846 
1847 	T(WaitForChildDone(foundEntry));
1848 
1849 	return foundEntry.thread;
1850 }
1851 
1852 
1853 /*! Fills the team_info structure with information from the specified
1854 	team.
1855 	The team lock must be held when called.
1856 */
1857 static status_t
1858 fill_team_info(struct team *team, team_info *info, size_t size)
1859 {
1860 	if (size != sizeof(team_info))
1861 		return B_BAD_VALUE;
1862 
1863 	// ToDo: Set more information for team_info
1864 	memset(info, 0, size);
1865 
1866 	info->team = team->id;
1867 	info->thread_count = team->num_threads;
1868 	info->image_count = count_images(team);
1869 	//info->area_count =
1870 	info->debugger_nub_thread = team->debug_info.nub_thread;
1871 	info->debugger_nub_port = team->debug_info.nub_port;
1872 	//info->uid =
1873 	//info->gid =
1874 
1875 	strlcpy(info->args, team->args, sizeof(info->args));
1876 	info->argc = 1;
1877 
1878 	return B_OK;
1879 }
1880 
1881 
1882 /*!	Updates the \c orphaned field of a process_group and returns its new value.
1883 	Interrupts must be disabled and team lock be held.
1884 */
1885 static bool
1886 update_orphaned_process_group(process_group* group, pid_t dyingProcess)
1887 {
1888 	// Orphaned Process Group: "A process group in which the parent of every
1889 	// member is either itself a member of the group or is not a member of the
1890 	// group's session." (Open Group Base Specs Issue 6)
1891 
1892 	// once orphaned, things won't change (exception: cf. setpgid())
1893 	if (group->orphaned)
1894 		return true;
1895 
1896 	struct team* team = group->teams;
1897 	while (team != NULL) {
1898 		struct team* parent = team->parent;
1899 		if (team->id != dyingProcess && parent != NULL
1900 			&& parent->id != dyingProcess
1901 			&& parent->group_id != group->id
1902 			&& parent->session_id == group->session->id) {
1903 			return false;
1904 		}
1905 
1906 		team = team->group_next;
1907 	}
1908 
1909 	group->orphaned = true;
1910 	return true;
1911 }
1912 
1913 
1914 /*!	Returns whether the process group contains stopped processes.
1915 	Interrupts must be disabled and team lock be held.
1916 */
1917 static bool
1918 process_group_has_stopped_processes(process_group* group)
1919 {
1920 	SpinLocker _(gThreadSpinlock);
1921 
1922 	struct team* team = group->teams;
1923 	while (team != NULL) {
1924 		if (team->main_thread->state == B_THREAD_SUSPENDED)
1925 			return true;
1926 
1927 		team = team->group_next;
1928 	}
1929 
1930 	return false;
1931 }
1932 
1933 
1934 //	#pragma mark - Private kernel API
1935 
1936 
1937 status_t
1938 team_init(kernel_args *args)
1939 {
1940 	struct process_session *session;
1941 	struct process_group *group;
1942 
1943 	// create the team hash table
1944 	sTeamHash = hash_init(16, offsetof(struct team, next),
1945 		&team_struct_compare, &team_struct_hash);
1946 
1947 	sGroupHash = hash_init(16, offsetof(struct process_group, next),
1948 		&process_group_compare, &process_group_hash);
1949 
1950 	// create initial session and process groups
1951 
1952 	session = create_process_session(1);
1953 	if (session == NULL)
1954 		panic("Could not create initial session.\n");
1955 
1956 	group = create_process_group(1);
1957 	if (group == NULL)
1958 		panic("Could not create initial process group.\n");
1959 
1960 	insert_group_into_session(session, group);
1961 
1962 	// create the kernel team
1963 	sKernelTeam = create_team_struct("kernel_team", true);
1964 	if (sKernelTeam == NULL)
1965 		panic("could not create kernel team!\n");
1966 	strcpy(sKernelTeam->args, sKernelTeam->name);
1967 	sKernelTeam->state = TEAM_STATE_NORMAL;
1968 
1969 	sKernelTeam->saved_set_uid = 0;
1970 	sKernelTeam->real_uid = 0;
1971 	sKernelTeam->effective_uid = 0;
1972 	sKernelTeam->saved_set_gid = 0;
1973 	sKernelTeam->real_gid = 0;
1974 	sKernelTeam->effective_gid = 0;
1975 	sKernelTeam->supplementary_groups = NULL;
1976 	sKernelTeam->supplementary_group_count = 0;
1977 
1978 	insert_team_into_group(group, sKernelTeam);
1979 
1980 	sKernelTeam->io_context = vfs_new_io_context(NULL);
1981 	if (sKernelTeam->io_context == NULL)
1982 		panic("could not create io_context for kernel team!\n");
1983 
1984 	// stick it in the team hash
1985 	hash_insert(sTeamHash, sKernelTeam);
1986 
1987 	add_debugger_command_etc("team", &dump_team_info,
1988 		"Dump info about a particular team",
1989 		"[ <id> | <address> | <name> ]\n"
1990 		"Prints information about the specified team. If no argument is given\n"
1991 		"the current team is selected.\n"
1992 		"  <id>       - The ID of the team.\n"
1993 		"  <address>  - The address of the team structure.\n"
1994 		"  <name>     - The team's name.\n", 0);
1995 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
1996 		"\n"
1997 		"Prints a list of all existing teams.\n", 0);
1998 	return 0;
1999 }
2000 
2001 
2002 int32
2003 team_max_teams(void)
2004 {
2005 	return sMaxTeams;
2006 }
2007 
2008 
2009 int32
2010 team_used_teams(void)
2011 {
2012 	return sUsedTeams;
2013 }
2014 
2015 
2016 /*! Returns the death entry of a child team with the given thread ID, if any.
2017 	Sets \a _deleteEntry to whether the caller must delete the returned entry.
2018 	You need to have the team lock held when calling this function. */
2019 job_control_entry*
2020 team_get_death_entry(struct team *team, thread_id child, bool* _deleteEntry)
2021 {
2022 	if (child <= 0)
2023 		return NULL;
2024 
2025 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2026 		child);
2027 	if (entry) {
2028 		// only remove the entry if the caller is the parent of the found team
2029 		if (team_get_current_team_id() == entry->thread) {
2030 			team->dead_children->entries.Remove(entry);
2031 			team->dead_children->count--;
2032 			*_deleteEntry = true;
2033 		} else {
2034 			*_deleteEntry = false;
2035 		}
2036 	}
2037 
2038 	return entry;
2039 }
2040 
2041 
2042 /*! Quick check to see if we have a valid team ID. */
2043 bool
2044 team_is_valid(team_id id)
2045 {
2046 	struct team *team;
2047 	cpu_status state;
2048 
2049 	if (id <= 0)
2050 		return false;
2051 
2052 	state = disable_interrupts();
2053 	GRAB_TEAM_LOCK();
2054 
2055 	team = team_get_team_struct_locked(id);
2056 
2057 	RELEASE_TEAM_LOCK();
2058 	restore_interrupts(state);
2059 
2060 	return team != NULL;
2061 }
2062 
2063 
2064 struct team *
2065 team_get_team_struct_locked(team_id id)
2066 {
2067 	struct team_key key;
2068 	key.id = id;
2069 
2070 	return (struct team*)hash_lookup(sTeamHash, &key);
2071 }
2072 
2073 
2074 /*! Searches for the process group with the specified ID and returns it if it
2075 	belongs to the given session (\a session may be NULL to match any session).
2076 	You must hold the team lock when you call this function. */
2077 struct process_group *
2078 team_get_process_group_locked(struct process_session *session, pid_t id)
2079 {
2080 	struct process_group *group;
2081 	struct team_key key;
2082 	key.id = id;
2083 
2084 	group = (struct process_group *)hash_lookup(sGroupHash, &key);
2085 	if (group != NULL && (session == NULL || session == group->session))
2086 		return group;
2087 
2088 	return NULL;
2089 }
2090 
2091 
2092 void
2093 team_delete_process_group(struct process_group *group)
2094 {
2095 	if (group == NULL)
2096 		return;
2097 
2098 	TRACE(("team_delete_process_group(id = %ld)\n", group->id));
2099 
2100 	// remove_group_from_session() keeps this pointer around
2101 	// only if the session can be freed as well
2102 	if (group->session) {
2103 		TRACE(("team_delete_process_group(): frees session %ld\n", group->session->id));
2104 		free(group->session);
2105 	}
2106 
2107 	free(group);
2108 }
2109 
2110 
2111 void
2112 team_set_controlling_tty(int32 ttyIndex)
2113 {
2114 	struct team* team = thread_get_current_thread()->team;
2115 
2116 	InterruptsSpinLocker _(gTeamSpinlock);
2117 
2118 	team->group->session->controlling_tty = ttyIndex;
2119 	team->group->session->foreground_group = -1;
2120 }
2121 
2122 
2123 int32
2124 team_get_controlling_tty()
2125 {
2126 	struct team* team = thread_get_current_thread()->team;
2127 
2128 	InterruptsSpinLocker _(gTeamSpinlock);
2129 
2130 	return team->group->session->controlling_tty;
2131 }
2132 
2133 
2134 status_t
2135 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2136 {
2137 	struct thread* thread = thread_get_current_thread();
2138 	struct team* team = thread->team;
2139 
2140 	InterruptsSpinLocker locker(gTeamSpinlock);
2141 
2142 	process_session* session = team->group->session;
2143 
2144 	// must be the controlling tty of the calling process
2145 	if (session->controlling_tty != ttyIndex)
2146 		return ENOTTY;
2147 
2148 	// check process group -- must belong to our session
2149 	process_group* group = team_get_process_group_locked(session,
2150 		processGroupID);
2151 	if (group == NULL)
2152 		return B_BAD_VALUE;
2153 
2154 	// If we are in a background group, we may only do this if SIGTTOU is
2155 	// ignored or blocked. Otherwise the group is sent a SIGTTOU.
2156 	if (session->foreground_group != -1
2157 		&& session->foreground_group != team->group_id
2158 		&& thread->sig_action[SIGTTOU - 1].sa_handler != SIG_IGN
2159 		&& !is_signal_blocked(SIGTTOU)) {
2160 		pid_t groupID = team->group->id;
2161 		locker.Unlock();
2162 		send_signal(-groupID, SIGTTOU);
2163 		return B_INTERRUPTED;
2164 	}
2165 
2166 	team->group->session->foreground_group = processGroupID;
2167 
2168 	return B_OK;
2169 }
2170 
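// Usage sketch: how a TTY layer might call the function above when handling a
// TCSETPGRP-style request. The wrapper name handle_set_pgrp() and its
// parameters are made up for illustration; only
// team_set_foreground_process_group() (defined above) is real.
//
//	static status_t
//	handle_set_pgrp(int32 ttyIndex, pid_t newForegroundGroup)
//	{
//		status_t status = team_set_foreground_process_group(ttyIndex,
//			newForegroundGroup);
//		if (status == B_INTERRUPTED) {
//			// we were in a background group and got SIGTTOU -- the request
//			// is typically restarted after the signal has been handled
//		}
//		return status;
//	}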
2171 
2172 /*!	Removes the specified team from the global team hash, and from its parent.
2173 	It also moves all of its children up to the parent.
2174 	You must hold the team lock when you call this function.
2175 */
2176 void
2177 team_remove_team(struct team *team)
2178 {
2179 	struct team *parent = team->parent;
2180 
2181 	// remember how long this team lasted
2182 	parent->dead_children->kernel_time += team->dead_threads_kernel_time
2183 		+ team->dead_children->kernel_time;
2184 	parent->dead_children->user_time += team->dead_threads_user_time
2185 		+ team->dead_children->user_time;
2186 
2187 	// Also grab the thread spinlock while removing the team from the hash.
2188 	// This makes the following sequence safe: grab teams lock, lookup team,
2189 	// grab threads lock, unlock teams lock,
2190 	// mutex_lock_threads_lock(<team related lock>), as used in the VFS code to
2191 	// lock another team's IO context.
2192 	GRAB_THREAD_LOCK();
2193 	hash_remove(sTeamHash, team);
2194 	RELEASE_THREAD_LOCK();
2195 	sUsedTeams--;
2196 
2197 	team->state = TEAM_STATE_DEATH;
2198 
2199 	// If we're a controlling process (i.e. a session leader with controlling
2200 	// terminal), there's a bit of signalling we have to do.
2201 	if (team->session_id == team->id
2202 		&& team->group->session->controlling_tty >= 0) {
2203 		process_session* session = team->group->session;
2204 
2205 		session->controlling_tty = -1;
2206 
2207 		// send SIGHUP to the foreground
2208 		if (session->foreground_group >= 0) {
2209 			send_signal_etc(-session->foreground_group, SIGHUP,
2210 				SIGNAL_FLAG_TEAMS_LOCKED);
2211 		}
2212 
2213 		// send SIGHUP + SIGCONT to all newly-orphaned process groups with
2214 		// stopped processes
2215 		struct team* child = team->children;
2216 		while (child != NULL) {
2217 			process_group* childGroup = child->group;
2218 			if (!childGroup->orphaned
2219 				&& update_orphaned_process_group(childGroup, team->id)
2220 				&& process_group_has_stopped_processes(childGroup)) {
2221 				send_signal_etc(-childGroup->id, SIGHUP,
2222 					SIGNAL_FLAG_TEAMS_LOCKED);
2223 				send_signal_etc(-childGroup->id, SIGCONT,
2224 					SIGNAL_FLAG_TEAMS_LOCKED);
2225 			}
2226 
2227 			child = child->siblings_next;
2228 		}
2229 	} else {
2230 		// update "orphaned" flags of all children's process groups
2231 		struct team* child = team->children;
2232 		while (child != NULL) {
2233 			process_group* childGroup = child->group;
2234 			if (!childGroup->orphaned)
2235 				update_orphaned_process_group(childGroup, team->id);
2236 
2237 			child = child->siblings_next;
2238 		}
2239 
2240 		// update "orphaned" flag of this team's process group
2241 		update_orphaned_process_group(team->group, team->id);
2242 	}
2243 
2244 	// reparent each of the team's children
2245 	reparent_children(team);
2246 
2247 	// remove us from our process group
2248 	remove_team_from_group(team);
2249 
2250 	// remove us from our parent
2251 	remove_team_from_parent(parent, team);
2252 }
2253 
2254 
2255 void
2256 team_delete_team(struct team *team)
2257 {
2258 	team_id teamID = team->id;
2259 	port_id debuggerPort = -1;
2260 	cpu_status state;
2261 
2262 	if (team->num_threads > 0) {
2263 		// there are other threads still in this team,
2264 		// cycle through and signal kill on each of the threads
2265 		// ToDo: this can be optimized. There's got to be a better solution.
2266 		struct thread *temp_thread;
2267 		char death_sem_name[B_OS_NAME_LENGTH];
2268 		sem_id deathSem;
2269 		int32 threadCount;
2270 
2271 		sprintf(death_sem_name, "team %ld death sem", teamID);
2272 		deathSem = create_sem(0, death_sem_name);
2273 		if (deathSem < 0)
2274 			panic("team_delete_team: cannot init death sem for team %ld\n", teamID);
2275 
2276 		state = disable_interrupts();
2277 		GRAB_TEAM_LOCK();
2278 
2279 		team->death_sem = deathSem;
2280 		threadCount = team->num_threads;
2281 
2282 		// If the team was being debugged, that will stop with the termination
2283 		// of the nub thread. The team structure has already been removed from
2284 		// the team hash table at this point, so no one can install a debugger
2285 		// anymore. We fetch the debugger's port to send it a message at the
2286 		// bitter end.
2287 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2288 
2289 		if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2290 			debuggerPort = team->debug_info.debugger_port;
2291 
2292 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2293 
2294 		// We can safely walk the list because of the lock. No new threads can
2295 		// be created because the team is in the TEAM_STATE_DEATH state.
2296 		temp_thread = team->thread_list;
2297 		while (temp_thread) {
2298 			struct thread *next = temp_thread->team_next;
2299 
2300 			send_signal_etc(temp_thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2301 			temp_thread = next;
2302 		}
2303 
2304 		RELEASE_TEAM_LOCK();
2305 		restore_interrupts(state);
2306 
2307 		// wait until all threads in the team are dead.
2308 		acquire_sem_etc(team->death_sem, threadCount, 0, 0);
2309 		delete_sem(team->death_sem);
2310 	}
2311 
2312 	// If someone is waiting for this team to be loaded, but it dies
2313 	// unexpectedly before being done, we need to notify the waiting
2314 	// thread now.
2315 
2316 	state = disable_interrupts();
2317 	GRAB_TEAM_LOCK();
2318 
2319 	if (team->loading_info) {
2320 		// there's indeed someone waiting
2321 		struct team_loading_info *loadingInfo = team->loading_info;
2322 		team->loading_info = NULL;
2323 
2324 		loadingInfo->result = B_ERROR;
2325 		loadingInfo->done = true;
2326 
2327 		GRAB_THREAD_LOCK();
2328 
2329 		// wake up the waiting thread
2330 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
2331 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
2332 
2333 		RELEASE_THREAD_LOCK();
2334 	}
2335 
2336 	RELEASE_TEAM_LOCK();
2337 	restore_interrupts(state);
2338 
2339 	// notify team watchers
2340 
2341 	{
2342 		// we're not reachable from anyone anymore at this point, so we
2343 		// can safely access the list without any locking
2344 		struct team_watcher *watcher;
2345 		while ((watcher = (struct team_watcher*)list_remove_head_item(
2346 				&team->watcher_list)) != NULL) {
2347 			watcher->hook(teamID, watcher->data);
2348 			free(watcher);
2349 		}
2350 	}
2351 
2352 	// free team resources
2353 
2354 	vfs_free_io_context(team->io_context);
2355 	delete_realtime_sem_context(team->realtime_sem_context);
2356 	xsi_sem_undo(team);
2357 	delete_owned_ports(teamID);
2358 	sem_delete_owned_sems(teamID);
2359 	remove_images(team);
2360 	vm_delete_address_space(team->address_space);
2361 
2362 	delete_team_struct(team);
2363 
2364 	// notify the debugger that the team is gone
2365 	user_debug_team_deleted(teamID, debuggerPort);
2366 }
2367 
2368 
2369 struct team *
2370 team_get_kernel_team(void)
2371 {
2372 	return sKernelTeam;
2373 }
2374 
2375 
2376 team_id
2377 team_get_kernel_team_id(void)
2378 {
2379 	if (!sKernelTeam)
2380 		return 0;
2381 
2382 	return sKernelTeam->id;
2383 }
2384 
2385 
2386 team_id
2387 team_get_current_team_id(void)
2388 {
2389 	return thread_get_current_thread()->team->id;
2390 }
2391 
2392 
2393 status_t
2394 team_get_address_space(team_id id, vm_address_space **_addressSpace)
2395 {
2396 	cpu_status state;
2397 	struct team *team;
2398 	status_t status;
2399 
2400 	// ToDo: we need to do something about B_SYSTEM_TEAM vs. its real ID (1)
2401 	if (id == 1) {
2402 		// we're the kernel team, so we don't have to go through all
2403 		// the hassle (locking and hash lookup)
2404 		*_addressSpace = vm_get_kernel_address_space();
2405 		return B_OK;
2406 	}
2407 
2408 	state = disable_interrupts();
2409 	GRAB_TEAM_LOCK();
2410 
2411 	team = team_get_team_struct_locked(id);
2412 	if (team != NULL) {
2413 		atomic_add(&team->address_space->ref_count, 1);
2414 		*_addressSpace = team->address_space;
2415 		status = B_OK;
2416 	} else
2417 		status = B_BAD_VALUE;
2418 
2419 	RELEASE_TEAM_LOCK();
2420 	restore_interrupts(state);
2421 
2422 	return status;
2423 }
2424 
2425 
2426 /*!	Sets the team's job control state.
2427 	Interrupts must be disabled and the team lock be held.
2428 	\a threadsLocked indicates whether the thread lock is being held, too.
2429 */
2430 void
2431 team_set_job_control_state(struct team* team, job_control_state newState,
2432 	int signal, bool threadsLocked)
2433 {
2434 	if (team == NULL || team->job_control_entry == NULL)
2435 		return;
2436 
2437 	// don't touch anything if the state stays the same or the team is already
2438 	// dead
2439 	job_control_entry* entry = team->job_control_entry;
2440 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
2441 		return;
2442 
2443 	T(SetJobControlState(team->id, newState, signal));
2444 
2445 	// remove from the old list
2446 	switch (entry->state) {
2447 		case JOB_CONTROL_STATE_NONE:
2448 			// entry is in no list ATM
2449 			break;
2450 		case JOB_CONTROL_STATE_DEAD:
2451 			// can't get here
2452 			break;
2453 		case JOB_CONTROL_STATE_STOPPED:
2454 			team->parent->stopped_children->entries.Remove(entry);
2455 			break;
2456 		case JOB_CONTROL_STATE_CONTINUED:
2457 			team->parent->continued_children->entries.Remove(entry);
2458 			break;
2459 	}
2460 
2461 	entry->state = newState;
2462 	entry->signal = signal;
2463 
2464 	// add to new list
2465 	team_job_control_children* childList = NULL;
2466 	switch (entry->state) {
2467 		case JOB_CONTROL_STATE_NONE:
2468 			// entry doesn't get into any list
2469 			break;
2470 		case JOB_CONTROL_STATE_DEAD:
2471 			childList = team->parent->dead_children;
2472 			team->parent->dead_children->count++;
2473 			break;
2474 		case JOB_CONTROL_STATE_STOPPED:
2475 			childList = team->parent->stopped_children;
2476 			break;
2477 		case JOB_CONTROL_STATE_CONTINUED:
2478 			childList = team->parent->continued_children;
2479 			break;
2480 	}
2481 
2482 	if (childList != NULL) {
2483 		childList->entries.Add(entry);
2484 		team->parent->dead_children->condition_variable.NotifyAll(
2485 			threadsLocked);
2486 	}
2487 }
2488 
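// Usage sketch: recording a stop/continue transition for a team. The caller
// must have interrupts disabled and hold the team lock, as required above;
// "team" stands for the affected team here and the calls are illustrative
// only.
//
//	// the team stopped because of SIGSTOP (thread lock also held -> true):
//	team_set_job_control_state(team, JOB_CONTROL_STATE_STOPPED, SIGSTOP, true);
//
//	// later, the team is continued by SIGCONT:
//	team_set_job_control_state(team, JOB_CONTROL_STATE_CONTINUED, SIGCONT,
//		true);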
2489 
2490 /*! Adds a hook to the team that is called as soon as this
2491 	team goes away.
2492 	This call might get public in the future.
2493 */
2494 status_t
2495 start_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2496 {
2497 	struct team_watcher *watcher;
2498 	struct team *team;
2499 	cpu_status state;
2500 
2501 	if (hook == NULL || teamID < B_OK)
2502 		return B_BAD_VALUE;
2503 
2504 	watcher = (struct team_watcher*)malloc(sizeof(struct team_watcher));
2505 	if (watcher == NULL)
2506 		return B_NO_MEMORY;
2507 
2508 	watcher->hook = hook;
2509 	watcher->data = data;
2510 
2511 	// find team and add watcher
2512 
2513 	state = disable_interrupts();
2514 	GRAB_TEAM_LOCK();
2515 
2516 	team = team_get_team_struct_locked(teamID);
2517 	if (team != NULL)
2518 		list_add_item(&team->watcher_list, watcher);
2519 
2520 	RELEASE_TEAM_LOCK();
2521 	restore_interrupts(state);
2522 
2523 	if (team == NULL) {
2524 		free(watcher);
2525 		return B_BAD_TEAM_ID;
2526 	}
2527 
2528 	return B_OK;
2529 }
2530 
2531 
2532 status_t
2533 stop_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2534 {
2535 	struct team_watcher *watcher = NULL;
2536 	struct team *team;
2537 	cpu_status state;
2538 
2539 	if (hook == NULL || teamID < B_OK)
2540 		return B_BAD_VALUE;
2541 
2542 	// find team and remove watcher (if present)
2543 
2544 	state = disable_interrupts();
2545 	GRAB_TEAM_LOCK();
2546 
2547 	team = team_get_team_struct_locked(teamID);
2548 	if (team != NULL) {
2549 		// search for watcher
2550 		while ((watcher = (struct team_watcher*)list_get_next_item(
2551 				&team->watcher_list, watcher)) != NULL) {
2552 			if (watcher->hook == hook && watcher->data == data) {
2553 				// got it!
2554 				list_remove_item(&team->watcher_list, watcher);
2555 				break;
2556 			}
2557 		}
2558 	}
2559 
2560 	RELEASE_TEAM_LOCK();
2561 	restore_interrupts(state);
2562 
2563 	if (watcher == NULL)
2564 		return B_ENTRY_NOT_FOUND;
2565 
2566 	free(watcher);
2567 	return B_OK;
2568 }
2569 
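// Usage sketch for the team watcher API above. The hook my_team_vanished()
// and the teamID/cookie values are placeholders made up for illustration;
// start_watching_team() and stop_watching_team() are the functions defined
// above.
//
//	static void
//	my_team_vanished(team_id id, void *data)
//	{
//		dprintf("team %ld is gone (cookie %p)\n", id, data);
//	}
//
//	// register the hook for a team of interest ...
//	status_t status = start_watching_team(teamID, &my_team_vanished, cookie);
//
//	// ... and remove it again with the very same hook/data pair
//	if (status == B_OK)
//		stop_watching_team(teamID, &my_team_vanished, cookie);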
2570 
2571 /*!	The team lock must be held or the team must still be single threaded.
2572 */
2573 struct user_thread*
2574 team_allocate_user_thread(struct team* team)
2575 {
2576 	if (team->user_data == 0)
2577 		return NULL;
2578 
2579 	user_thread* thread = NULL;
2580 
2581 	// take an entry from the free list, if any
2582 	if (struct free_user_thread* entry = team->free_user_threads) {
2583 		thread = entry->thread;
2584 		team->free_user_threads = entry->next;
2585 		deferred_free(entry);
2586 		return thread;
2587 	} else {
2588 		// enough space left?
2589 		size_t needed = _ALIGN(sizeof(user_thread));
2590 		if (team->user_data_size - team->used_user_data < needed)
2591 			return NULL;
2592 		// TODO: This imposes a per team thread limit! We should resize the
2593 		// area, if necessary. That's problematic at this point, though, since
2594 		// we've got the team lock.
2595 
2596 		thread = (user_thread*)(team->user_data + team->used_user_data);
2597 		team->used_user_data += needed;
2598 	}
2599 
2600 	thread->defer_signals = 0;
2601 	thread->pending_signals = 0;
2602 	thread->wait_status = B_OK;
2603 
2604 	return thread;
2605 }
2606 
2607 
2608 /*!	The team lock must not be held. \a thread must be the current thread.
2609 */
2610 void
2611 team_free_user_thread(struct thread* thread)
2612 {
2613 	user_thread* userThread = thread->user_thread;
2614 	if (userThread == NULL)
2615 		return;
2616 
2617 	// create a free list entry
2618 	free_user_thread* entry
2619 		= (free_user_thread*)malloc(sizeof(free_user_thread));
2620 	if (entry == NULL) {
2621 		// we have to leak the user thread :-/
2622 		return;
2623 	}
2624 
2625 	InterruptsSpinLocker _(gTeamSpinlock);
2626 
2627 	entry->thread = userThread;
2628 	entry->next = thread->team->free_user_threads;
2629 	thread->team->free_user_threads = entry;
2630 }
2631 
2632 
2633 //	#pragma mark - Public kernel API
2634 
2635 
2636 thread_id
2637 load_image(int32 argCount, const char **args, const char **env)
2638 {
2639 	// we need to flatten the args and environment
2640 
2641 	if (args == NULL)
2642 		return B_BAD_VALUE;
2643 
2644 	// determine total needed size
2645 	int32 argSize = 0;
2646 	for (int32 i = 0; i < argCount; i++)
2647 		argSize += strlen(args[i]) + 1;
2648 
2649 	int32 envCount = 0;
2650 	int32 envSize = 0;
2651 	while (env != NULL && env[envCount] != NULL)
2652 		envSize += strlen(env[envCount++]) + 1;
2653 
2654 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
2655 	if (size > MAX_PROCESS_ARGS_SIZE)
2656 		return B_TOO_MANY_ARGS;
2657 
2658 	// allocate space
2659 	char** flatArgs = (char**)malloc(size);
2660 	if (flatArgs == NULL)
2661 		return B_NO_MEMORY;
2662 
2663 	char** slot = flatArgs;
2664 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
2665 
2666 	// copy arguments and environment
2667 	for (int32 i = 0; i < argCount; i++) {
2668 		int32 argSize = strlen(args[i]) + 1;
2669 		memcpy(stringSpace, args[i], argSize);
2670 		*slot++ = stringSpace;
2671 		stringSpace += argSize;
2672 	}
2673 
2674 	*slot++ = NULL;
2675 
2676 	for (int32 i = 0; i < envCount; i++) {
2677 		int32 envSize = strlen(env[i]) + 1;
2678 		memcpy(stringSpace, env[i], envSize);
2679 		*slot++ = stringSpace;
2680 		stringSpace += envSize;
2681 	}
2682 
2683 	*slot++ = NULL;
2684 
2685 	thread_id thread = load_image_etc(flatArgs, size, argCount, envCount,
2686 		B_NORMAL_PRIORITY, B_WAIT_TILL_LOADED, -1, 0);
2687 
2688 	free(flatArgs);
2689 		// load_image_etc() unset our variable if it took over ownership
2690 
2691 	return thread;
2692 }
2693 
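// Usage sketch: starting a userland program from kernel code with the
// load_image() defined above. The path and arguments are made up for
// illustration. Since B_WAIT_TILL_LOADED is used, the team's main thread is
// presumably suspended once loading is done and has to be resumed before
// waiting for it, mirroring the public load_image() contract.
//
//	const char *args[] = { "/bin/ls", "-l", "/boot" };
//	thread_id thread = load_image(3, args, NULL);
//	if (thread >= B_OK) {
//		status_t returnCode;
//		resume_thread(thread);
//		wait_for_thread(thread, &returnCode);
//	}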
2694 
2695 status_t
2696 wait_for_team(team_id id, status_t *_returnCode)
2697 {
2698 	struct team *team;
2699 	thread_id thread;
2700 	cpu_status state;
2701 
2702 	// find main thread and wait for that
2703 
2704 	state = disable_interrupts();
2705 	GRAB_TEAM_LOCK();
2706 
2707 	team = team_get_team_struct_locked(id);
2708 	if (team != NULL && team->main_thread != NULL)
2709 		thread = team->main_thread->id;
2710 	else
2711 		thread = B_BAD_THREAD_ID;
2712 
2713 	RELEASE_TEAM_LOCK();
2714 	restore_interrupts(state);
2715 
2716 	if (thread < 0)
2717 		return thread;
2718 
2719 	return wait_for_thread(thread, _returnCode);
2720 }
2721 
2722 
2723 status_t
2724 kill_team(team_id id)
2725 {
2726 	status_t status = B_OK;
2727 	thread_id threadID = -1;
2728 	struct team *team;
2729 	cpu_status state;
2730 
2731 	state = disable_interrupts();
2732 	GRAB_TEAM_LOCK();
2733 
2734 	team = team_get_team_struct_locked(id);
2735 	if (team != NULL) {
2736 		if (team != sKernelTeam) {
2737 			threadID = team->id;
2738 				// the team ID is the same as the ID of its main thread
2739 		} else
2740 			status = B_NOT_ALLOWED;
2741 	} else
2742 		status = B_BAD_TEAM_ID;
2743 
2744 	RELEASE_TEAM_LOCK();
2745 	restore_interrupts(state);
2746 
2747 	if (status < B_OK)
2748 		return status;
2749 
2750 	// just kill the main thread in the team. The cleanup code there will
2751 	// take care of the team
2752 	return kill_thread(threadID);
2753 }
2754 
2755 
2756 status_t
2757 _get_team_info(team_id id, team_info *info, size_t size)
2758 {
2759 	cpu_status state;
2760 	status_t status = B_OK;
2761 	struct team *team;
2762 
2763 	state = disable_interrupts();
2764 	GRAB_TEAM_LOCK();
2765 
2766 	if (id == B_CURRENT_TEAM)
2767 		team = thread_get_current_thread()->team;
2768 	else
2769 		team = team_get_team_struct_locked(id);
2770 
2771 	if (team == NULL) {
2772 		status = B_BAD_TEAM_ID;
2773 		goto err;
2774 	}
2775 
2776 	status = fill_team_info(team, info, size);
2777 
2778 err:
2779 	RELEASE_TEAM_LOCK();
2780 	restore_interrupts(state);
2781 
2782 	return status;
2783 }
2784 
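// Usage sketch: the userland get_team_info() from OS.h ends up in the
// function above with size = sizeof(team_info). A typical (illustrative)
// caller:
//
//	team_info info;
//	if (get_team_info(B_CURRENT_TEAM, &info) == B_OK)
//		printf("team %ld has %ld thread(s)\n", info.team, info.thread_count);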
2785 
2786 status_t
2787 _get_next_team_info(int32 *cookie, team_info *info, size_t size)
2788 {
2789 	status_t status = B_BAD_TEAM_ID;
2790 	struct team *team = NULL;
2791 	int32 slot = *cookie;
2792 	team_id lastTeamID;
2793 	cpu_status state;
2794 
2795 	if (slot < 1)
2796 		slot = 1;
2797 
2798 	state = disable_interrupts();
2799 	GRAB_TEAM_LOCK();
2800 
2801 	lastTeamID = peek_next_thread_id();
2802 	if (slot >= lastTeamID)
2803 		goto err;
2804 
2805 	// get next valid team
2806 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
2807 		slot++;
2808 
2809 	if (team) {
2810 		status = fill_team_info(team, info, size);
2811 		*cookie = ++slot;
2812 	}
2813 
2814 err:
2815 	RELEASE_TEAM_LOCK();
2816 	restore_interrupts(state);
2817 
2818 	return status;
2819 }
2820 
2821 
2822 status_t
2823 _get_team_usage_info(team_id id, int32 who, team_usage_info *info, size_t size)
2824 {
2825 	bigtime_t kernelTime = 0, userTime = 0;
2826 	status_t status = B_OK;
2827 	struct team *team;
2828 	cpu_status state;
2829 
2830 	if (size != sizeof(team_usage_info)
2831 		|| (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN))
2832 		return B_BAD_VALUE;
2833 
2834 	state = disable_interrupts();
2835 	GRAB_TEAM_LOCK();
2836 
2837 	if (id == B_CURRENT_TEAM)
2838 		team = thread_get_current_thread()->team;
2839 	else
2840 		team = team_get_team_struct_locked(id);
2841 
2842 	if (team == NULL) {
2843 		status = B_BAD_TEAM_ID;
2844 		goto out;
2845 	}
2846 
2847 	switch (who) {
2848 		case B_TEAM_USAGE_SELF:
2849 		{
2850 			struct thread *thread = team->thread_list;
2851 
2852 			for (; thread != NULL; thread = thread->team_next) {
2853 				kernelTime += thread->kernel_time;
2854 				userTime += thread->user_time;
2855 			}
2856 
2857 			kernelTime += team->dead_threads_kernel_time;
2858 			userTime += team->dead_threads_user_time;
2859 			break;
2860 		}
2861 
2862 		case B_TEAM_USAGE_CHILDREN:
2863 		{
2864 			struct team *child = team->children;
2865 			for (; child != NULL; child = child->siblings_next) {
2866 				struct thread *thread = child->thread_list;
2867 
2868 				for (; thread != NULL; thread = thread->team_next) {
2869 					kernelTime += thread->kernel_time;
2870 					userTime += thread->user_time;
2871 				}
2872 
2873 				kernelTime += child->dead_threads_kernel_time;
2874 				userTime += child->dead_threads_user_time;
2875 			}
2876 
2877 			kernelTime += team->dead_children->kernel_time;
2878 			userTime += team->dead_children->user_time;
2879 			break;
2880 		}
2881 	}
2882 
2883 out:
2884 	RELEASE_TEAM_LOCK();
2885 	restore_interrupts(state);
2886 
2887 	if (status == B_OK) {
2888 		info->kernel_time = kernelTime;
2889 		info->user_time = userTime;
2890 	}
2891 
2892 	return status;
2893 }
2894 
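// Usage sketch: the userland counterpart get_team_usage_info() from OS.h
// reaches this function with size = sizeof(team_usage_info). Times are in
// microseconds (bigtime_t). Illustrative caller:
//
//	team_usage_info usage;
//	if (get_team_usage_info(B_CURRENT_TEAM, B_TEAM_USAGE_SELF, &usage)
//			== B_OK) {
//		printf("kernel: %lld us, user: %lld us\n", usage.kernel_time,
//			usage.user_time);
//	}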
2895 
2896 pid_t
2897 getpid(void)
2898 {
2899 	return thread_get_current_thread()->team->id;
2900 }
2901 
2902 
2903 pid_t
2904 getppid(void)
2905 {
2906 	struct team *team = thread_get_current_thread()->team;
2907 	cpu_status state;
2908 	pid_t parent;
2909 
2910 	state = disable_interrupts();
2911 	GRAB_TEAM_LOCK();
2912 
2913 	parent = team->parent->id;
2914 
2915 	RELEASE_TEAM_LOCK();
2916 	restore_interrupts(state);
2917 
2918 	return parent;
2919 }
2920 
2921 
2922 pid_t
2923 getpgid(pid_t process)
2924 {
2925 	struct thread *thread;
2926 	pid_t result = -1;
2927 	cpu_status state;
2928 
2929 	if (process == 0)
2930 		process = thread_get_current_thread()->team->id;
2931 
2932 	state = disable_interrupts();
2933 	GRAB_THREAD_LOCK();
2934 
2935 	thread = thread_get_thread_struct_locked(process);
2936 	if (thread != NULL)
2937 		result = thread->team->group_id;
2938 
2939 	RELEASE_THREAD_LOCK();
2940 	restore_interrupts(state);
2941 
2942 	return thread != NULL ? result : B_BAD_VALUE;
2943 }
2944 
2945 
2946 pid_t
2947 getsid(pid_t process)
2948 {
2949 	struct thread *thread;
2950 	pid_t result = -1;
2951 	cpu_status state;
2952 
2953 	if (process == 0)
2954 		process = thread_get_current_thread()->team->id;
2955 
2956 	state = disable_interrupts();
2957 	GRAB_THREAD_LOCK();
2958 
2959 	thread = thread_get_thread_struct_locked(process);
2960 	if (thread != NULL)
2961 		result = thread->team->session_id;
2962 
2963 	RELEASE_THREAD_LOCK();
2964 	restore_interrupts(state);
2965 
2966 	return thread != NULL ? result : B_BAD_VALUE;
2967 }
2968 
2969 
2970 //	#pragma mark - User syscalls
2971 
2972 
2973 status_t
2974 _user_exec(const char *userPath, const char* const* userFlatArgs,
2975 	size_t flatArgsSize, int32 argCount, int32 envCount)
2976 {
2977 	// NOTE: Since this function normally doesn't return, don't use automatic
2978 	// variables that need destruction in the function scope.
2979 	char path[B_PATH_NAME_LENGTH];
2980 
2981 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
2982 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
2983 		return B_BAD_ADDRESS;
2984 
2985 	// copy and relocate the flat arguments
2986 	char** flatArgs;
2987 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
2988 		argCount, envCount, flatArgs);
2989 
2990 	if (error == B_OK) {
2991 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
2992 			envCount);
2993 			// this one only returns in case of error
2994 	}
2995 
2996 	free(flatArgs);
2997 	return error;
2998 }
2999 
3000 
3001 thread_id
3002 _user_fork(void)
3003 {
3004 	return fork_team();
3005 }
3006 
3007 
3008 thread_id
3009 _user_wait_for_child(thread_id child, uint32 flags, int32 *_userReason, status_t *_userReturnCode)
3010 {
3011 	status_t returnCode;
3012 	int32 reason;
3013 	thread_id deadChild;
3014 
3015 	if ((_userReason != NULL && !IS_USER_ADDRESS(_userReason))
3016 		|| (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode)))
3017 		return B_BAD_ADDRESS;
3018 
3019 	deadChild = wait_for_child(child, flags, &reason, &returnCode);
3020 
3021 	if (deadChild >= B_OK) {
3022 		// copy result data on successful completion
3023 		if ((_userReason != NULL
3024 				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
3025 			|| (_userReturnCode != NULL
3026 				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
3027 					< B_OK)) {
3028 			return B_BAD_ADDRESS;
3029 		}
3030 
3031 		return deadChild;
3032 	}
3033 
3034 	return syscall_restart_handle_post(deadChild);
3035 }
3036 
3037 
3038 pid_t
3039 _user_process_info(pid_t process, int32 which)
3040 {
3041 	// we only allow returning the parent of the current process
3042 	if (which == PARENT_ID
3043 		&& process != 0 && process != thread_get_current_thread()->team->id)
3044 		return B_BAD_VALUE;
3045 
3046 	switch (which) {
3047 		case SESSION_ID:
3048 			return getsid(process);
3049 		case GROUP_ID:
3050 			return getpgid(process);
3051 		case PARENT_ID:
3052 			return getppid();
3053 	}
3054 
3055 	return B_BAD_VALUE;
3056 }
3057 
3058 
3059 pid_t
3060 _user_setpgid(pid_t processID, pid_t groupID)
3061 {
3062 	struct thread *thread = thread_get_current_thread();
3063 	struct team *currentTeam = thread->team;
3064 	struct team *team;
3065 
3066 	if (groupID < 0)
3067 		return B_BAD_VALUE;
3068 
3069 	if (processID == 0)
3070 		processID = currentTeam->id;
3071 
3072 	// if the group ID is not specified, use the target process' ID
3073 	if (groupID == 0)
3074 		groupID = processID;
3075 
3076 	if (processID == currentTeam->id) {
3077 		// we set our own group
3078 
3079 		// we must not change our process group ID if we're a session leader
3080 		if (is_session_leader(currentTeam))
3081 			return B_NOT_ALLOWED;
3082 	} else {
3083 		// another team is the target of the call -- check it out
3084 		InterruptsSpinLocker _(gTeamSpinlock);
3085 
3086 		team = team_get_team_struct_locked(processID);
3087 		if (team == NULL)
3088 			return ESRCH;
3089 
3090 		// The team must be a child of the calling team and in the same session.
3091 		// (If that's the case it isn't a session leader either.)
3092 		if (team->parent != currentTeam
3093 			|| team->session_id != currentTeam->session_id) {
3094 			return B_NOT_ALLOWED;
3095 		}
3096 
3097 		if (team->group_id == groupID)
3098 			return groupID;
3099 
3100 		// The call is also supposed to fail on a child that has already
3101 		// executed exec*() [EACCES].
3102 		if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
3103 			return EACCES;
3104 	}
3105 
3106 	struct process_group *group = NULL;
3107 	if (groupID == processID) {
3108 		// A new process group might be needed.
3109 		group = create_process_group(groupID);
3110 		if (group == NULL)
3111 			return B_NO_MEMORY;
3112 
3113 		// Assume orphaned. We consider the situation of the team's parent
3114 		// below.
3115 		group->orphaned = true;
3116 	}
3117 
3118 	status_t status = B_OK;
3119 	struct process_group *freeGroup = NULL;
3120 
3121 	InterruptsSpinLocker locker(gTeamSpinlock);
3122 
3123 	team = team_get_team_struct_locked(processID);
3124 	if (team != NULL) {
3125 		// check the conditions again -- they might have changed in the meantime
3126 		if (is_session_leader(team)
3127 			|| team->session_id != currentTeam->session_id) {
3128 			status = B_NOT_ALLOWED;
3129 		} else if (team != currentTeam
3130 				&& (team->flags & TEAM_FLAG_EXEC_DONE) != 0) {
3131 			status = EACCES;
3132 		} else if (team->group_id == groupID) {
3133 			// the team is already in the desired process group
3134 			freeGroup = group;
3135 		} else {
3136 			// Check if a process group with the requested ID already exists.
3137 			struct process_group *targetGroup
3138 				= team_get_process_group_locked(team->group->session, groupID);
3139 			if (targetGroup != NULL) {
3140 				// In case of processID == groupID we have to free the
3141 				// allocated group.
3142 				freeGroup = group;
3143 			} else if (processID == groupID) {
3144 				// We created a new process group, let us insert it into the
3145 				// team's session.
3146 				insert_group_into_session(team->group->session, group);
3147 				targetGroup = group;
3148 			}
3149 
3150 			if (targetGroup != NULL) {
3151 				// we got a group, let's move the team there
3152 				process_group* oldGroup = team->group;
3153 
3154 				remove_team_from_group(team);
3155 				insert_team_into_group(targetGroup, team);
3156 
3157 				// Update the "orphaned" flag of all potentially affected
3158 				// groups.
3159 
3160 				// the team's old group
3161 				if (oldGroup->teams != NULL) {
3162 					oldGroup->orphaned = false;
3163 					update_orphaned_process_group(oldGroup, -1);
3164 				}
3165 
3166 				// the team's new group
3167 				struct team* parent = team->parent;
3168 				targetGroup->orphaned &= parent == NULL
3169 					|| parent->group == targetGroup
3170 					|| team->parent->session_id != team->session_id;
3171 
3172 				// children's groups
3173 				struct team* child = team->children;
3174 				while (child != NULL) {
3175 					child->group->orphaned = false;
3176 					update_orphaned_process_group(child->group, -1);
3177 
3178 					child = child->siblings_next;
3179 				}
3180 			} else
3181 				status = B_NOT_ALLOWED;
3182 		}
3183 	} else
3184 		status = B_NOT_ALLOWED;
3185 
3186 	// Changing the process group might have changed the situation for a parent
3187 	// waiting in wait_for_child(). Hence we notify it.
3188 	if (status == B_OK)
3189 		team->parent->dead_children->condition_variable.NotifyAll(false);
3190 
3191 	locker.Unlock();
3192 
3193 	if (status != B_OK) {
3194 		// in case of error, the group hasn't been added into the hash
3195 		team_delete_process_group(group);
3196 	}
3197 
3198 	team_delete_process_group(freeGroup);
3199 
3200 	return status == B_OK ? groupID : status;
3201 }
3202 
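// Usage sketch: the POSIX semantics implemented above, as a shell would use
// them when launching a job. path/argv are placeholders; to avoid the race,
// both parent and child usually issue the same setpgid() call.
//
//	pid_t child = fork();
//	if (child == 0) {
//		setpgid(0, 0);			// child: become leader of a new group
//		execv(path, argv);
//		_exit(1);
//	} else if (child > 0)
//		setpgid(child, child);	// parent: same request, whoever runs first wins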
3203 
3204 pid_t
3205 _user_setsid(void)
3206 {
3207 	struct team *team = thread_get_current_thread()->team;
3208 	struct process_session *session;
3209 	struct process_group *group;
3210 	cpu_status state;
3211 	bool failed = false;
3212 
3213 	// the team must not already be a process group leader
3214 	if (is_process_group_leader(team))
3215 		return B_NOT_ALLOWED;
3216 
3217 	group = create_process_group(team->id);
3218 	if (group == NULL)
3219 		return B_NO_MEMORY;
3220 
3221 	session = create_process_session(group->id);
3222 	if (session == NULL) {
3223 		team_delete_process_group(group);
3224 		return B_NO_MEMORY;
3225 	}
3226 
3227 	state = disable_interrupts();
3228 	GRAB_TEAM_LOCK();
3229 
3230 	// this may have changed since the check above
3231 	if (!is_process_group_leader(team)) {
3232 		remove_team_from_group(team);
3233 
3234 		insert_group_into_session(session, group);
3235 		insert_team_into_group(group, team);
3236 	} else
3237 		failed = true;
3238 
3239 	RELEASE_TEAM_LOCK();
3240 	restore_interrupts(state);
3241 
3242 	if (failed) {
3243 		team_delete_process_group(group);
3244 		free(session);
3245 		return B_NOT_ALLOWED;
3246 	}
3247 
3248 	return team->group_id;
3249 }
3250 
3251 
3252 status_t
3253 _user_wait_for_team(team_id id, status_t *_userReturnCode)
3254 {
3255 	status_t returnCode;
3256 	status_t status;
3257 
3258 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
3259 		return B_BAD_ADDRESS;
3260 
3261 	status = wait_for_team(id, &returnCode);
3262 	if (status >= B_OK && _userReturnCode != NULL) {
3263 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode)) < B_OK)
3264 			return B_BAD_ADDRESS;
3265 		return B_OK;
3266 	}
3267 
3268 	return syscall_restart_handle_post(status);
3269 }
3270 
3271 
3272 thread_id
3273 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
3274 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
3275 	port_id errorPort, uint32 errorToken)
3276 {
3277 	TRACE(("_user_load_image: argc = %ld\n", argCount));
3278 
3279 	if (argCount < 1)
3280 		return B_BAD_VALUE;
3281 
3282 	// copy and relocate the flat arguments
3283 	char** flatArgs;
3284 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3285 		argCount, envCount, flatArgs);
3286 	if (error != B_OK)
3287 		return error;
3288 
3289 	thread_id thread = load_image_etc(flatArgs, _ALIGN(flatArgsSize), argCount,
3290 		envCount, priority, flags, errorPort, errorToken);
3291 
3292 	free(flatArgs);
3293 		// load_image_etc() unset our variable if it took over ownership
3294 
3295 	return thread;
3296 }
3297 
3298 
3299 void
3300 _user_exit_team(status_t returnValue)
3301 {
3302 	struct thread *thread = thread_get_current_thread();
3303 
3304 	thread->exit.status = returnValue;
3305 	thread->exit.reason = THREAD_RETURN_EXIT;
3306 
3307 	send_signal(thread->id, SIGKILL);
3308 }
3309 
3310 
3311 status_t
3312 _user_kill_team(team_id team)
3313 {
3314 	return kill_team(team);
3315 }
3316 
3317 
3318 status_t
3319 _user_get_team_info(team_id id, team_info *userInfo)
3320 {
3321 	status_t status;
3322 	team_info info;
3323 
3324 	if (!IS_USER_ADDRESS(userInfo))
3325 		return B_BAD_ADDRESS;
3326 
3327 	status = _get_team_info(id, &info, sizeof(team_info));
3328 	if (status == B_OK) {
3329 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3330 			return B_BAD_ADDRESS;
3331 	}
3332 
3333 	return status;
3334 }
3335 
3336 
3337 status_t
3338 _user_get_next_team_info(int32 *userCookie, team_info *userInfo)
3339 {
3340 	status_t status;
3341 	team_info info;
3342 	int32 cookie;
3343 
3344 	if (!IS_USER_ADDRESS(userCookie)
3345 		|| !IS_USER_ADDRESS(userInfo)
3346 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3347 		return B_BAD_ADDRESS;
3348 
3349 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
3350 	if (status != B_OK)
3351 		return status;
3352 
3353 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3354 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3355 		return B_BAD_ADDRESS;
3356 
3357 	return status;
3358 }
3359 
3360 
3361 team_id
3362 _user_get_current_team(void)
3363 {
3364 	return team_get_current_team_id();
3365 }
3366 
3367 
3368 status_t
3369 _user_get_team_usage_info(team_id team, int32 who, team_usage_info *userInfo, size_t size)
3370 {
3371 	team_usage_info info;
3372 	status_t status;
3373 
3374 	if (!IS_USER_ADDRESS(userInfo))
3375 		return B_BAD_ADDRESS;
3376 
3377 	status = _get_team_usage_info(team, who, &info, size);
3378 	if (status != B_OK)
3379 		return status;
3380 
3381 	if (user_memcpy(userInfo, &info, size) < B_OK)
3382 		return B_BAD_ADDRESS;
3383 
3384 	return status;
3385 }
3386 
3387