xref: /haiku/src/system/kernel/team.cpp (revision 4f2fd49bdc6078128b1391191e4edac647044c3d)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 /*!	Team functions */
11 
12 #include <stdio.h>
13 #include <stdlib.h>
14 #include <string.h>
15 #include <sys/wait.h>
16 
17 #include <OS.h>
18 
19 #include <AutoDeleter.h>
20 #include <FindDirectory.h>
21 
22 #include <boot_device.h>
23 #include <elf.h>
24 #include <file_cache.h>
25 #include <fs/KPath.h>
26 #include <heap.h>
27 #include <int.h>
28 #include <kernel.h>
29 #include <kimage.h>
30 #include <kscheduler.h>
31 #include <ksignal.h>
32 #include <port.h>
33 #include <posix/realtime_sem.h>
34 #include <posix/xsi_semaphore.h>
35 #include <sem.h>
36 #include <syscall_process_info.h>
37 #include <syscall_restart.h>
38 #include <syscalls.h>
39 #include <team.h>
40 #include <tls.h>
41 #include <tracing.h>
42 #include <user_runtime.h>
43 #include <user_thread.h>
44 #include <usergroup.h>
45 #include <vfs.h>
46 #include <vm.h>
47 #include <vm_address_space.h>
48 #include <util/AutoLock.h>
49 #include <util/khash.h>
50 
51 //#define TRACE_TEAM
52 #ifdef TRACE_TEAM
53 #	define TRACE(x) dprintf x
54 #else
55 #	define TRACE(x) ;
56 #endif
57 
58 
59 struct team_key {
60 	team_id id;
61 };
62 
63 struct team_arg {
64 	char	*path;
65 	char	**flat_args;
66 	size_t	flat_args_size;
67 	uint32	arg_count;
68 	uint32	env_count;
69 	port_id	error_port;
70 	uint32	error_token;
71 };
72 
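// Snapshot of the parent thread's user-space context (stack area, TLS
// pointer, signal block mask, and CPU frame) filled in by fork_team() and
// applied by fork_team_thread_start() in the child before it returns to
// user space.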
73 struct fork_arg {
74 	area_id		user_stack_area;
75 	addr_t		user_stack_base;
76 	size_t		user_stack_size;
77 	addr_t		user_local_storage;
78 	sigset_t	sig_block_mask;
79 	struct user_thread* user_thread;
80 
81 	struct arch_fork_arg arch_info;
82 };
83 
84 
85 static hash_table *sTeamHash = NULL;
86 static hash_table *sGroupHash = NULL;
87 static struct team *sKernelTeam = NULL;
88 
89 // some arbitrarily chosen limits - they should probably depend on the
90 // available memory (the limit is not yet enforced)
91 static int32 sMaxTeams = 2048;
92 static int32 sUsedTeams = 1;
93 
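// The "team lock"; taken via GRAB_TEAM_LOCK()/InterruptsSpinLocker with
// interrupts disabled. It guards the team and process group hash tables as
// well as the parent/child, group, and session relationships below.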
94 spinlock gTeamSpinlock = B_SPINLOCK_INITIALIZER;
95 
96 
97 // #pragma mark - Tracing
98 
99 
100 #if TEAM_TRACING
101 namespace TeamTracing {
102 
103 class TeamForked : public AbstractTraceEntry {
104 public:
105 	TeamForked(thread_id forkedThread)
106 		:
107 		fForkedThread(forkedThread)
108 	{
109 		Initialized();
110 	}
111 
112 	virtual void AddDump(TraceOutput& out)
113 	{
114 		out.Print("team forked, new thread %ld", fForkedThread);
115 	}
116 
117 private:
118 	thread_id			fForkedThread;
119 };
120 
121 
122 class ExecTeam : public AbstractTraceEntry {
123 public:
124 	ExecTeam(const char* path, int32 argCount, const char* const* args,
125 			int32 envCount, const char* const* env)
126 		:
127 		fArgCount(argCount),
128 		fArgs(NULL)
129 	{
130 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
131 			false);
132 
133 		// determine the buffer size we need for the args
134 		size_t argBufferSize = 0;
135 		for (int32 i = 0; i < argCount; i++)
136 			argBufferSize += strlen(args[i]) + 1;
137 
138 		// allocate a buffer
139 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
140 		if (fArgs) {
141 			char* buffer = fArgs;
142 			for (int32 i = 0; i < argCount; i++) {
143 				size_t argSize = strlen(args[i]) + 1;
144 				memcpy(buffer, args[i], argSize);
145 				buffer += argSize;
146 			}
147 		}
148 
149 		// ignore env for the time being
150 		(void)envCount;
151 		(void)env;
152 
153 		Initialized();
154 	}
155 
156 	virtual void AddDump(TraceOutput& out)
157 	{
158 		out.Print("team exec, \"%s\", args:", fPath);
159 
160 		if (fArgs != NULL) {
161 			char* args = fArgs;
162 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
163 				out.Print(" \"%s\"", args);
164 				args += strlen(args) + 1;
165 			}
166 		} else
167 			out.Print(" <too long>");
168 	}
169 
170 private:
171 	char*	fPath;
172 	int32	fArgCount;
173 	char*	fArgs;
174 };
175 
176 
177 static const char*
178 job_control_state_name(job_control_state state)
179 {
180 	switch (state) {
181 		case JOB_CONTROL_STATE_NONE:
182 			return "none";
183 		case JOB_CONTROL_STATE_STOPPED:
184 			return "stopped";
185 		case JOB_CONTROL_STATE_CONTINUED:
186 			return "continued";
187 		case JOB_CONTROL_STATE_DEAD:
188 			return "dead";
189 		default:
190 			return "invalid";
191 	}
192 }
193 
194 
195 class SetJobControlState : public AbstractTraceEntry {
196 public:
197 	SetJobControlState(team_id team, job_control_state newState, int signal)
198 		:
199 		fTeam(team),
200 		fNewState(newState),
201 		fSignal(signal)
202 	{
203 		Initialized();
204 	}
205 
206 	virtual void AddDump(TraceOutput& out)
207 	{
208 		out.Print("team set job control state, team %ld, "
209 			"new state: %s, signal: %d",
210 			fTeam, job_control_state_name(fNewState), fSignal);
211 	}
212 
213 private:
214 	team_id				fTeam;
215 	job_control_state	fNewState;
216 	int					fSignal;
217 };
218 
219 
220 class WaitForChild : public AbstractTraceEntry {
221 public:
222 	WaitForChild(pid_t child, uint32 flags)
223 		:
224 		fChild(child),
225 		fFlags(flags)
226 	{
227 		Initialized();
228 	}
229 
230 	virtual void AddDump(TraceOutput& out)
231 	{
232 		out.Print("team wait for child, child: %ld, "
233 			"flags: 0x%lx", fChild, fFlags);
234 	}
235 
236 private:
237 	pid_t	fChild;
238 	uint32	fFlags;
239 };
240 
241 
242 class WaitForChildDone : public AbstractTraceEntry {
243 public:
244 	WaitForChildDone(const job_control_entry& entry)
245 		:
246 		fState(entry.state),
247 		fTeam(entry.thread),
248 		fStatus(entry.status),
249 		fReason(entry.reason),
250 		fSignal(entry.signal)
251 	{
252 		Initialized();
253 	}
254 
255 	WaitForChildDone(status_t error)
256 		:
257 		fTeam(error)
258 	{
259 		Initialized();
260 	}
261 
262 	virtual void AddDump(TraceOutput& out)
263 	{
264 		if (fTeam >= 0) {
265 			out.Print("team wait for child done, team: %ld, "
266 				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
267 				fTeam, job_control_state_name(fState), fStatus, fReason,
268 				fSignal);
269 		} else {
270 			out.Print("team wait for child failed, error: "
271 				"0x%lx, ", fTeam);
272 		}
273 	}
274 
275 private:
276 	job_control_state	fState;
277 	team_id				fTeam;
278 	status_t			fStatus;
279 	uint16				fReason;
280 	uint16				fSignal;
281 };
282 
283 }	// namespace TeamTracing
284 
285 #	define T(x) new(std::nothrow) TeamTracing::x;
286 #else
287 #	define T(x) ;
288 #endif
289 
290 
291 
292 //	#pragma mark - Private functions
293 
294 
295 static void
296 _dump_team_info(struct team *team)
297 {
298 	kprintf("TEAM: %p\n", team);
299 	kprintf("id:          %ld (%#lx)\n", team->id, team->id);
300 	kprintf("name:        '%s'\n", team->name);
301 	kprintf("args:        '%s'\n", team->args);
302 	kprintf("next:        %p\n", team->next);
303 	kprintf("parent:      %p", team->parent);
304 	if (team->parent != NULL) {
305 		kprintf(" (id = %ld)\n", team->parent->id);
306 	} else
307 		kprintf("\n");
308 
309 	kprintf("children:    %p\n", team->children);
310 	kprintf("num_threads: %d\n", team->num_threads);
311 	kprintf("state:       %d\n", team->state);
312 	kprintf("flags:       0x%lx\n", team->flags);
313 	kprintf("io_context:  %p\n", team->io_context);
314 	if (team->address_space)
315 		kprintf("address_space: %p\n", team->address_space);
316 	kprintf("main_thread: %p\n", team->main_thread);
317 	kprintf("thread_list: %p\n", team->thread_list);
318 	kprintf("group_id:    %ld\n", team->group_id);
319 	kprintf("session_id:  %ld\n", team->session_id);
320 }
321 
322 
323 static int
324 dump_team_info(int argc, char **argv)
325 {
326 	struct hash_iterator iterator;
327 	struct team *team;
328 	team_id id = -1;
329 	bool found = false;
330 
331 	if (argc < 2) {
332 		struct thread* thread = thread_get_current_thread();
333 		if (thread != NULL && thread->team != NULL)
334 			_dump_team_info(thread->team);
335 		else
336 			kprintf("No current team!\n");
337 		return 0;
338 	}
339 
340 	id = strtoul(argv[1], NULL, 0);
341 	if (IS_KERNEL_ADDRESS(id)) {
342 		// semi-hack
343 		_dump_team_info((struct team *)id);
344 		return 0;
345 	}
346 
347 	// walk through the team hash, trying to match name or id
348 	hash_open(sTeamHash, &iterator);
349 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
350 		if ((team->name && strcmp(argv[1], team->name) == 0) || team->id == id) {
351 			_dump_team_info(team);
352 			found = true;
353 			break;
354 		}
355 	}
356 	hash_close(sTeamHash, &iterator, false);
357 
358 	if (!found)
359 		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
360 	return 0;
361 }
362 
363 
364 static int
365 dump_teams(int argc, char **argv)
366 {
367 	struct hash_iterator iterator;
368 	struct team *team;
369 
370 	kprintf("team           id  parent      name\n");
371 	hash_open(sTeamHash, &iterator);
372 
373 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
374 		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->name);
375 	}
376 
377 	hash_close(sTeamHash, &iterator, false);
378 	return 0;
379 }
380 
381 
382 static int
383 team_struct_compare(void *_p, const void *_key)
384 {
385 	struct team *p = (struct team*)_p;
386 	const struct team_key *key = (const struct team_key*)_key;
387 
388 	if (p->id == key->id)
389 		return 0;
390 
391 	return 1;
392 }
393 
394 
395 static uint32
396 team_struct_hash(void *_p, const void *_key, uint32 range)
397 {
398 	struct team *p = (struct team*)_p;
399 	const struct team_key *key = (const struct team_key*)_key;
400 
401 	if (p != NULL)
402 		return p->id % range;
403 
404 	return (uint32)key->id % range;
405 }
406 
407 
408 static int
409 process_group_compare(void *_group, const void *_key)
410 {
411 	struct process_group *group = (struct process_group*)_group;
412 	const struct team_key *key = (const struct team_key*)_key;
413 
414 	if (group->id == key->id)
415 		return 0;
416 
417 	return 1;
418 }
419 
420 
421 static uint32
422 process_group_hash(void *_group, const void *_key, uint32 range)
423 {
424 	struct process_group *group = (struct process_group*)_group;
425 	const struct team_key *key = (const struct team_key*)_key;
426 
427 	if (group != NULL)
428 		return group->id % range;
429 
430 	return (uint32)key->id % range;
431 }
432 
433 
434 static void
435 insert_team_into_parent(struct team *parent, struct team *team)
436 {
437 	ASSERT(parent != NULL);
438 
439 	team->siblings_next = parent->children;
440 	parent->children = team;
441 	team->parent = parent;
442 }
443 
444 
445 /*!	Note: must have team lock held */
446 static void
447 remove_team_from_parent(struct team *parent, struct team *team)
448 {
449 	struct team *child, *last = NULL;
450 
451 	for (child = parent->children; child != NULL; child = child->siblings_next) {
452 		if (child == team) {
453 			if (last == NULL)
454 				parent->children = child->siblings_next;
455 			else
456 				last->siblings_next = child->siblings_next;
457 
458 			team->parent = NULL;
459 			break;
460 		}
461 		last = child;
462 	}
463 }
464 
465 
466 /*!	Reparent each of our children
467 	Note: must have team lock held
468 */
469 static void
470 reparent_children(struct team *team)
471 {
472 	struct team *child;
473 
474 	while ((child = team->children) != NULL) {
475 		// remove the child from the current proc and add to the parent
476 		remove_team_from_parent(team, child);
477 		insert_team_into_parent(sKernelTeam, child);
478 	}
479 
480 	// move job control entries too
481 	sKernelTeam->stopped_children->entries.MoveFrom(
482 		&team->stopped_children->entries);
483 	sKernelTeam->continued_children->entries.MoveFrom(
484 		&team->continued_children->entries);
485 
486 	// Note, we don't move the dead children entries. Those will be deleted
487 	// when the team structure is deleted.
488 }
489 
490 
491 static bool
492 is_session_leader(struct team *team)
493 {
494 	return team->session_id == team->id;
495 }
496 
497 
498 static bool
499 is_process_group_leader(struct team *team)
500 {
501 	return team->group_id == team->id;
502 }
503 
504 
505 static void
506 deferred_delete_process_group(struct process_group *group)
507 {
508 	if (group == NULL)
509 		return;
510 
511 	// remove_group_from_session() keeps this pointer around
512 	// only if the session can be freed as well
513 	if (group->session) {
514 		TRACE(("deferred_delete_process_group(): frees session %ld\n",
515 			group->session->id));
516 		deferred_free(group->session);
517 	}
518 
519 	deferred_free(group);
520 }
521 
522 
523 /*!	Removes a group from a session, and puts the session object
524 	back into the session cache, if it's not used anymore.
525 	You must hold the team lock when calling this function.
526 */
527 static void
528 remove_group_from_session(struct process_group *group)
529 {
530 	struct process_session *session = group->session;
531 
532 	// the group must be in a session for this function to have any effect
533 	if (session == NULL)
534 		return;
535 
536 	hash_remove(sGroupHash, group);
537 
538 	// we cannot free the session here, so we keep the group's link to it
539 	// around - this way it'll be freed by deferred_delete_process_group()
540 	if (--session->group_count > 0)
541 		group->session = NULL;
542 }
543 
544 
545 /*!	Team lock must be held.
546 */
547 static void
548 acquire_process_group_ref(pid_t groupID)
549 {
550 	process_group* group = team_get_process_group_locked(NULL, groupID);
551 	if (group == NULL) {
552 		panic("acquire_process_group_ref(): unknown group ID: %ld", groupID);
553 		return;
554 	}
555 
556 	group->refs++;
557 }
558 
559 
560 /*!	Team lock must be held.
561 */
562 static void
563 release_process_group_ref(pid_t groupID)
564 {
565 	process_group* group = team_get_process_group_locked(NULL, groupID);
566 	if (group == NULL) {
567 		panic("release_process_group_ref(): unknown group ID: %ld", groupID);
568 		return;
569 	}
570 
571 	if (group->refs <= 0) {
572 		panic("release_process_group_ref(%ld): ref count already 0", groupID);
573 		return;
574 	}
575 
576 	if (--group->refs > 0)
577 		return;
578 
579 	// group is no longer used
580 
581 	remove_group_from_session(group);
582 	deferred_delete_process_group(group);
583 }
584 
585 
586 /*!	You must hold the team lock when calling this function. */
587 static void
588 insert_group_into_session(struct process_session *session, struct process_group *group)
589 {
590 	if (group == NULL)
591 		return;
592 
593 	group->session = session;
594 	hash_insert(sGroupHash, group);
595 	session->group_count++;
596 }
597 
598 
599 /*!	You must hold the team lock when calling this function. */
600 static void
601 insert_team_into_group(struct process_group *group, struct team *team)
602 {
603 	team->group = group;
604 	team->group_id = group->id;
605 	team->session_id = group->session->id;
606 
607 	team->group_next = group->teams;
608 	group->teams = team;
609 	acquire_process_group_ref(group->id);
610 }
611 
612 
613 /*!	Removes the team from the group.
614 
615 	\param team the team that'll be removed from its group
616 */
617 static void
618 remove_team_from_group(struct team *team)
619 {
620 	struct process_group *group = team->group;
621 	struct team *current, *last = NULL;
622 
623 	// the team must be in a group for this function to have any effect
624 	if (group == NULL)
625 		return;
626 
627 	for (current = group->teams; current != NULL; current = current->group_next) {
628 		if (current == team) {
629 			if (last == NULL)
630 				group->teams = current->group_next;
631 			else
632 				last->group_next = current->group_next;
633 
634 			team->group = NULL;
635 			break;
636 		}
637 		last = current;
638 	}
639 
640 	team->group = NULL;
641 	team->group_next = NULL;
642 
643 	release_process_group_ref(group->id);
644 }
645 
646 
647 static struct process_group *
648 create_process_group(pid_t id)
649 {
650 	struct process_group *group = (struct process_group *)malloc(sizeof(struct process_group));
651 	if (group == NULL)
652 		return NULL;
653 
654 	group->id = id;
655 	group->refs = 0;
656 	group->session = NULL;
657 	group->teams = NULL;
658 	group->orphaned = true;
659 	return group;
660 }
661 
662 
663 static struct process_session *
664 create_process_session(pid_t id)
665 {
666 	struct process_session *session
667 		= (struct process_session *)malloc(sizeof(struct process_session));
668 	if (session == NULL)
669 		return NULL;
670 
671 	session->id = id;
672 	session->group_count = 0;
673 	session->controlling_tty = -1;
674 	session->foreground_group = -1;
675 
676 	return session;
677 }
678 
679 
680 static void
681 set_team_name(struct team* team, const char* name)
682 {
683 	if (const char* lastSlash = strrchr(name, '/'))
684 		name = lastSlash + 1;
685 
686 	strlcpy(team->name, name, B_OS_NAME_LENGTH);
687 }
688 
689 
690 static struct team *
691 create_team_struct(const char *name, bool kernel)
692 {
693 	struct team *team = (struct team *)malloc(sizeof(struct team));
694 	if (team == NULL)
695 		return NULL;
696 	MemoryDeleter teamDeleter(team);
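	// The deleters below free everything allocated so far if we have to bail
	// out early; they are all detached once the team struct is complete.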
697 
698 	team->next = team->siblings_next = team->children = team->parent = NULL;
699 	team->id = allocate_thread_id();
700 	set_team_name(team, name);
701 	team->args[0] = '\0';
702 	team->num_threads = 0;
703 	team->io_context = NULL;
704 	team->address_space = NULL;
705 	team->realtime_sem_context = NULL;
706 	team->xsi_sem_context = NULL;
707 	team->thread_list = NULL;
708 	team->main_thread = NULL;
709 	team->loading_info = NULL;
710 	team->state = TEAM_STATE_BIRTH;
711 	team->flags = 0;
712 	team->death_sem = -1;
713 	team->user_data_area = -1;
714 	team->user_data = 0;
715 	team->used_user_data = 0;
716 	team->user_data_size = 0;
717 	team->free_user_threads = NULL;
718 
719 	team->supplementary_groups = NULL;
720 	team->supplementary_group_count = 0;
721 
722 	team->dead_threads_kernel_time = 0;
723 	team->dead_threads_user_time = 0;
724 
725 	// dead threads
726 	list_init(&team->dead_threads);
727 	team->dead_threads_count = 0;
728 
729 	// dead children
730 	team->dead_children = new(nothrow) team_dead_children;
731 	if (team->dead_children == NULL)
732 		return NULL;
733 	ObjectDeleter<team_dead_children> deadChildrenDeleter(team->dead_children);
734 
735 	team->dead_children->count = 0;
736 	team->dead_children->kernel_time = 0;
737 	team->dead_children->user_time = 0;
738 
739 	// stopped children
740 	team->stopped_children = new(nothrow) team_job_control_children;
741 	if (team->stopped_children == NULL)
742 		return NULL;
743 	ObjectDeleter<team_job_control_children> stoppedChildrenDeleter(
744 		team->stopped_children);
745 
746 	// continued children
747 	team->continued_children = new(nothrow) team_job_control_children;
748 	if (team->continued_children == NULL)
749 		return NULL;
750 	ObjectDeleter<team_job_control_children> continuedChildrenDeleter(
751 		team->continued_children);
752 
753 	// job control entry
754 	team->job_control_entry = new(nothrow) job_control_entry;
755 	if (team->job_control_entry == NULL)
756 		return NULL;
757 	ObjectDeleter<job_control_entry> jobControlEntryDeleter(
758 		team->job_control_entry);
759 	team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
760 	team->job_control_entry->thread = team->id;
761 	team->job_control_entry->team = team;
762 
763 	list_init(&team->image_list);
764 	list_init(&team->watcher_list);
765 
766 	clear_team_debug_info(&team->debug_info, true);
767 
768 	if (arch_team_init_team_struct(team, kernel) < 0)
769 		return NULL;
770 
771 	// publish dead/stopped/continued children condition vars
772 	team->dead_children->condition_variable.Init(team->dead_children,
773 		"team children");
774 
775 	// keep all allocated structures
776 	jobControlEntryDeleter.Detach();
777 	continuedChildrenDeleter.Detach();
778 	stoppedChildrenDeleter.Detach();
779 	deadChildrenDeleter.Detach();
780 	teamDeleter.Detach();
781 
782 	return team;
783 }
784 
785 
786 static void
787 delete_team_struct(struct team *team)
788 {
789 	while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
790 			&team->dead_threads)) {
791 		free(threadDeathEntry);
792 	}
793 
794 	while (job_control_entry* entry = team->dead_children->entries.RemoveHead())
795 		delete entry;
796 
797 	while (free_user_thread* entry = team->free_user_threads) {
798 		team->free_user_threads = entry->next;
799 		free(entry);
800 	}
801 
802 	malloc_referenced_release(team->supplementary_groups);
803 
804 	delete team->job_control_entry;
805 		// usually already NULL and transferred to the parent
806 	delete team->continued_children;
807 	delete team->stopped_children;
808 	delete team->dead_children;
809 	free(team);
810 }
811 
812 
813 static status_t
814 create_team_user_data(struct team* team)
815 {
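	// The user data area backs the per-thread user_thread structures handed
	// out by team_allocate_user_thread(); free_user_threads keeps a list of
	// slots that can be reused.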
816 	void* address = (void*)KERNEL_USER_DATA_BASE;
817 	size_t size = 4 * B_PAGE_SIZE;
818 	team->user_data_area = create_area_etc(team->id, "user area", &address,
819 		B_BASE_ADDRESS, size, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0);
820 	if (team->user_data_area < 0)
821 		return team->user_data_area;
822 
823 	team->user_data = (addr_t)address;
824 	team->used_user_data = 0;
825 	team->user_data_size = size;
826 	team->free_user_threads = NULL;
827 
828 	return B_OK;
829 }
830 
831 
832 static void
833 delete_team_user_data(struct team* team)
834 {
835 	if (team->user_data_area >= 0) {
836 		vm_delete_area(team->id, team->user_data_area, true);
837 		team->user_data = 0;
838 		team->used_user_data = 0;
839 		team->user_data_size = 0;
840 		team->user_data_area = -1;
841 		while (free_user_thread* entry = team->free_user_threads) {
842 			team->free_user_threads = entry->next;
843 			free(entry);
844 		}
845 	}
846 }
847 
848 
849 static status_t
850 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
851 	int32 argCount, int32 envCount, char**& _flatArgs)
852 {
853 	if (argCount < 0 || envCount < 0)
854 		return B_BAD_VALUE;
855 
856 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
857 		return B_TOO_MANY_ARGS;
858 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
859 		return B_BAD_VALUE;
860 
861 	if (!IS_USER_ADDRESS(userFlatArgs))
862 		return B_BAD_ADDRESS;
863 
864 	// allocate kernel memory
865 	char** flatArgs = (char**)malloc(flatArgsSize);
866 	if (flatArgs == NULL)
867 		return B_NO_MEMORY;
868 
869 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
870 		free(flatArgs);
871 		return B_BAD_ADDRESS;
872 	}
873 
874 	// check and relocate the array
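	// The flat buffer starts with (argCount + envCount + 2) char* entries --
	// the argument vector, a NULL, the environment vector, a NULL -- followed
	// by the string data. The copied pointers still hold user-space addresses
	// and are translated below to point into our kernel copy.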
875 	status_t error = B_OK;
876 	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
877 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
878 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
879 		if (i == argCount || i == argCount + envCount + 1) {
880 			// check array null termination
881 			if (flatArgs[i] != NULL) {
882 				error = B_BAD_VALUE;
883 				break;
884 			}
885 		} else {
886 			// check string
887 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
888 			size_t maxLen = stringEnd - arg;
889 			if (arg < stringBase || arg >= stringEnd
890 					|| strnlen(arg, maxLen) == maxLen) {
891 				error = B_BAD_VALUE;
892 				break;
893 			}
894 
895 			flatArgs[i] = arg;
896 		}
897 	}
898 
899 	if (error == B_OK)
900 		_flatArgs = flatArgs;
901 	else
902 		free(flatArgs);
903 
904 	return error;
905 }
906 
907 
908 static void
909 free_team_arg(struct team_arg *teamArg)
910 {
911 	if (teamArg != NULL) {
912 		free(teamArg->flat_args);
913 		free(teamArg->path);
914 		free(teamArg);
915 	}
916 }
917 
918 
919 static status_t
920 create_team_arg(struct team_arg **_teamArg, const char *path, char** flatArgs,
921 	size_t flatArgsSize, int32 argCount, int32 envCount, port_id port,
922 	uint32 token)
923 {
924 	struct team_arg *teamArg = (struct team_arg*)malloc(sizeof(team_arg));
925 	if (teamArg == NULL)
926 		return B_NO_MEMORY;
927 
928 	teamArg->path = strdup(path);
929 	if (teamArg->path == NULL) {
930 		free(teamArg);
931 		return B_NO_MEMORY;
932 	}
933 
934 	// copy the args over
935 
936 	teamArg->flat_args = flatArgs;
937 	teamArg->flat_args_size = flatArgsSize;
938 	teamArg->arg_count = argCount;
939 	teamArg->env_count = envCount;
940 	teamArg->error_port = port;
941 	teamArg->error_token = token;
942 
943 	*_teamArg = teamArg;
944 	return B_OK;
945 }
946 
947 
948 static int32
949 team_create_thread_start(void *args)
950 {
951 	status_t err;
952 	struct thread *t;
953 	struct team *team;
954 	struct team_arg *teamArgs = (struct team_arg*)args;
955 	const char *path;
956 	addr_t entry;
957 	char ustack_name[128];
958 	uint32 sizeLeft;
959 	char **userArgs;
960 	char **userEnv;
961 	struct user_space_program_args *programArgs;
962 	uint32 argCount, envCount, i;
963 
964 	t = thread_get_current_thread();
965 	team = t->team;
966 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
967 
968 	TRACE(("team_create_thread_start: entry thread %ld\n", t->id));
969 
970 	// get a user thread for the main thread
971 	t->user_thread = team_allocate_user_thread(team);
972 
973 	// create an initial primary stack area
974 
975 	// Main stack area layout is currently as follows (starting from 0):
976 	//
977 	// size								| usage
978 	// ---------------------------------+--------------------------------
979 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
980 	// TLS_SIZE							| TLS data
981 	// sizeof(user_space_program_args)	| argument structure for the runtime
982 	//									| loader
983 	// flat arguments size				| flat process arguments and environment
984 
985 	// ToDo: ENV_SIZE is a) limited, and b) not used after libroot copied it to the heap
986 	// ToDo: we could reserve the whole USER_STACK_REGION upfront...
987 
988 	sizeLeft = PAGE_ALIGN(USER_MAIN_THREAD_STACK_SIZE
989 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE + TLS_SIZE
990 		+ sizeof(struct user_space_program_args) + teamArgs->flat_args_size);
991 	t->user_stack_base = USER_STACK_REGION + USER_STACK_REGION_SIZE - sizeLeft;
992 	t->user_stack_size = USER_MAIN_THREAD_STACK_SIZE
993 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
994 		// the exact location at the end of the user stack area
995 
996 	sprintf(ustack_name, "%s_main_stack", team->name);
997 	t->user_stack_area = create_area_etc(team->id, ustack_name,
998 		(void **)&t->user_stack_base, B_EXACT_ADDRESS, sizeLeft, B_NO_LOCK,
999 		B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0);
1000 	if (t->user_stack_area < 0) {
1001 		dprintf("team_create_thread_start: could not create default user stack "
1002 			"region: %s\n", strerror(t->user_stack_area));
1003 
1004 		free_team_arg(teamArgs);
1005 		return t->user_stack_area;
1006 	}
1007 
1008 	// now that the TLS area is allocated, initialize TLS
1009 	arch_thread_init_tls(t);
1010 
1011 	argCount = teamArgs->arg_count;
1012 	envCount = teamArgs->env_count;
1013 
1014 	programArgs = (struct user_space_program_args *)(t->user_stack_base
1015 		+ t->user_stack_size + TLS_SIZE);
1016 
1017 	userArgs = (char**)(programArgs + 1);
1018 	userEnv = userArgs + argCount + 1;
1019 	path = teamArgs->path;
1020 
1021 	if (user_strlcpy(programArgs->program_path, path,
1022 				sizeof(programArgs->program_path)) < B_OK
1023 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1024 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char **)) < B_OK
1025 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1026 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char **)) < B_OK
1027 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1028 				sizeof(port_id)) < B_OK
1029 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1030 				sizeof(uint32)) < B_OK
1031 		|| user_memcpy(userArgs, teamArgs->flat_args,
1032 				teamArgs->flat_args_size) < B_OK) {
1033 		// the team deletion process will clean this mess
1034 		return B_BAD_ADDRESS;
1035 	}
1036 
1037 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1038 
1039 	// add args to info member
1040 	team->args[0] = 0;
1041 	strlcpy(team->args, path, sizeof(team->args));
1042 	for (i = 1; i < argCount; i++) {
1043 		strlcat(team->args, " ", sizeof(team->args));
1044 		strlcat(team->args, teamArgs->flat_args[i], sizeof(team->args));
1045 	}
1046 
1047 	free_team_arg(teamArgs);
1048 		// the arguments are already on the user stack, we no longer need
1049 		// them in this form
1050 
1051 	// NOTE: Normally arch_thread_enter_userspace() never returns, i.e.
1052 	// automatic variables with function scope will never be destroyed.
1053 	{
1054 		// find runtime_loader path
1055 		KPath runtimeLoaderPath;
1056 		err = find_directory(B_BEOS_SYSTEM_DIRECTORY, gBootDevice, false,
1057 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1058 		if (err < B_OK) {
1059 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1060 				strerror(err)));
1061 			return err;
1062 		}
1063 		runtimeLoaderPath.UnlockBuffer();
1064 		err = runtimeLoaderPath.Append("runtime_loader");
1065 
1066 		if (err == B_OK)
1067 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0, &entry);
1068 	}
1069 
1070 	if (err < B_OK) {
1071 		// Luckily, we don't have to clean up the mess we created - that's
1072 		// done for us by the normal team deletion process
1073 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1074 			"%s\n", strerror(err)));
1075 		return err;
1076 	}
1077 
1078 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1079 
1080 	team->state = TEAM_STATE_NORMAL;
1081 
1082 	// jump to the entry point in user space
1083 	return arch_thread_enter_userspace(t, entry, programArgs, NULL);
1084 		// only returns in case of error
1085 }
1086 
1087 
1088 /*!	The BeOS kernel exports a function with this name, but most probably with
1089 	different parameters; we should not make it public.
1090 */
1091 static thread_id
1092 load_image_etc(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1093 	int32 envCount, int32 priority, uint32 flags, port_id errorPort,
1094 	uint32 errorToken)
1095 {
1096 	char** flatArgs = _flatArgs;
1097 	struct team *team, *parent;
1098 	const char *threadName;
1099 	thread_id thread;
1100 	status_t status;
1101 	cpu_status state;
1102 	struct team_arg *teamArgs;
1103 	struct team_loading_info loadingInfo;
1104 
1105 	if (flatArgs == NULL || argCount == 0)
1106 		return B_BAD_VALUE;
1107 
1108 	const char* path = flatArgs[0];
1109 
1110 	TRACE(("load_image_etc: name '%s', args = %p, argCount = %ld\n",
1111 		path, flatArgs, argCount));
1112 
1113 	team = create_team_struct(path, false);
1114 	if (team == NULL)
1115 		return B_NO_MEMORY;
1116 
1117 	parent = thread_get_current_thread()->team;
1118 
1119 	if (flags & B_WAIT_TILL_LOADED) {
1120 		loadingInfo.thread = thread_get_current_thread();
1121 		loadingInfo.result = B_ERROR;
1122 		loadingInfo.done = false;
1123 		team->loading_info = &loadingInfo;
1124 	}
1125 
1126 	// Inherit the parent's user/group, but also check the executable's
1127 	// set-user/group-id permission
1128 	inherit_parent_user_and_group(team, parent);
1129 	update_set_id_user_and_group(team, path);
1130 
1131 	state = disable_interrupts();
1132 	GRAB_TEAM_LOCK();
1133 
1134 	hash_insert(sTeamHash, team);
1135 	insert_team_into_parent(parent, team);
1136 	insert_team_into_group(parent->group, team);
1137 	sUsedTeams++;
1138 
1139 	RELEASE_TEAM_LOCK();
1140 	restore_interrupts(state);
1141 
1142 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1143 		envCount, errorPort, errorToken);
1144 
1145 	if (status != B_OK)
1146 		goto err1;
1147 
1148 	_flatArgs = NULL;
1149 		// args are owned by the team_arg structure now
1150 
1151 	// create a new io_context for this team
1152 	team->io_context = vfs_new_io_context(parent->io_context);
1153 	if (!team->io_context) {
1154 		status = B_NO_MEMORY;
1155 		goto err2;
1156 	}
1157 
1158 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1159 	vfs_exec_io_context(team->io_context);
1160 
1161 	// create an address space for this team
1162 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1163 		&team->address_space);
1164 	if (status < B_OK)
1165 		goto err3;
1166 
1167 	// cut the path from the main thread name
1168 	threadName = strrchr(path, '/');
1169 	if (threadName != NULL)
1170 		threadName++;
1171 	else
1172 		threadName = path;
1173 
1174 	// create the user data area
1175 	status = create_team_user_data(team);
1176 	if (status != B_OK)
1177 		goto err4;
1178 
1179 	// Create a kernel thread, but under the context of the new team
1180 	// The new thread will take over ownership of teamArgs
1181 	thread = spawn_kernel_thread_etc(team_create_thread_start, threadName,
1182 		B_NORMAL_PRIORITY, teamArgs, team->id, team->id);
1183 	if (thread < 0) {
1184 		status = thread;
1185 		goto err5;
1186 	}
1187 
1188 	// wait for the loader of the new team to finish its work
1189 	if (flags & B_WAIT_TILL_LOADED) {
1190 		struct thread *mainThread;
1191 
1192 		state = disable_interrupts();
1193 		GRAB_THREAD_LOCK();
1194 
1195 		mainThread = thread_get_thread_struct_locked(thread);
1196 		if (mainThread) {
1197 			// resume the team's main thread
1198 			if (mainThread->state == B_THREAD_SUSPENDED)
1199 				scheduler_enqueue_in_run_queue(mainThread);
1200 
1201 			// Now suspend ourselves until loading is finished.
1202 			// We will be woken either by the thread, when it finished or
1203 			// aborted loading, or when the team is going to die (e.g. is
1204 			// killed). In either case the one setting `loadingInfo.done' is
1205 			// responsible for removing the info from the team structure.
1206 			while (!loadingInfo.done) {
1207 				thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
1208 				scheduler_reschedule();
1209 			}
1210 		} else {
1211 			// Impressive! Someone managed to kill the thread in this short
1212 			// time.
1213 		}
1214 
1215 		RELEASE_THREAD_LOCK();
1216 		restore_interrupts(state);
1217 
1218 		if (loadingInfo.result < B_OK)
1219 			return loadingInfo.result;
1220 	}
1221 
1222 	// notify the debugger
1223 	user_debug_team_created(team->id);
1224 
1225 	return thread;
1226 
1227 err5:
1228 	delete_team_user_data(team);
1229 err4:
1230 	vm_put_address_space(team->address_space);
1231 err3:
1232 	vfs_free_io_context(team->io_context);
1233 err2:
1234 	free_team_arg(teamArgs);
1235 err1:
1236 	// remove the team structure from the team hash table and delete the team structure
1237 	state = disable_interrupts();
1238 	GRAB_TEAM_LOCK();
1239 
1240 	remove_team_from_group(team);
1241 	remove_team_from_parent(parent, team);
1242 	hash_remove(sTeamHash, team);
1243 
1244 	RELEASE_TEAM_LOCK();
1245 	restore_interrupts(state);
1246 
1247 	delete_team_struct(team);
1248 
1249 	return status;
1250 }
1251 
1252 
1253 /*!	Almost shuts down the current team and loads a new image into it.
1254 	If successful, this function does not return and will take over ownership of
1255 	the arguments provided.
1256 	This function may only be called from user space.
1257 */
1258 static status_t
1259 exec_team(const char *path, char**& _flatArgs, size_t flatArgsSize,
1260 	int32 argCount, int32 envCount)
1261 {
1262 	// NOTE: Since this function normally doesn't return, don't use automatic
1263 	// variables that need destruction in the function scope.
1264 	char** flatArgs = _flatArgs;
1265 	struct team *team = thread_get_current_thread()->team;
1266 	struct team_arg *teamArgs;
1267 	const char *threadName;
1268 	status_t status = B_OK;
1269 	cpu_status state;
1270 	struct thread *thread;
1271 	thread_id nubThreadID = -1;
1272 
1273 	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
1274 		path, argCount, envCount, team->id));
1275 
1276 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1277 
1278 	// switching the kernel at run time is probably not a good idea :)
1279 	if (team == team_get_kernel_team())
1280 		return B_NOT_ALLOWED;
1281 
1282 	// we currently need to be single threaded here
1283 	// ToDo: maybe we should just kill all other threads and
1284 	//	make the current thread the team's main thread?
1285 	if (team->main_thread != thread_get_current_thread())
1286 		return B_NOT_ALLOWED;
1287 
1288 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1289 	// We iterate through the thread list to make sure that there's no other
1290 	// thread.
1291 	state = disable_interrupts();
1292 	GRAB_TEAM_LOCK();
1293 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1294 
1295 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1296 		nubThreadID = team->debug_info.nub_thread;
1297 
1298 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1299 
1300 	for (thread = team->thread_list; thread; thread = thread->team_next) {
1301 		if (thread != team->main_thread && thread->id != nubThreadID) {
1302 			status = B_NOT_ALLOWED;
1303 			break;
1304 		}
1305 	}
1306 
1307 	RELEASE_TEAM_LOCK();
1308 	restore_interrupts(state);
1309 
1310 	if (status != B_OK)
1311 		return status;
1312 
1313 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1314 		envCount, -1, 0);
1315 
1316 	if (status != B_OK)
1317 		return status;
1318 
1319 	_flatArgs = NULL;
1320 		// args are owned by the team_arg structure now
1321 
1322 	// ToDo: remove team resources if there are any left
1323 	// thread_atkernel_exit() might not be called at all
1324 
1325 	thread_reset_for_exec();
1326 
1327 	user_debug_prepare_for_exec();
1328 
1329 	delete_team_user_data(team);
1330 	vm_delete_areas(team->address_space);
1331 	xsi_sem_undo(team);
1332 	delete_owned_ports(team->id);
1333 	sem_delete_owned_sems(team->id);
1334 	remove_images(team);
1335 	vfs_exec_io_context(team->io_context);
1336 	delete_realtime_sem_context(team->realtime_sem_context);
1337 	team->realtime_sem_context = NULL;
1338 
1339 	status = create_team_user_data(team);
1340 	if (status != B_OK) {
1341 		// creating the user data failed -- we're toast
1342 		// TODO: We should better keep the old user area in the first place.
1343 		exit_thread(status);
1344 		return status;
1345 	}
1346 
1347 	user_debug_finish_after_exec();
1348 
1349 	// rename the team
1350 
1351 	set_team_name(team, path);
1352 
1353 	// cut the path from the team name and rename the main thread, too
1354 	threadName = strrchr(path, '/');
1355 	if (threadName != NULL)
1356 		threadName++;
1357 	else
1358 		threadName = path;
1359 	rename_thread(thread_get_current_thread_id(), threadName);
1360 
1361 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1362 
1363 	// Update user/group according to the executable's set-user/group-id
1364 	// permission.
1365 	update_set_id_user_and_group(team, path);
1366 
1367 	user_debug_team_exec();
1368 
1369 	status = team_create_thread_start(teamArgs);
1370 		// this one usually doesn't return...
1371 
1372 	// sorry, we have to kill us, there is no way out anymore
1373 	// (without any areas left and all that)
1374 	exit_thread(status);
1375 
1376 	// we return a status here since the signal that is sent by the
1377 	// call above is not immediately handled
1378 	return B_ERROR;
1379 }
1380 
1381 
1382 /*! This is the first function to be called from the newly created
1383 	main child thread.
1384 	It will fill in everything that's left to do from fork_arg, and
1385 	return from the parent's fork() syscall to the child.
1386 */
1387 static int32
1388 fork_team_thread_start(void *_args)
1389 {
1390 	struct thread *thread = thread_get_current_thread();
1391 	struct fork_arg *forkArgs = (struct fork_arg *)_args;
1392 
1393 	struct arch_fork_arg archArgs = forkArgs->arch_info;
1394 		// we need a local copy of the arch dependent part
1395 
1396 	thread->user_stack_area = forkArgs->user_stack_area;
1397 	thread->user_stack_base = forkArgs->user_stack_base;
1398 	thread->user_stack_size = forkArgs->user_stack_size;
1399 	thread->user_local_storage = forkArgs->user_local_storage;
1400 	thread->sig_block_mask = forkArgs->sig_block_mask;
1401 	thread->user_thread = forkArgs->user_thread;
1402 
1403 	arch_thread_init_tls(thread);
1404 
1405 	free(forkArgs);
1406 
1407 	// return to userland using the register frame the parent stored at fork()
1408 
1409 	arch_restore_fork_frame(&archArgs);
1410 		// This one won't return here
1411 
1412 	return 0;
1413 }
1414 
1415 
1416 static thread_id
1417 fork_team(void)
1418 {
1419 	struct thread *parentThread = thread_get_current_thread();
1420 	struct team *parentTeam = parentThread->team, *team;
1421 	struct fork_arg *forkArgs;
1422 	struct area_info info;
1423 	thread_id threadID;
1424 	cpu_status state;
1425 	status_t status;
1426 	int32 cookie;
1427 
1428 	TRACE(("fork_team(): team %ld\n", parentTeam->id));
1429 
1430 	if (parentTeam == team_get_kernel_team())
1431 		return B_NOT_ALLOWED;
1432 
1433 	// create a new team
1434 	// ToDo: this is very similar to team_create_team() - maybe we can do something about it :)
1435 
1436 	team = create_team_struct(parentTeam->name, false);
1437 	if (team == NULL)
1438 		return B_NO_MEMORY;
1439 
1440 	strlcpy(team->args, parentTeam->args, sizeof(team->args));
1441 
1442 	// Inherit the parent's user/group.
1443 	inherit_parent_user_and_group(team, parentTeam);
1444 
1445 	state = disable_interrupts();
1446 	GRAB_TEAM_LOCK();
1447 
1448 	hash_insert(sTeamHash, team);
1449 	insert_team_into_parent(parentTeam, team);
1450 	insert_team_into_group(parentTeam->group, team);
1451 	sUsedTeams++;
1452 
1453 	RELEASE_TEAM_LOCK();
1454 	restore_interrupts(state);
1455 
1456 	forkArgs = (struct fork_arg *)malloc(sizeof(struct fork_arg));
1457 	if (forkArgs == NULL) {
1458 		status = B_NO_MEMORY;
1459 		goto err1;
1460 	}
1461 
1462 	// create a new io_context for this team
1463 	team->io_context = vfs_new_io_context(parentTeam->io_context);
1464 	if (!team->io_context) {
1465 		status = B_NO_MEMORY;
1466 		goto err2;
1467 	}
1468 
1469 	// duplicate the realtime sem context
1470 	if (parentTeam->realtime_sem_context) {
1471 		team->realtime_sem_context = clone_realtime_sem_context(
1472 			parentTeam->realtime_sem_context);
1473 		if (team->realtime_sem_context == NULL) {
1474 			status = B_NO_MEMORY;
1475 			goto err25;
1476 		}
1477 	}
1478 
1479 	// create an address space for this team
1480 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1481 		&team->address_space);
1482 	if (status < B_OK)
1483 		goto err3;
1484 
1485 	// copy all areas of the team
1486 	// ToDo: should be able to handle stack areas differently (ie. don't have them copy-on-write)
1487 	// ToDo: all stacks of other threads than the current one could be left out
1488 
1489 	forkArgs->user_thread = NULL;
1490 
1491 	cookie = 0;
1492 	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
1493 		if (info.area == parentTeam->user_data_area) {
1494 			// don't clone the user area; just create a new one
1495 			status = create_team_user_data(team);
1496 			if (status != B_OK)
1497 				break;
1498 
1499 			forkArgs->user_thread = team_allocate_user_thread(team);
1500 		} else {
1501 			void *address;
1502 			area_id area = vm_copy_area(team->address_space->id, info.name,
1503 				&address, B_CLONE_ADDRESS, info.protection, info.area);
1504 			if (area < B_OK) {
1505 				status = area;
1506 				break;
1507 			}
1508 
1509 			if (info.area == parentThread->user_stack_area)
1510 				forkArgs->user_stack_area = area;
1511 		}
1512 	}
1513 
1514 	if (status < B_OK)
1515 		goto err4;
1516 
1517 	if (forkArgs->user_thread == NULL) {
1518 #if KDEBUG
1519 		panic("user data area not found, parent area is %ld",
1520 			parentTeam->user_data_area);
1521 #endif
1522 		status = B_ERROR;
1523 		goto err4;
1524 	}
1525 
1526 	forkArgs->user_stack_base = parentThread->user_stack_base;
1527 	forkArgs->user_stack_size = parentThread->user_stack_size;
1528 	forkArgs->user_local_storage = parentThread->user_local_storage;
1529 	forkArgs->sig_block_mask = parentThread->sig_block_mask;
1530 	arch_store_fork_frame(&forkArgs->arch_info);
1531 
1532 	// copy image list
1533 	image_info imageInfo;
1534 	cookie = 0;
1535 	while (get_next_image_info(parentTeam->id, &cookie, &imageInfo) == B_OK) {
1536 		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
1537 		if (image < 0)
1538 			goto err5;
1539 	}
1540 
1541 	// create a kernel thread under the context of the new team
1542 	threadID = spawn_kernel_thread_etc(fork_team_thread_start,
1543 		parentThread->name, parentThread->priority, forkArgs,
1544 		team->id, team->id);
1545 	if (threadID < 0) {
1546 		status = threadID;
1547 		goto err5;
1548 	}
1549 
1550 	// notify the debugger
1551 	user_debug_team_created(team->id);
1552 
1553 	T(TeamForked(threadID));
1554 
1555 	resume_thread(threadID);
1556 	return threadID;
1557 
1558 err5:
1559 	remove_images(team);
1560 err4:
1561 	vm_delete_address_space(team->address_space);
1562 err3:
1563 	delete_realtime_sem_context(team->realtime_sem_context);
1564 err25:
1565 	vfs_free_io_context(team->io_context);
1566 err2:
1567 	free(forkArgs);
1568 err1:
1569 	// remove the team structure from the team hash table and delete the team structure
1570 	state = disable_interrupts();
1571 	GRAB_TEAM_LOCK();
1572 
1573 	remove_team_from_group(team);
1574 	remove_team_from_parent(parentTeam, team);
1575 	hash_remove(sTeamHash, team);
1576 
1577 	RELEASE_TEAM_LOCK();
1578 	restore_interrupts(state);
1579 
1580 	delete_team_struct(team);
1581 
1582 	return status;
1583 }
1584 
1585 
1586 /*!	Returns whether the specified \a parent team has any children belonging
1587 	to the process group with the given \a groupID.
1588 	Must be called with the team lock held.
1589 */
1590 static bool
1591 has_children_in_group(struct team *parent, pid_t groupID)
1592 {
1593 	struct team *team;
1594 
1595 	struct process_group *group = team_get_process_group_locked(
1596 		parent->group->session, groupID);
1597 	if (group == NULL)
1598 		return false;
1599 
1600 	for (team = group->teams; team; team = team->group_next) {
1601 		if (team->parent == parent)
1602 			return true;
1603 	}
1604 
1605 	return false;
1606 }
1607 
1608 
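/*!	Returns the first entry in \a children matching \a id, using the same
	semantics as waitpid(): a positive \a id matches that thread/team ID,
	-1 matches any entry, and an \a id less than -1 matches any entry whose
	process group ID equals -id.
*/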
1609 static job_control_entry*
1610 get_job_control_entry(team_job_control_children* children, pid_t id)
1611 {
1612 	for (JobControlEntryList::Iterator it = children->entries.GetIterator();
1613 		 job_control_entry* entry = it.Next();) {
1614 
1615 		if (id > 0) {
1616 			if (entry->thread == id)
1617 				return entry;
1618 		} else if (id == -1) {
1619 			return entry;
1620 		} else {
1621 			pid_t processGroup
1622 				= (entry->team ? entry->team->group_id : entry->group_id);
1623 			if (processGroup == -id)
1624 				return entry;
1625 		}
1626 	}
1627 
1628 	return NULL;
1629 }
1630 
1631 
1632 static job_control_entry*
1633 get_job_control_entry(struct team* team, pid_t id, uint32 flags)
1634 {
1635 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
1636 
1637 	if (entry == NULL && (flags & WCONTINUED) != 0)
1638 		entry = get_job_control_entry(team->continued_children, id);
1639 
1640 	if (entry == NULL && (flags & WUNTRACED) != 0)
1641 		entry = get_job_control_entry(team->stopped_children, id);
1642 
1643 	return entry;
1644 }
1645 
1646 
1647 job_control_entry::job_control_entry()
1648 	:
1649 	has_group_ref(false)
1650 {
1651 }
1652 
1653 
1654 job_control_entry::~job_control_entry()
1655 {
1656 	if (has_group_ref) {
1657 		InterruptsSpinLocker locker(gTeamSpinlock);
1658 		release_process_group_ref(group_id);
1659 	}
1660 }
1661 
1662 
1663 /*!	Team and thread lock must be held.
1664 */
1665 void
1666 job_control_entry::InitDeadState()
1667 {
1668 	if (team != NULL) {
1669 		struct thread* thread = team->main_thread;
1670 		group_id = team->group_id;
1671 		this->thread = thread->id;
1672 		status = thread->exit.status;
1673 		reason = thread->exit.reason;
1674 		signal = thread->exit.signal;
1675 		team = NULL;
1676 		acquire_process_group_ref(group_id);
1677 		has_group_ref = true;
1678 	}
1679 }
1680 
1681 
1682 job_control_entry&
1683 job_control_entry::operator=(const job_control_entry& other)
1684 {
1685 	state = other.state;
1686 	thread = other.thread;
1687 	has_group_ref = false;
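	// the copy does not take over the group reference; the original entry
	// keeps (and eventually releases) it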
1688 	team = other.team;
1689 	group_id = other.group_id;
1690 	status = other.status;
1691 	reason = other.reason;
1692 	signal = other.signal;
1693 
1694 	return *this;
1695 }
1696 
1697 
1698 /*! This is the kernel backend for waitpid(). It reports more detail about
1699 	why a thread has died than waitpid() itself can convey.
1700 */
1701 static thread_id
1702 wait_for_child(pid_t child, uint32 flags, int32 *_reason,
1703 	status_t *_returnCode)
1704 {
1705 	struct thread* thread = thread_get_current_thread();
1706 	struct team* team = thread->team;
1707 	struct job_control_entry foundEntry;
1708 	struct job_control_entry* freeDeathEntry = NULL;
1709 	status_t status = B_OK;
1710 
1711 	TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));
1712 
1713 	T(WaitForChild(child, flags));
1714 
1715 	if (child == 0) {
1716 		// wait for all children in the process group of the calling team
1717 		child = -team->group_id;
1718 	}
1719 
1720 	bool ignoreFoundEntries = false;
1721 	bool ignoreFoundEntriesChecked = false;
1722 
1723 	while (true) {
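		// Each iteration: look for a matching job control entry; if there is
		// none, either fail right away (no children / WNOHANG) or block on
		// the dead children condition variable and try again.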
1724 		InterruptsSpinLocker locker(gTeamSpinlock);
1725 
1726 		// check whether any condition holds
1727 		job_control_entry* entry = get_job_control_entry(team, child, flags);
1728 
1729 		// If we don't have an entry yet, check whether there are any children
1730 		// complying with the process group specification at all.
1731 		if (entry == NULL) {
1732 			// No success yet -- check whether there are any children we could
1733 			// wait for.
1734 			bool childrenExist = false;
1735 			if (child == -1) {
1736 				childrenExist = team->children != NULL;
1737 			} else if (child < -1) {
1738 				childrenExist = has_children_in_group(team, -child);
1739 			} else {
1740 				if (struct team* childTeam = team_get_team_struct_locked(child))
1741 					childrenExist = childTeam->parent == team;
1742 			}
1743 
1744 			if (!childrenExist) {
1745 				// there is no child we could wait for
1746 				status = ECHILD;
1747 			} else {
1748 				// the children we're waiting for are still running
1749 				status = B_WOULD_BLOCK;
1750 			}
1751 		} else {
1752 			// got something
1753 			foundEntry = *entry;
1754 			if (entry->state == JOB_CONTROL_STATE_DEAD) {
1755 				// The child is dead. Reap its death entry.
1756 				freeDeathEntry = entry;
1757 				team->dead_children->entries.Remove(entry);
1758 				team->dead_children->count--;
1759 			} else {
1760 				// The child is well. Reset its job control state.
1761 				team_set_job_control_state(entry->team,
1762 					JOB_CONTROL_STATE_NONE, 0, false);
1763 			}
1764 		}
1765 
1766 		// If we haven't got anything yet, prepare for waiting for the
1767 		// condition variable.
1768 		ConditionVariableEntry deadWaitEntry;
1769 
1770 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
1771 			team->dead_children->condition_variable.Add(&deadWaitEntry);
1772 
1773 		locker.Unlock();
1774 
1775 		// we got our entry and can return to our caller
1776 		if (status == B_OK) {
1777 			if (ignoreFoundEntries) {
1778 				// ... unless we shall ignore found entries
1779 				delete freeDeathEntry;
1780 				freeDeathEntry = NULL;
1781 				continue;
1782 			}
1783 
1784 			break;
1785 		}
1786 
1787 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
1788 			T(WaitForChildDone(status));
1789 			return status;
1790 		}
1791 
1792 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
1793 		if (status == B_INTERRUPTED) {
1794 			T(WaitForChildDone(status));
1795 			return status;
1796 		}
1797 
1798 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
1799 		// all our children are dead and fail with ECHILD. We check the
1800 		// condition at this point.
1801 		if (!ignoreFoundEntriesChecked) {
1802 			struct sigaction& handler = thread->sig_action[SIGCHLD - 1];
1803 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
1804 				|| handler.sa_handler == SIG_IGN) {
1805 				ignoreFoundEntries = true;
1806 			}
1807 
1808 			ignoreFoundEntriesChecked = true;
1809 		}
1810 	}
1811 
1812 	delete freeDeathEntry;
1813 
1814 	// when we got here, we have a valid death entry, and
1815 	// already got unregistered from the team or group
1816 	int reason = 0;
1817 	switch (foundEntry.state) {
1818 		case JOB_CONTROL_STATE_DEAD:
1819 			reason = foundEntry.reason;
1820 			break;
1821 		case JOB_CONTROL_STATE_STOPPED:
1822 			reason = THREAD_STOPPED;
1823 			break;
1824 		case JOB_CONTROL_STATE_CONTINUED:
1825 			reason = THREAD_CONTINUED;
1826 			break;
1827 		case JOB_CONTROL_STATE_NONE:
1828 			// can't happen
1829 			break;
1830 	}
1831 
1832 	*_returnCode = foundEntry.status;
1833 	*_reason = (foundEntry.signal << 16) | reason;
1834 
1835 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
1836 	// status is available.
1837 	if (is_signal_blocked(SIGCHLD)) {
1838 		InterruptsSpinLocker locker(gTeamSpinlock);
1839 
1840 		if (get_job_control_entry(team, child, flags) == NULL)
1841 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));
1842 	}
1843 
1844 	// When the team is dead, the main thread continues to live in the kernel
1845 	// team for a very short time. To avoid surprises for the caller, we wait
1846 	// until the thread is really gone.
1847 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
1848 		wait_for_thread(foundEntry.thread, NULL);
1849 
1850 	T(WaitForChildDone(foundEntry));
1851 
1852 	return foundEntry.thread;
1853 }
1854 
1855 
1856 /*! Fills the team_info structure with information from the specified
1857 	team.
1858 	The team lock must be held when called.
1859 */
1860 static status_t
1861 fill_team_info(struct team *team, team_info *info, size_t size)
1862 {
1863 	if (size != sizeof(team_info))
1864 		return B_BAD_VALUE;
1865 
1866 	// ToDo: Set more information for team_info
1867 	memset(info, 0, size);
1868 
1869 	info->team = team->id;
1870 	info->thread_count = team->num_threads;
1871 	info->image_count = count_images(team);
1872 	//info->area_count =
1873 	info->debugger_nub_thread = team->debug_info.nub_thread;
1874 	info->debugger_nub_port = team->debug_info.nub_port;
1875 	//info->uid =
1876 	//info->gid =
1877 
1878 	strlcpy(info->args, team->args, sizeof(info->args));
1879 	info->argc = 1;
1880 
1881 	return B_OK;
1882 }
1883 
1884 
1885 /*!	Updates the \c orphaned field of a process_group and returns its new value.
1886 	Interrupts must be disabled and team lock be held.
1887 */
1888 static bool
1889 update_orphaned_process_group(process_group* group, pid_t dyingProcess)
1890 {
1891 	// Orphaned Process Group: "A process group in which the parent of every
1892 	// member is either itself a member of the group or is not a member of the
1893 	// group's session." (Open Group Base Specs Issue 6)
1894 
1895 	// once orphaned, things won't change (exception: cf. setpgid())
1896 	if (group->orphaned)
1897 		return true;
1898 
1899 	struct team* team = group->teams;
1900 	while (team != NULL) {
1901 		struct team* parent = team->parent;
1902 		if (team->id != dyingProcess && parent != NULL
1903 			&& parent->id != dyingProcess
1904 			&& parent->group_id != group->id
1905 			&& parent->session_id == group->session->id) {
1906 			return false;
1907 		}
1908 
1909 		team = team->group_next;
1910 	}
1911 
1912 	group->orphaned = true;
1913 	return true;
1914 }
1915 
1916 
1917 /*!	Returns whether the process group contains stopped processes.
1918 	Interrupts must be disabled and team lock be held.
1919 */
1920 static bool
1921 process_group_has_stopped_processes(process_group* group)
1922 {
1923 	SpinLocker _(gThreadSpinlock);
1924 
1925 	struct team* team = group->teams;
1926 	while (team != NULL) {
1927 		if (team->main_thread->state == B_THREAD_SUSPENDED)
1928 			return true;
1929 
1930 		team = team->group_next;
1931 	}
1932 
1933 	return false;
1934 }
1935 
1936 
1937 //	#pragma mark - Private kernel API
1938 
1939 
1940 status_t
1941 team_init(kernel_args *args)
1942 {
1943 	struct process_session *session;
1944 	struct process_group *group;
1945 
1946 	// create the team hash table
1947 	sTeamHash = hash_init(16, offsetof(struct team, next),
1948 		&team_struct_compare, &team_struct_hash);
1949 
1950 	sGroupHash = hash_init(16, offsetof(struct process_group, next),
1951 		&process_group_compare, &process_group_hash);
1952 
1953 	// create initial session and process groups
1954 
1955 	session = create_process_session(1);
1956 	if (session == NULL)
1957 		panic("Could not create initial session.\n");
1958 
1959 	group = create_process_group(1);
1960 	if (group == NULL)
1961 		panic("Could not create initial process group.\n");
1962 
1963 	insert_group_into_session(session, group);
1964 
1965 	// create the kernel team
1966 	sKernelTeam = create_team_struct("kernel_team", true);
1967 	if (sKernelTeam == NULL)
1968 		panic("could not create kernel team!\n");
1969 	strcpy(sKernelTeam->args, sKernelTeam->name);
1970 	sKernelTeam->state = TEAM_STATE_NORMAL;
1971 
1972 	sKernelTeam->saved_set_uid = 0;
1973 	sKernelTeam->real_uid = 0;
1974 	sKernelTeam->effective_uid = 0;
1975 	sKernelTeam->saved_set_gid = 0;
1976 	sKernelTeam->real_gid = 0;
1977 	sKernelTeam->effective_gid = 0;
1978 	sKernelTeam->supplementary_groups = NULL;
1979 	sKernelTeam->supplementary_group_count = 0;
1980 
1981 	insert_team_into_group(group, sKernelTeam);
1982 
1983 	sKernelTeam->io_context = vfs_new_io_context(NULL);
1984 	if (sKernelTeam->io_context == NULL)
1985 		panic("could not create io_context for kernel team!\n");
1986 
1987 	// stick it in the team hash
1988 	hash_insert(sTeamHash, sKernelTeam);
1989 
1990 	add_debugger_command_etc("team", &dump_team_info,
1991 		"Dump info about a particular team",
1992 		"[ <id> | <address> | <name> ]\n"
1993 		"Prints information about the specified team. If no argument is given\n"
1994 		"the current team is selected.\n"
1995 		"  <id>       - The ID of the team.\n"
1996 		"  <address>  - The address of the team structure.\n"
1997 		"  <name>     - The team's name.\n", 0);
1998 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
1999 		"\n"
2000 		"Prints a list of all existing teams.\n", 0);
2001 	return 0;
2002 }
2003 
2004 
2005 int32
2006 team_max_teams(void)
2007 {
2008 	return sMaxTeams;
2009 }
2010 
2011 
2012 int32
2013 team_used_teams(void)
2014 {
2015 	return sUsedTeams;
2016 }
2017 
2018 
2019 /*! Returns the team's death (job control) entry for the given dead child.
2020 	You need to hold the team lock when calling this function.
2021 */
2022 job_control_entry*
2023 team_get_death_entry(struct team *team, thread_id child, bool* _deleteEntry)
2024 {
2025 	if (child <= 0)
2026 		return NULL;
2027 
2028 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2029 		child);
2030 	if (entry) {
2031 		// remove the entry only, if the caller is the parent of the found team
2032 		if (team_get_current_team_id() == entry->thread) {
2033 			team->dead_children->entries.Remove(entry);
2034 			team->dead_children->count--;
2035 			*_deleteEntry = true;
2036 		} else {
2037 			*_deleteEntry = false;
2038 		}
2039 	}
2040 
2041 	return entry;
2042 }
2043 
2044 
2045 /*! Quick check to see if we have a valid team ID. */
2046 bool
2047 team_is_valid(team_id id)
2048 {
2049 	struct team *team;
2050 	cpu_status state;
2051 
2052 	if (id <= 0)
2053 		return false;
2054 
2055 	state = disable_interrupts();
2056 	GRAB_TEAM_LOCK();
2057 
2058 	team = team_get_team_struct_locked(id);
2059 
2060 	RELEASE_TEAM_LOCK();
2061 	restore_interrupts(state);
2062 
2063 	return team != NULL;
2064 }
2065 
2066 
2067 struct team *
2068 team_get_team_struct_locked(team_id id)
2069 {
2070 	struct team_key key;
2071 	key.id = id;
2072 
2073 	return (struct team*)hash_lookup(sTeamHash, &key);
2074 }
2075 
2076 
2077 /*! Searches the given session for the specified process group ID.
2078 	You must hold the team lock when you call this function.
2079 */
2080 struct process_group *
2081 team_get_process_group_locked(struct process_session *session, pid_t id)
2082 {
2083 	struct process_group *group;
2084 	struct team_key key;
2085 	key.id = id;
2086 
2087 	group = (struct process_group *)hash_lookup(sGroupHash, &key);
2088 	if (group != NULL && (session == NULL || session == group->session))
2089 		return group;
2090 
2091 	return NULL;
2092 }
2093 
2094 
2095 void
2096 team_delete_process_group(struct process_group *group)
2097 {
2098 	if (group == NULL)
2099 		return;
2100 
2101 	TRACE(("team_delete_process_group(id = %ld)\n", group->id));
2102 
2103 	// remove_group_from_session() keeps this pointer around
2104 	// only if the session can be freed as well
2105 	if (group->session) {
2106 		TRACE(("team_delete_process_group(): frees session %ld\n", group->session->id));
2107 		free(group->session);
2108 	}
2109 
2110 	free(group);
2111 }
2112 
2113 
2114 void
2115 team_set_controlling_tty(int32 ttyIndex)
2116 {
2117 	struct team* team = thread_get_current_thread()->team;
2118 
2119 	InterruptsSpinLocker _(gTeamSpinlock);
2120 
2121 	team->group->session->controlling_tty = ttyIndex;
2122 	team->group->session->foreground_group = -1;
2123 }
2124 
2125 
2126 int32
2127 team_get_controlling_tty()
2128 {
2129 	struct team* team = thread_get_current_thread()->team;
2130 
2131 	InterruptsSpinLocker _(gTeamSpinlock);
2132 
2133 	return team->group->session->controlling_tty;
2134 }
2135 
2136 
2137 status_t
2138 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2139 {
2140 	struct thread* thread = thread_get_current_thread();
2141 	struct team* team = thread->team;
2142 
2143 	InterruptsSpinLocker locker(gTeamSpinlock);
2144 
2145 	process_session* session = team->group->session;
2146 
2147 	// must be the controlling tty of the calling process
2148 	if (session->controlling_tty != ttyIndex)
2149 		return ENOTTY;
2150 
2151 	// check process group -- must belong to our session
2152 	process_group* group = team_get_process_group_locked(session,
2153 		processGroupID);
2154 	if (group == NULL)
2155 		return B_BAD_VALUE;
2156 
2157 	// If we are a background group, we can only do this unharmed if we ignore
2158 	// or block SIGTTOU. Otherwise the group gets a SIGTTOU.
2159 	if (session->foreground_group != -1
2160 		&& session->foreground_group != team->group_id
2161 		&& thread->sig_action[SIGTTOU - 1].sa_handler != SIG_IGN
2162 		&& !is_signal_blocked(SIGTTOU)) {
2163 		pid_t groupID = team->group->id;
2164 		locker.Unlock();
2165 		send_signal(-groupID, SIGTTOU);
2166 		return B_INTERRUPTED;
2167 	}
2168 
2169 	team->group->session->foreground_group = processGroupID;
2170 
2171 	return B_OK;
2172 }
2173 
2174 
2175 /*!	Removes the specified team from the global team hash, and from its parent.
2176 	It also moves all of its children up to the parent.
2177 	You must hold the team lock when you call this function.
2178 */
2179 void
2180 team_remove_team(struct team *team)
2181 {
2182 	struct team *parent = team->parent;
2183 
2184 	// remember how long this team lasted
2185 	parent->dead_children->kernel_time += team->dead_threads_kernel_time
2186 		+ team->dead_children->kernel_time;
2187 	parent->dead_children->user_time += team->dead_threads_user_time
2188 		+ team->dead_children->user_time;
2189 
2190 	// Also grab the thread spinlock while removing the team from the hash.
2191 	// This makes the following sequence safe: grab teams lock, lookup team,
2192 	// grab threads lock, unlock teams lock,
2193 	// mutex_lock_threads_lock(<team related lock>), as used in the VFS code to
2194 	// lock another team's IO context.
2195 	GRAB_THREAD_LOCK();
2196 	hash_remove(sTeamHash, team);
2197 	RELEASE_THREAD_LOCK();
2198 	sUsedTeams--;
2199 
2200 	team->state = TEAM_STATE_DEATH;
2201 
2202 	// If we're a controlling process (i.e. a session leader with controlling
2203 	// terminal), there's a bit of signalling we have to do.
2204 	if (team->session_id == team->id
2205 		&& team->group->session->controlling_tty >= 0) {
2206 		process_session* session = team->group->session;
2207 
2208 		session->controlling_tty = -1;
2209 
2210 		// send SIGHUP to the foreground
2211 		if (session->foreground_group >= 0) {
2212 			send_signal_etc(-session->foreground_group, SIGHUP,
2213 				SIGNAL_FLAG_TEAMS_LOCKED);
2214 		}
2215 
2216 		// send SIGHUP + SIGCONT to all newly-orphaned process groups with
2217 		// stopped processes
2218 		struct team* child = team->children;
2219 		while (child != NULL) {
2220 			process_group* childGroup = child->group;
2221 			if (!childGroup->orphaned
2222 				&& update_orphaned_process_group(childGroup, team->id)
2223 				&& process_group_has_stopped_processes(childGroup)) {
2224 				send_signal_etc(-childGroup->id, SIGHUP,
2225 					SIGNAL_FLAG_TEAMS_LOCKED);
2226 				send_signal_etc(-childGroup->id, SIGCONT,
2227 					SIGNAL_FLAG_TEAMS_LOCKED);
2228 			}
2229 
2230 			child = child->siblings_next;
2231 		}
2232 	} else {
2233 		// update "orphaned" flags of all children's process groups
2234 		struct team* child = team->children;
2235 		while (child != NULL) {
2236 			process_group* childGroup = child->group;
2237 			if (!childGroup->orphaned)
2238 				update_orphaned_process_group(childGroup, team->id);
2239 
2240 			child = child->siblings_next;
2241 		}
2242 
2243 		// update "orphaned" flag of this team's process group
2244 		update_orphaned_process_group(team->group, team->id);
2245 	}
2246 
2247 	// reparent each of the team's children
2248 	reparent_children(team);
2249 
2250 	// remove us from our process group
2251 	remove_team_from_group(team);
2252 
2253 	// remove us from our parent
2254 	remove_team_from_parent(parent, team);
2255 }
2256 
2257 
2258 void
2259 team_delete_team(struct team *team)
2260 {
2261 	team_id teamID = team->id;
2262 	port_id debuggerPort = -1;
2263 	cpu_status state;
2264 
2265 	if (team->num_threads > 0) {
2266 		// there are other threads still in this team,
2267 		// cycle through and signal kill on each of the threads
2268 		// ToDo: this can be optimized. There's got to be a better solution.
2269 		struct thread *temp_thread;
2270 		char death_sem_name[B_OS_NAME_LENGTH];
2271 		sem_id deathSem;
2272 		int32 threadCount;
2273 
2274 		sprintf(death_sem_name, "team %ld death sem", teamID);
2275 		deathSem = create_sem(0, death_sem_name);
2276 		if (deathSem < 0)
2277 			panic("team_delete_team: cannot init death sem for team %ld\n", teamID);
2278 
2279 		state = disable_interrupts();
2280 		GRAB_TEAM_LOCK();
2281 
2282 		team->death_sem = deathSem;
2283 		threadCount = team->num_threads;
2284 
2285 		// If the team was being debugged, that will stop with the termination
2286 		// of the nub thread. The team structure has already been removed from
2287 		// the team hash table at this point, so no one can install a debugger
2288 		// anymore. We fetch the debugger's port to send it a message at the
2289 		// bitter end.
2290 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2291 
2292 		if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2293 			debuggerPort = team->debug_info.debugger_port;
2294 
2295 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2296 
2297 		// we can safely walk the list because of the lock. no new threads can be created
2298 		// because of the TEAM_STATE_DEATH flag on the team
2299 		temp_thread = team->thread_list;
2300 		while (temp_thread) {
2301 			struct thread *next = temp_thread->team_next;
2302 
2303 			send_signal_etc(temp_thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2304 			temp_thread = next;
2305 		}
2306 
2307 		RELEASE_TEAM_LOCK();
2308 		restore_interrupts(state);
2309 
2310 		// wait until all threads in team are dead.
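		// each exiting thread releases the death semaphore once, so acquiring
		// threadCount units blocks until they have all exited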
2311 		acquire_sem_etc(team->death_sem, threadCount, 0, 0);
2312 		delete_sem(team->death_sem);
2313 	}
2314 
2315 	// If someone is waiting for this team to be loaded, but it dies
2316 	// unexpectedly before being done, we need to notify the waiting
2317 	// thread now.
2318 
2319 	state = disable_interrupts();
2320 	GRAB_TEAM_LOCK();
2321 
2322 	if (team->loading_info) {
2323 		// there's indeed someone waiting
2324 		struct team_loading_info *loadingInfo = team->loading_info;
2325 		team->loading_info = NULL;
2326 
2327 		loadingInfo->result = B_ERROR;
2328 		loadingInfo->done = true;
2329 
2330 		GRAB_THREAD_LOCK();
2331 
2332 		// wake up the waiting thread
2333 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
2334 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
2335 
2336 		RELEASE_THREAD_LOCK();
2337 	}
2338 
2339 	RELEASE_TEAM_LOCK();
2340 	restore_interrupts(state);
2341 
2342 	// notify team watchers
2343 
2344 	{
2345 		// we're not reachable from anyone anymore at this point, so we
2346 		// can safely access the list without any locking
2347 		struct team_watcher *watcher;
2348 		while ((watcher = (struct team_watcher*)list_remove_head_item(
2349 				&team->watcher_list)) != NULL) {
2350 			watcher->hook(teamID, watcher->data);
2351 			free(watcher);
2352 		}
2353 	}
2354 
2355 	// free team resources
2356 
2357 	vfs_free_io_context(team->io_context);
2358 	delete_realtime_sem_context(team->realtime_sem_context);
2359 	xsi_sem_undo(team);
2360 	delete_owned_ports(teamID);
2361 	sem_delete_owned_sems(teamID);
2362 	remove_images(team);
2363 	vm_delete_address_space(team->address_space);
2364 
2365 	delete_team_struct(team);
2366 
2367 	// notify the debugger that the team is gone
2368 	user_debug_team_deleted(teamID, debuggerPort);
2369 }
2370 
2371 
2372 struct team *
2373 team_get_kernel_team(void)
2374 {
2375 	return sKernelTeam;
2376 }
2377 
2378 
2379 team_id
2380 team_get_kernel_team_id(void)
2381 {
2382 	if (!sKernelTeam)
2383 		return 0;
2384 
2385 	return sKernelTeam->id;
2386 }
2387 
2388 
2389 team_id
2390 team_get_current_team_id(void)
2391 {
2392 	return thread_get_current_thread()->team->id;
2393 }
2394 
2395 
2396 status_t
2397 team_get_address_space(team_id id, vm_address_space **_addressSpace)
2398 {
2399 	cpu_status state;
2400 	struct team *team;
2401 	status_t status;
2402 
2403 	// ToDo: we need to do something about B_SYSTEM_TEAM vs. its real ID (1)
2404 	if (id == 1) {
2405 		// we're the kernel team, so we don't have to go through all
2406 		// the hassle (locking and hash lookup)
2407 		*_addressSpace = vm_get_kernel_address_space();
2408 		return B_OK;
2409 	}
2410 
2411 	state = disable_interrupts();
2412 	GRAB_TEAM_LOCK();
2413 
2414 	team = team_get_team_struct_locked(id);
2415 	if (team != NULL) {
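		// acquire a reference to the address space for the caller, who is
		// responsible for putting it again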
2416 		atomic_add(&team->address_space->ref_count, 1);
2417 		*_addressSpace = team->address_space;
2418 		status = B_OK;
2419 	} else
2420 		status = B_BAD_VALUE;
2421 
2422 	RELEASE_TEAM_LOCK();
2423 	restore_interrupts(state);
2424 
2425 	return status;
2426 }
2427 
2428 
2429 /*!	Sets the team's job control state.
2430 	Interrupts must be disabled and the team lock be held.
2431 	\a threadsLocked indicates whether the thread lock is being held, too.
2432 */
2433 void
2434 team_set_job_control_state(struct team* team, job_control_state newState,
2435 	int signal, bool threadsLocked)
2436 {
2437 	if (team == NULL || team->job_control_entry == NULL)
2438 		return;
2439 
2440 	// don't touch anything, if the state stays the same or the team is already
2441 	// dead
2442 	job_control_entry* entry = team->job_control_entry;
2443 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
2444 		return;
2445 
2446 	T(SetJobControlState(team->id, newState, signal));
2447 
2448 	// remove from the old list
2449 	switch (entry->state) {
2450 		case JOB_CONTROL_STATE_NONE:
2451 			// entry is in no list ATM
2452 			break;
2453 		case JOB_CONTROL_STATE_DEAD:
2454 			// can't get here
2455 			break;
2456 		case JOB_CONTROL_STATE_STOPPED:
2457 			team->parent->stopped_children->entries.Remove(entry);
2458 			break;
2459 		case JOB_CONTROL_STATE_CONTINUED:
2460 			team->parent->continued_children->entries.Remove(entry);
2461 			break;
2462 	}
2463 
2464 	entry->state = newState;
2465 	entry->signal = signal;
2466 
2467 	// add to new list
2468 	team_job_control_children* childList = NULL;
2469 	switch (entry->state) {
2470 		case JOB_CONTROL_STATE_NONE:
2471 			// entry doesn't get into any list
2472 			break;
2473 		case JOB_CONTROL_STATE_DEAD:
2474 			childList = team->parent->dead_children;
2475 			team->parent->dead_children->count++;
2476 			break;
2477 		case JOB_CONTROL_STATE_STOPPED:
2478 			childList = team->parent->stopped_children;
2479 			break;
2480 		case JOB_CONTROL_STATE_CONTINUED:
2481 			childList = team->parent->continued_children;
2482 			break;
2483 	}
2484 
2485 	if (childList != NULL) {
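		// wait_for_child() always waits on the parent's dead_children condition
		// variable, so notify it for stopped/continued changes as well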
2486 		childList->entries.Add(entry);
2487 		team->parent->dead_children->condition_variable.NotifyAll(
2488 			threadsLocked);
2489 	}
2490 }
2491 
2492 
2493 /*! Adds a hook to the team that is called as soon as this
2494 	team goes away.
2495 	This call might get public in the future.
2496 */
2497 status_t
2498 start_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2499 {
2500 	struct team_watcher *watcher;
2501 	struct team *team;
2502 	cpu_status state;
2503 
2504 	if (hook == NULL || teamID < B_OK)
2505 		return B_BAD_VALUE;
2506 
2507 	watcher = (struct team_watcher*)malloc(sizeof(struct team_watcher));
2508 	if (watcher == NULL)
2509 		return B_NO_MEMORY;
2510 
2511 	watcher->hook = hook;
2512 	watcher->data = data;
2513 
2514 	// find team and add watcher
2515 
2516 	state = disable_interrupts();
2517 	GRAB_TEAM_LOCK();
2518 
2519 	team = team_get_team_struct_locked(teamID);
2520 	if (team != NULL)
2521 		list_add_item(&team->watcher_list, watcher);
2522 
2523 	RELEASE_TEAM_LOCK();
2524 	restore_interrupts(state);
2525 
2526 	if (team == NULL) {
2527 		free(watcher);
2528 		return B_BAD_TEAM_ID;
2529 	}
2530 
2531 	return B_OK;
2532 }
2533 
2534 
2535 status_t
2536 stop_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2537 {
2538 	struct team_watcher *watcher = NULL;
2539 	struct team *team;
2540 	cpu_status state;
2541 
2542 	if (hook == NULL || teamID < B_OK)
2543 		return B_BAD_VALUE;
2544 
2545 	// find team and remove watcher (if present)
2546 
2547 	state = disable_interrupts();
2548 	GRAB_TEAM_LOCK();
2549 
2550 	team = team_get_team_struct_locked(teamID);
2551 	if (team != NULL) {
2552 		// search for watcher
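		// passing a NULL cursor makes list_get_next_item() start at the list head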
2553 		while ((watcher = (struct team_watcher*)list_get_next_item(
2554 				&team->watcher_list, watcher)) != NULL) {
2555 			if (watcher->hook == hook && watcher->data == data) {
2556 				// got it!
2557 				list_remove_item(&team->watcher_list, watcher);
2558 				break;
2559 			}
2560 		}
2561 	}
2562 
2563 	RELEASE_TEAM_LOCK();
2564 	restore_interrupts(state);
2565 
2566 	if (watcher == NULL)
2567 		return B_ENTRY_NOT_FOUND;
2568 
2569 	free(watcher);
2570 	return B_OK;
2571 }
2572 
2573 
2574 /*!	The team lock must be held or the team must still be single threaded.
2575 */
2576 struct user_thread*
2577 team_allocate_user_thread(struct team* team)
2578 {
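	// user_thread structures are sub-allocated from the user data area that is
	// mapped into the team's address space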
2579 	if (team->user_data == 0)
2580 		return NULL;
2581 
2582 	user_thread* thread = NULL;
2583 
2584 	// take an entry from the free list, if any
2585 	if (struct free_user_thread* entry = team->free_user_threads) {
2586 		thread = entry->thread;
2587 		team->free_user_threads = entry->next;
2588 		deferred_free(entry);
2589 		return thread;
2590 	} else {
2591 		// enough space left?
2592 		size_t needed = _ALIGN(sizeof(user_thread));
2593 		if (team->user_data_size - team->used_user_data < needed)
2594 			return NULL;
2595 		// TODO: This imposes a per team thread limit! We should resize the
2596 		// area, if necessary. That's problematic at this point, though, since
2597 		// we've got the team lock.
2598 
2599 		thread = (user_thread*)(team->user_data + team->used_user_data);
2600 		team->used_user_data += needed;
2601 	}
2602 
2603 	thread->defer_signals = 0;
2604 	thread->pending_signals = 0;
2605 	thread->wait_status = B_OK;
2606 
2607 	return thread;
2608 }
2609 
2610 
2611 /*!	The team lock must not be held. \a thread must be the current thread.
2612 */
2613 void
2614 team_free_user_thread(struct thread* thread)
2615 {
2616 	user_thread* userThread = thread->user_thread;
2617 	if (userThread == NULL)
2618 		return;
2619 
2620 	// create a free list entry
2621 	free_user_thread* entry
2622 		= (free_user_thread*)malloc(sizeof(free_user_thread));
2623 	if (entry == NULL) {
2624 		// we have to leak the user thread :-/
2625 		return;
2626 	}
2627 
2628 	InterruptsSpinLocker _(gTeamSpinlock);
2629 
2630 	entry->thread = userThread;
2631 	entry->next = thread->team->free_user_threads;
2632 	thread->team->free_user_threads = entry;
2633 }
2634 
2635 
2636 //	#pragma mark - Public kernel API
2637 
2638 
2639 thread_id
2640 load_image(int32 argCount, const char **args, const char **env)
2641 {
2642 	// we need to flatten the args and environment
2643 
2644 	if (args == NULL)
2645 		return B_BAD_VALUE;
2646 
2647 	// determine total needed size
2648 	int32 argSize = 0;
2649 	for (int32 i = 0; i < argCount; i++)
2650 		argSize += strlen(args[i]) + 1;
2651 
2652 	int32 envCount = 0;
2653 	int32 envSize = 0;
2654 	while (env != NULL && env[envCount] != NULL)
2655 		envSize += strlen(env[envCount++]) + 1;
2656 
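	// flat layout: argCount + 1 and envCount + 1 pointer slots (each vector is
	// NULL terminated), followed by the argument and environment strings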
2657 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
2658 	if (size > MAX_PROCESS_ARGS_SIZE)
2659 		return B_TOO_MANY_ARGS;
2660 
2661 	// allocate space
2662 	char** flatArgs = (char**)malloc(size);
2663 	if (flatArgs == NULL)
2664 		return B_NO_MEMORY;
2665 
2666 	char** slot = flatArgs;
2667 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
2668 
2669 	// copy arguments and environment
2670 	for (int32 i = 0; i < argCount; i++) {
2671 		int32 argSize = strlen(args[i]) + 1;
2672 		memcpy(stringSpace, args[i], argSize);
2673 		*slot++ = stringSpace;
2674 		stringSpace += argSize;
2675 	}
2676 
2677 	*slot++ = NULL;
2678 
2679 	for (int32 i = 0; i < envCount; i++) {
2680 		int32 envSize = strlen(env[i]) + 1;
2681 		memcpy(stringSpace, env[i], envSize);
2682 		*slot++ = stringSpace;
2683 		stringSpace += envSize;
2684 	}
2685 
2686 	*slot++ = NULL;
2687 
2688 	thread_id thread = load_image_etc(flatArgs, size, argCount, envCount,
2689 		B_NORMAL_PRIORITY, B_WAIT_TILL_LOADED, -1, 0);
2690 
2691 	free(flatArgs);
2692 		// load_image_etc() unset our variable if it took over ownership
2693 
2694 	return thread;
2695 }
2696 
2697 
2698 status_t
2699 wait_for_team(team_id id, status_t *_returnCode)
2700 {
2701 	struct team *team;
2702 	thread_id thread;
2703 	cpu_status state;
2704 
2705 	// find main thread and wait for that
2706 
2707 	state = disable_interrupts();
2708 	GRAB_TEAM_LOCK();
2709 
2710 	team = team_get_team_struct_locked(id);
2711 	if (team != NULL && team->main_thread != NULL)
2712 		thread = team->main_thread->id;
2713 	else
2714 		thread = B_BAD_THREAD_ID;
2715 
2716 	RELEASE_TEAM_LOCK();
2717 	restore_interrupts(state);
2718 
2719 	if (thread < 0)
2720 		return thread;
2721 
2722 	return wait_for_thread(thread, _returnCode);
2723 }
2724 
2725 
2726 status_t
2727 kill_team(team_id id)
2728 {
2729 	status_t status = B_OK;
2730 	thread_id threadID = -1;
2731 	struct team *team;
2732 	cpu_status state;
2733 
2734 	state = disable_interrupts();
2735 	GRAB_TEAM_LOCK();
2736 
2737 	team = team_get_team_struct_locked(id);
2738 	if (team != NULL) {
2739 		if (team != sKernelTeam) {
2740 			threadID = team->id;
2741 				// the team ID is the same as the ID of its main thread
2742 		} else
2743 			status = B_NOT_ALLOWED;
2744 	} else
2745 		status = B_BAD_THREAD_ID;
2746 
2747 	RELEASE_TEAM_LOCK();
2748 	restore_interrupts(state);
2749 
2750 	if (status < B_OK)
2751 		return status;
2752 
2753 	// just kill the main thread in the team. The cleanup code there will
2754 	// take care of the team
2755 	return kill_thread(threadID);
2756 }
2757 
2758 
2759 status_t
2760 _get_team_info(team_id id, team_info *info, size_t size)
2761 {
2762 	cpu_status state;
2763 	status_t status = B_OK;
2764 	struct team *team;
2765 
2766 	state = disable_interrupts();
2767 	GRAB_TEAM_LOCK();
2768 
2769 	if (id == B_CURRENT_TEAM)
2770 		team = thread_get_current_thread()->team;
2771 	else
2772 		team = team_get_team_struct_locked(id);
2773 
2774 	if (team == NULL) {
2775 		status = B_BAD_TEAM_ID;
2776 		goto err;
2777 	}
2778 
2779 	status = fill_team_info(team, info, size);
2780 
2781 err:
2782 	RELEASE_TEAM_LOCK();
2783 	restore_interrupts(state);
2784 
2785 	return status;
2786 }
2787 
2788 
2789 status_t
2790 _get_next_team_info(int32 *cookie, team_info *info, size_t size)
2791 {
2792 	status_t status = B_BAD_TEAM_ID;
2793 	struct team *team = NULL;
2794 	int32 slot = *cookie;
2795 	team_id lastTeamID;
2796 	cpu_status state;
2797 
2798 	if (slot < 1)
2799 		slot = 1;
2800 
2801 	state = disable_interrupts();
2802 	GRAB_TEAM_LOCK();
2803 
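	// a team's ID equals its main thread's ID, so the next thread ID is an
	// upper bound for existing team IDs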
2804 	lastTeamID = peek_next_thread_id();
2805 	if (slot >= lastTeamID)
2806 		goto err;
2807 
2808 	// get next valid team
2809 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
2810 		slot++;
2811 
2812 	if (team) {
2813 		status = fill_team_info(team, info, size);
2814 		*cookie = ++slot;
2815 	}
2816 
2817 err:
2818 	RELEASE_TEAM_LOCK();
2819 	restore_interrupts(state);
2820 
2821 	return status;
2822 }
2823 
2824 
2825 status_t
2826 _get_team_usage_info(team_id id, int32 who, team_usage_info *info, size_t size)
2827 {
2828 	bigtime_t kernelTime = 0, userTime = 0;
2829 	status_t status = B_OK;
2830 	struct team *team;
2831 	cpu_status state;
2832 
2833 	if (size != sizeof(team_usage_info)
2834 		|| (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN))
2835 		return B_BAD_VALUE;
2836 
2837 	state = disable_interrupts();
2838 	GRAB_TEAM_LOCK();
2839 
2840 	if (id == B_CURRENT_TEAM)
2841 		team = thread_get_current_thread()->team;
2842 	else
2843 		team = team_get_team_struct_locked(id);
2844 
2845 	if (team == NULL) {
2846 		status = B_BAD_TEAM_ID;
2847 		goto out;
2848 	}
2849 
2850 	switch (who) {
2851 		case B_TEAM_USAGE_SELF:
2852 		{
2853 			struct thread *thread = team->thread_list;
2854 
2855 			for (; thread != NULL; thread = thread->team_next) {
2856 				kernelTime += thread->kernel_time;
2857 				userTime += thread->user_time;
2858 			}
2859 
2860 			kernelTime += team->dead_threads_kernel_time;
2861 			userTime += team->dead_threads_user_time;
2862 			break;
2863 		}
2864 
2865 		case B_TEAM_USAGE_CHILDREN:
2866 		{
2867 			struct team *child = team->children;
2868 			for (; child != NULL; child = child->siblings_next) {
2869 				struct thread *thread = child->thread_list;
2870 
2871 				for (; thread != NULL; thread = thread->team_next) {
2872 					kernelTime += thread->kernel_time;
2873 					userTime += thread->user_time;
2874 				}
2875 
2876 				kernelTime += child->dead_threads_kernel_time;
2877 				userTime += child->dead_threads_user_time;
2878 			}
2879 
2880 			kernelTime += team->dead_children->kernel_time;
2881 			userTime += team->dead_children->user_time;
2882 			break;
2883 		}
2884 	}
2885 
2886 out:
2887 	RELEASE_TEAM_LOCK();
2888 	restore_interrupts(state);
2889 
2890 	if (status == B_OK) {
2891 		info->kernel_time = kernelTime;
2892 		info->user_time = userTime;
2893 	}
2894 
2895 	return status;
2896 }
2897 
2898 
2899 pid_t
2900 getpid(void)
2901 {
2902 	return thread_get_current_thread()->team->id;
2903 }
2904 
2905 
2906 pid_t
2907 getppid(void)
2908 {
2909 	struct team *team = thread_get_current_thread()->team;
2910 	cpu_status state;
2911 	pid_t parent;
2912 
2913 	state = disable_interrupts();
2914 	GRAB_TEAM_LOCK();
2915 
2916 	parent = team->parent->id;
2917 
2918 	RELEASE_TEAM_LOCK();
2919 	restore_interrupts(state);
2920 
2921 	return parent;
2922 }
2923 
2924 
2925 pid_t
2926 getpgid(pid_t process)
2927 {
2928 	struct thread *thread;
2929 	pid_t result = -1;
2930 	cpu_status state;
2931 
2932 	if (process == 0)
2933 		process = thread_get_current_thread()->team->id;
2934 
2935 	state = disable_interrupts();
2936 	GRAB_THREAD_LOCK();
2937 
2938 	thread = thread_get_thread_struct_locked(process);
2939 	if (thread != NULL)
2940 		result = thread->team->group_id;
2941 
2942 	RELEASE_THREAD_LOCK();
2943 	restore_interrupts(state);
2944 
2945 	return thread != NULL ? result : B_BAD_VALUE;
2946 }
2947 
2948 
2949 pid_t
2950 getsid(pid_t process)
2951 {
2952 	struct thread *thread;
2953 	pid_t result = -1;
2954 	cpu_status state;
2955 
2956 	if (process == 0)
2957 		process = thread_get_current_thread()->team->id;
2958 
2959 	state = disable_interrupts();
2960 	GRAB_THREAD_LOCK();
2961 
2962 	thread = thread_get_thread_struct_locked(process);
2963 	if (thread != NULL)
2964 		result = thread->team->session_id;
2965 
2966 	RELEASE_THREAD_LOCK();
2967 	restore_interrupts(state);
2968 
2969 	return thread != NULL ? result : B_BAD_VALUE;
2970 }
2971 
2972 
2973 //	#pragma mark - User syscalls
2974 
2975 
2976 status_t
2977 _user_exec(const char *userPath, const char* const* userFlatArgs,
2978 	size_t flatArgsSize, int32 argCount, int32 envCount)
2979 {
2980 	// NOTE: Since this function normally doesn't return, don't use automatic
2981 	// variables that need destruction in the function scope.
2982 	char path[B_PATH_NAME_LENGTH];
2983 
2984 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
2985 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
2986 		return B_BAD_ADDRESS;
2987 
2988 	// copy and relocate the flat arguments
2989 	char** flatArgs;
2990 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
2991 		argCount, envCount, flatArgs);
2992 
2993 	if (error == B_OK) {
2994 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
2995 			envCount);
2996 			// this one only returns in case of error
2997 	}
2998 
2999 	free(flatArgs);
3000 	return error;
3001 }
3002 
3003 
3004 thread_id
3005 _user_fork(void)
3006 {
3007 	return fork_team();
3008 }
3009 
3010 
3011 thread_id
3012 _user_wait_for_child(thread_id child, uint32 flags, int32 *_userReason, status_t *_userReturnCode)
3013 {
3014 	status_t returnCode;
3015 	int32 reason;
3016 	thread_id deadChild;
3017 
3018 	if ((_userReason != NULL && !IS_USER_ADDRESS(_userReason))
3019 		|| (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode)))
3020 		return B_BAD_ADDRESS;
3021 
3022 	deadChild = wait_for_child(child, flags, &reason, &returnCode);
3023 
3024 	if (deadChild >= B_OK) {
3025 		// copy result data on successful completion
3026 		if ((_userReason != NULL
3027 				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
3028 			|| (_userReturnCode != NULL
3029 				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
3030 					< B_OK)) {
3031 			return B_BAD_ADDRESS;
3032 		}
3033 
3034 		return deadChild;
3035 	}
3036 
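	// on error, let the syscall restart mechanism decide whether the call
	// should be retried (e.g. when interrupted by a signal)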
3037 	return syscall_restart_handle_post(deadChild);
3038 }
3039 
3040 
3041 pid_t
3042 _user_process_info(pid_t process, int32 which)
3043 {
3044 	// we only allow returning the parent of the current process
3045 	if (which == PARENT_ID
3046 		&& process != 0 && process != thread_get_current_thread()->team->id)
3047 		return B_BAD_VALUE;
3048 
3049 	switch (which) {
3050 		case SESSION_ID:
3051 			return getsid(process);
3052 		case GROUP_ID:
3053 			return getpgid(process);
3054 		case PARENT_ID:
3055 			return getppid();
3056 	}
3057 
3058 	return B_BAD_VALUE;
3059 }
3060 
3061 
3062 pid_t
3063 _user_setpgid(pid_t processID, pid_t groupID)
3064 {
3065 	struct thread *thread = thread_get_current_thread();
3066 	struct team *currentTeam = thread->team;
3067 	struct team *team;
3068 
3069 	if (groupID < 0)
3070 		return B_BAD_VALUE;
3071 
3072 	if (processID == 0)
3073 		processID = currentTeam->id;
3074 
3075 	// if the group ID is not specified, use the target process' ID
3076 	if (groupID == 0)
3077 		groupID = processID;
3078 
3079 	if (processID == currentTeam->id) {
3080 		// we set our own group
3081 
3082 		// we must not change our process group ID if we're a session leader
3083 		if (is_session_leader(currentTeam))
3084 			return B_NOT_ALLOWED;
3085 	} else {
3086 		// another team is the target of the call -- check it out
3087 		InterruptsSpinLocker _(gTeamSpinlock);
3088 
3089 		team = team_get_team_struct_locked(processID);
3090 		if (team == NULL)
3091 			return ESRCH;
3092 
3093 		// The team must be a child of the calling team and in the same session.
3094 		// (If that's the case it isn't a session leader either.)
3095 		if (team->parent != currentTeam
3096 			|| team->session_id != currentTeam->session_id) {
3097 			return B_NOT_ALLOWED;
3098 		}
3099 
3100 		if (team->group_id == groupID)
3101 			return groupID;
3102 
3103 		// The call is also supposed to fail on a child that has already
3104 		// executed exec*() [EACCES].
3105 		if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
3106 			return EACCES;
3107 	}
3108 
3109 	struct process_group *group = NULL;
3110 	if (groupID == processID) {
3111 		// A new process group might be needed.
3112 		group = create_process_group(groupID);
3113 		if (group == NULL)
3114 			return B_NO_MEMORY;
3115 
3116 		// Assume orphaned. We consider the situation of the team's parent
3117 		// below.
3118 		group->orphaned = true;
3119 	}
3120 
3121 	status_t status = B_OK;
3122 	struct process_group *freeGroup = NULL;
3123 
3124 	InterruptsSpinLocker locker(gTeamSpinlock);
3125 
3126 	team = team_get_team_struct_locked(processID);
3127 	if (team != NULL) {
3128 		// check the conditions again -- they might have changed in the meantime
3129 		if (is_session_leader(team)
3130 			|| team->session_id != currentTeam->session_id) {
3131 			status = B_NOT_ALLOWED;
3132 		} else if (team != currentTeam
3133 				&& (team->flags & TEAM_FLAG_EXEC_DONE) != 0) {
3134 			status = EACCES;
3135 		} else if (team->group_id == groupID) {
3136 			// the team is already in the desired process group
3137 			freeGroup = group;
3138 		} else {
3139 			// Check if a process group with the requested ID already exists.
3140 			struct process_group *targetGroup
3141 				= team_get_process_group_locked(team->group->session, groupID);
3142 			if (targetGroup != NULL) {
3143 				// In case of processID == groupID we have to free the
3144 				// allocated group.
3145 				freeGroup = group;
3146 			} else if (processID == groupID) {
3147 				// We created a new process group, let us insert it into the
3148 				// team's session.
3149 				insert_group_into_session(team->group->session, group);
3150 				targetGroup = group;
3151 			}
3152 
3153 			if (targetGroup != NULL) {
3154 				// we got a group, let's move the team there
3155 				process_group* oldGroup = team->group;
3156 
3157 				remove_team_from_group(team);
3158 				insert_team_into_group(targetGroup, team);
3159 
3160 				// Update the "orphaned" flag of all potentially affected
3161 				// groups.
3162 
3163 				// the team's old group
3164 				if (oldGroup->teams != NULL) {
3165 					oldGroup->orphaned = false;
3166 					update_orphaned_process_group(oldGroup, -1);
3167 				}
3168 
3169 				// the team's new group
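				// it stays orphaned unless the moved team's parent is in a
				// different group of the same session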
3170 				struct team* parent = team->parent;
3171 				targetGroup->orphaned &= parent == NULL
3172 					|| parent->group == targetGroup
3173 					|| team->parent->session_id != team->session_id;
3174 
3175 				// children's groups
3176 				struct team* child = team->children;
3177 				while (child != NULL) {
3178 					child->group->orphaned = false;
3179 					update_orphaned_process_group(child->group, -1);
3180 
3181 					child = child->siblings_next;
3182 				}
3183 			} else
3184 				status = B_NOT_ALLOWED;
3185 		}
3186 	} else
3187 		status = B_NOT_ALLOWED;
3188 
3189 	// Changing the process group might have changed the situation for a parent
3190 	// waiting in wait_for_child(). Hence we notify it.
3191 	if (status == B_OK)
3192 		team->parent->dead_children->condition_variable.NotifyAll(false);
3193 
3194 	locker.Unlock();
3195 
3196 	if (status != B_OK) {
3197 		// in case of error, the group hasn't been added into the hash
3198 		team_delete_process_group(group);
3199 	}
3200 
3201 	team_delete_process_group(freeGroup);
3202 
3203 	return status == B_OK ? groupID : status;
3204 }
3205 
3206 
3207 pid_t
3208 _user_setsid(void)
3209 {
3210 	struct team *team = thread_get_current_thread()->team;
3211 	struct process_session *session;
3212 	struct process_group *group;
3213 	cpu_status state;
3214 	bool failed = false;
3215 
3216 	// the team must not already be a process group leader
3217 	if (is_process_group_leader(team))
3218 		return B_NOT_ALLOWED;
3219 
3220 	group = create_process_group(team->id);
3221 	if (group == NULL)
3222 		return B_NO_MEMORY;
3223 
3224 	session = create_process_session(group->id);
3225 	if (session == NULL) {
3226 		team_delete_process_group(group);
3227 		return B_NO_MEMORY;
3228 	}
3229 
3230 	state = disable_interrupts();
3231 	GRAB_TEAM_LOCK();
3232 
3233 	// this may have changed since the check above
3234 	if (!is_process_group_leader(team)) {
3235 		remove_team_from_group(team);
3236 
3237 		insert_group_into_session(session, group);
3238 		insert_team_into_group(group, team);
3239 	} else
3240 		failed = true;
3241 
3242 	RELEASE_TEAM_LOCK();
3243 	restore_interrupts(state);
3244 
3245 	if (failed) {
3246 		team_delete_process_group(group);
3247 		free(session);
3248 		return B_NOT_ALLOWED;
3249 	}
3250 
3251 	return team->group_id;
3252 }
3253 
3254 
3255 status_t
3256 _user_wait_for_team(team_id id, status_t *_userReturnCode)
3257 {
3258 	status_t returnCode;
3259 	status_t status;
3260 
3261 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
3262 		return B_BAD_ADDRESS;
3263 
3264 	status = wait_for_team(id, &returnCode);
3265 	if (status >= B_OK && _userReturnCode != NULL) {
3266 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode)) < B_OK)
3267 			return B_BAD_ADDRESS;
3268 		return B_OK;
3269 	}
3270 
3271 	return syscall_restart_handle_post(status);
3272 }
3273 
3274 
3275 thread_id
3276 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
3277 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
3278 	port_id errorPort, uint32 errorToken)
3279 {
3280 	TRACE(("_user_load_image_etc: argc = %ld\n", argCount));
3281 
3282 	if (argCount < 1)
3283 		return B_BAD_VALUE;
3284 
3285 	// copy and relocate the flat arguments
3286 	char** flatArgs;
3287 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3288 		argCount, envCount, flatArgs);
3289 	if (error != B_OK)
3290 		return error;
3291 
3292 	thread_id thread = load_image_etc(flatArgs, _ALIGN(flatArgsSize), argCount,
3293 		envCount, priority, flags, errorPort, errorToken);
3294 
3295 	free(flatArgs);
3296 		// load_image_etc() unset our variable if it took over ownership
3297 
3298 	return thread;
3299 }
3300 
3301 
3302 void
3303 _user_exit_team(status_t returnValue)
3304 {
3305 	struct thread *thread = thread_get_current_thread();
3306 
3307 	thread->exit.status = returnValue;
3308 	thread->exit.reason = THREAD_RETURN_EXIT;
3309 
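	// exiting is implemented by sending ourselves a SIGKILL, which brings
	// down the whole team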
3310 	send_signal(thread->id, SIGKILL);
3311 }
3312 
3313 
3314 status_t
3315 _user_kill_team(team_id team)
3316 {
3317 	return kill_team(team);
3318 }
3319 
3320 
3321 status_t
3322 _user_get_team_info(team_id id, team_info *userInfo)
3323 {
3324 	status_t status;
3325 	team_info info;
3326 
3327 	if (!IS_USER_ADDRESS(userInfo))
3328 		return B_BAD_ADDRESS;
3329 
3330 	status = _get_team_info(id, &info, sizeof(team_info));
3331 	if (status == B_OK) {
3332 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3333 			return B_BAD_ADDRESS;
3334 	}
3335 
3336 	return status;
3337 }
3338 
3339 
3340 status_t
3341 _user_get_next_team_info(int32 *userCookie, team_info *userInfo)
3342 {
3343 	status_t status;
3344 	team_info info;
3345 	int32 cookie;
3346 
3347 	if (!IS_USER_ADDRESS(userCookie)
3348 		|| !IS_USER_ADDRESS(userInfo)
3349 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3350 		return B_BAD_ADDRESS;
3351 
3352 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
3353 	if (status != B_OK)
3354 		return status;
3355 
3356 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3357 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3358 		return B_BAD_ADDRESS;
3359 
3360 	return status;
3361 }
3362 
3363 
3364 team_id
3365 _user_get_current_team(void)
3366 {
3367 	return team_get_current_team_id();
3368 }
3369 
3370 
3371 status_t
3372 _user_get_team_usage_info(team_id team, int32 who, team_usage_info *userInfo, size_t size)
3373 {
3374 	team_usage_info info;
3375 	status_t status;
3376 
3377 	if (!IS_USER_ADDRESS(userInfo))
3378 		return B_BAD_ADDRESS;
3379 
3380 	status = _get_team_usage_info(team, who, &info, size);
3381 	if (status != B_OK)
3382 		return status;
3383 
3384 	if (user_memcpy(userInfo, &info, size) < B_OK)
3385 		return B_BAD_ADDRESS;
3386 
3387 	return status;
3388 }
3389 
3390