xref: /haiku/src/system/kernel/team.cpp (revision 9760dcae2038d47442f4658c2575844c6cf92c40)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*!	Team functions */
12 
13 
14 #include <team.h>
15 
16 #include <stdio.h>
17 #include <stdlib.h>
18 #include <string.h>
19 #include <sys/wait.h>
20 
21 #include <OS.h>
22 
23 #include <AutoDeleter.h>
24 #include <FindDirectory.h>
25 
26 #include <boot_device.h>
27 #include <elf.h>
28 #include <file_cache.h>
29 #include <fs/KPath.h>
30 #include <heap.h>
31 #include <int.h>
32 #include <kernel.h>
33 #include <kimage.h>
34 #include <kscheduler.h>
35 #include <ksignal.h>
36 #include <Notifications.h>
37 #include <port.h>
38 #include <posix/realtime_sem.h>
39 #include <posix/xsi_semaphore.h>
40 #include <sem.h>
41 #include <syscall_process_info.h>
42 #include <syscall_restart.h>
43 #include <syscalls.h>
44 #include <tls.h>
45 #include <tracing.h>
46 #include <user_runtime.h>
47 #include <user_thread.h>
48 #include <usergroup.h>
49 #include <vfs.h>
50 #include <vm/vm.h>
51 #include <vm/VMAddressSpace.h>
52 #include <util/AutoLock.h>
53 #include <util/khash.h>
54 
55 //#define TRACE_TEAM
56 #ifdef TRACE_TEAM
57 #	define TRACE(x) dprintf x
58 #else
59 #	define TRACE(x) ;
60 #endif
61 
62 
63 struct team_key {
64 	team_id id;
65 };
66 
67 struct team_arg {
68 	char	*path;
69 	char	**flat_args;
70 	size_t	flat_args_size;
71 	uint32	arg_count;
72 	uint32	env_count;
73 	port_id	error_port;
74 	uint32	error_token;
75 };
76 
77 struct fork_arg {
78 	area_id				user_stack_area;
79 	addr_t				user_stack_base;
80 	size_t				user_stack_size;
81 	addr_t				user_local_storage;
82 	sigset_t			sig_block_mask;
83 	struct sigaction	sig_action[32];
84 	addr_t				signal_stack_base;
85 	size_t				signal_stack_size;
86 	bool				signal_stack_enabled;
87 
88 	struct user_thread* user_thread;
89 
90 	struct arch_fork_arg arch_info;
91 };
92 
93 class TeamNotificationService : public DefaultNotificationService {
94 public:
95 							TeamNotificationService();
96 
97 			void			Notify(uint32 eventCode, struct team* team);
98 };
99 
100 
101 static hash_table* sTeamHash = NULL;
102 static hash_table* sGroupHash = NULL;
103 static struct team* sKernelTeam = NULL;
104 
105 // some arbitrarily chosen limits - should probably depend on the available
106 // memory (the limit is not yet enforced)
107 static int32 sMaxTeams = 2048;
108 static int32 sUsedTeams = 1;
109 
110 static TeamNotificationService sNotificationService;
111 
112 spinlock gTeamSpinlock = B_SPINLOCK_INITIALIZER;
113 
114 
115 // #pragma mark - Tracing
116 
117 
118 #if TEAM_TRACING
119 namespace TeamTracing {
120 
121 class TeamForked : public AbstractTraceEntry {
122 public:
123 	TeamForked(thread_id forkedThread)
124 		:
125 		fForkedThread(forkedThread)
126 	{
127 		Initialized();
128 	}
129 
130 	virtual void AddDump(TraceOutput& out)
131 	{
132 		out.Print("team forked, new thread %ld", fForkedThread);
133 	}
134 
135 private:
136 	thread_id			fForkedThread;
137 };
138 
139 
140 class ExecTeam : public AbstractTraceEntry {
141 public:
142 	ExecTeam(const char* path, int32 argCount, const char* const* args,
143 			int32 envCount, const char* const* env)
144 		:
145 		fArgCount(argCount),
146 		fArgs(NULL)
147 	{
148 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
149 			false);
150 
151 		// determine the buffer size we need for the args
152 		size_t argBufferSize = 0;
153 		for (int32 i = 0; i < argCount; i++)
154 			argBufferSize += strlen(args[i]) + 1;
155 
156 		// allocate a buffer
157 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
158 		if (fArgs) {
159 			char* buffer = fArgs;
160 			for (int32 i = 0; i < argCount; i++) {
161 				size_t argSize = strlen(args[i]) + 1;
162 				memcpy(buffer, args[i], argSize);
163 				buffer += argSize;
164 			}
165 		}
166 
167 		// ignore env for the time being
168 		(void)envCount;
169 		(void)env;
170 
171 		Initialized();
172 	}
173 
174 	virtual void AddDump(TraceOutput& out)
175 	{
176 		out.Print("team exec, \"%p\", args:", fPath);
177 
178 		if (fArgs != NULL) {
179 			char* args = fArgs;
180 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
181 				out.Print(" \"%s\"", args);
182 				args += strlen(args) + 1;
183 			}
184 		} else
185 			out.Print(" <too long>");
186 	}
187 
188 private:
189 	char*	fPath;
190 	int32	fArgCount;
191 	char*	fArgs;
192 };
193 
194 
195 static const char*
196 job_control_state_name(job_control_state state)
197 {
198 	switch (state) {
199 		case JOB_CONTROL_STATE_NONE:
200 			return "none";
201 		case JOB_CONTROL_STATE_STOPPED:
202 			return "stopped";
203 		case JOB_CONTROL_STATE_CONTINUED:
204 			return "continued";
205 		case JOB_CONTROL_STATE_DEAD:
206 			return "dead";
207 		default:
208 			return "invalid";
209 	}
210 }
211 
212 
213 class SetJobControlState : public AbstractTraceEntry {
214 public:
215 	SetJobControlState(team_id team, job_control_state newState, int signal)
216 		:
217 		fTeam(team),
218 		fNewState(newState),
219 		fSignal(signal)
220 	{
221 		Initialized();
222 	}
223 
224 	virtual void AddDump(TraceOutput& out)
225 	{
226 		out.Print("team set job control state, team %ld, "
227 			"new state: %s, signal: %d",
228 			fTeam, job_control_state_name(fNewState), fSignal);
229 	}
230 
231 private:
232 	team_id				fTeam;
233 	job_control_state	fNewState;
234 	int					fSignal;
235 };
236 
237 
238 class WaitForChild : public AbstractTraceEntry {
239 public:
240 	WaitForChild(pid_t child, uint32 flags)
241 		:
242 		fChild(child),
243 		fFlags(flags)
244 	{
245 		Initialized();
246 	}
247 
248 	virtual void AddDump(TraceOutput& out)
249 	{
250 		out.Print("team wait for child, child: %ld, "
251 			"flags: 0x%lx", fChild, fFlags);
252 	}
253 
254 private:
255 	pid_t	fChild;
256 	uint32	fFlags;
257 };
258 
259 
260 class WaitForChildDone : public AbstractTraceEntry {
261 public:
262 	WaitForChildDone(const job_control_entry& entry)
263 		:
264 		fState(entry.state),
265 		fTeam(entry.thread),
266 		fStatus(entry.status),
267 		fReason(entry.reason),
268 		fSignal(entry.signal)
269 	{
270 		Initialized();
271 	}
272 
273 	WaitForChildDone(status_t error)
274 		:
275 		fTeam(error)
276 	{
277 		Initialized();
278 	}
279 
280 	virtual void AddDump(TraceOutput& out)
281 	{
282 		if (fTeam >= 0) {
283 			out.Print("team wait for child done, team: %ld, "
284 				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
285 				fTeam, job_control_state_name(fState), fStatus, fReason,
286 				fSignal);
287 		} else {
288 			out.Print("team wait for child failed, error: "
289 				"0x%lx, ", fTeam);
290 		}
291 	}
292 
293 private:
294 	job_control_state	fState;
295 	team_id				fTeam;
296 	status_t			fStatus;
297 	uint16				fReason;
298 	uint16				fSignal;
299 };
300 
301 }	// namespace TeamTracing
302 
303 #	define T(x) new(std::nothrow) TeamTracing::x;
304 #else
305 #	define T(x) ;
306 #endif
307 
308 
309 //	#pragma mark - TeamNotificationService
310 
311 
312 TeamNotificationService::TeamNotificationService()
313 	: DefaultNotificationService("teams")
314 {
315 }
316 
317 
318 void
319 TeamNotificationService::Notify(uint32 eventCode, struct team* team)
320 {
321 	char eventBuffer[128];
322 	KMessage event;
323 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
324 	event.AddInt32("event", eventCode);
325 	event.AddInt32("team", team->id);
326 	event.AddPointer("teamStruct", team);
327 
328 	DefaultNotificationService::Notify(event, eventCode);
329 }
330 
331 
332 //	#pragma mark - Private functions
333 
334 
335 static void
336 _dump_team_info(struct team* team)
337 {
338 	kprintf("TEAM: %p\n", team);
339 	kprintf("id:               %ld (%#lx)\n", team->id, team->id);
340 	kprintf("name:             '%s'\n", team->name);
341 	kprintf("args:             '%s'\n", team->args);
342 	kprintf("next:             %p\n", team->next);
343 	kprintf("parent:           %p", team->parent);
344 	if (team->parent != NULL) {
345 		kprintf(" (id = %ld)\n", team->parent->id);
346 	} else
347 		kprintf("\n");
348 
349 	kprintf("children:         %p\n", team->children);
350 	kprintf("num_threads:      %d\n", team->num_threads);
351 	kprintf("state:            %d\n", team->state);
352 	kprintf("flags:            0x%lx\n", team->flags);
353 	kprintf("io_context:       %p\n", team->io_context);
354 	if (team->address_space)
355 		kprintf("address_space:    %p\n", team->address_space);
356 	kprintf("user data:        %p (area %ld)\n", (void*)team->user_data,
357 		team->user_data_area);
358 	kprintf("free user thread: %p\n", team->free_user_threads);
359 	kprintf("main_thread:      %p\n", team->main_thread);
360 	kprintf("thread_list:      %p\n", team->thread_list);
361 	kprintf("group_id:         %ld\n", team->group_id);
362 	kprintf("session_id:       %ld\n", team->session_id);
363 }
364 
365 
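/*!	Kernel debugger command: dumps information about a single team. The team
	may be given by ID, name, or structure address; without an argument the
	current thread's team is dumped.
*/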
366 static int
367 dump_team_info(int argc, char** argv)
368 {
369 	struct hash_iterator iterator;
370 	struct team* team;
371 	team_id id = -1;
372 	bool found = false;
373 
374 	if (argc < 2) {
375 		struct thread* thread = thread_get_current_thread();
376 		if (thread != NULL && thread->team != NULL)
377 			_dump_team_info(thread->team);
378 		else
379 			kprintf("No current team!\n");
380 		return 0;
381 	}
382 
383 	id = strtoul(argv[1], NULL, 0);
384 	if (IS_KERNEL_ADDRESS(id)) {
385 		// semi-hack
386 		_dump_team_info((struct team*)id);
387 		return 0;
388 	}
389 
390 	// walk through the team list, trying to match name or id
391 	hash_open(sTeamHash, &iterator);
392 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
393 		if ((team->name && strcmp(argv[1], team->name) == 0)
394 			|| team->id == id) {
395 			_dump_team_info(team);
396 			found = true;
397 			break;
398 		}
399 	}
400 	hash_close(sTeamHash, &iterator, false);
401 
402 	if (!found)
403 		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
404 	return 0;
405 }
406 
407 
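/*!	Kernel debugger command: lists all teams with their structure address, ID,
	parent, and name.
*/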
408 static int
409 dump_teams(int argc, char** argv)
410 {
411 	struct hash_iterator iterator;
412 	struct team* team;
413 
414 	kprintf("team           id  parent      name\n");
415 	hash_open(sTeamHash, &iterator);
416 
417 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
418 		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->name);
419 	}
420 
421 	hash_close(sTeamHash, &iterator, false);
422 	return 0;
423 }
424 
425 
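/*!	hash_table compare function for sTeamHash: returns 0 if the given team
	structure matches the team_key (i.e. the IDs are equal), 1 otherwise.
*/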
426 static int
427 team_struct_compare(void* _p, const void* _key)
428 {
429 	struct team* p = (struct team*)_p;
430 	const struct team_key* key = (const struct team_key*)_key;
431 
432 	if (p->id == key->id)
433 		return 0;
434 
435 	return 1;
436 }
437 
438 
439 static uint32
440 team_struct_hash(void* _p, const void* _key, uint32 range)
441 {
442 	struct team* p = (struct team*)_p;
443 	const struct team_key* key = (const struct team_key*)_key;
444 
445 	if (p != NULL)
446 		return p->id % range;
447 
448 	return (uint32)key->id % range;
449 }
450 
451 
452 static int
453 process_group_compare(void* _group, const void* _key)
454 {
455 	struct process_group* group = (struct process_group*)_group;
456 	const struct team_key* key = (const struct team_key*)_key;
457 
458 	if (group->id == key->id)
459 		return 0;
460 
461 	return 1;
462 }
463 
464 
465 static uint32
466 process_group_hash(void* _group, const void* _key, uint32 range)
467 {
468 	struct process_group* group = (struct process_group*)_group;
469 	const struct team_key* key = (const struct team_key*)_key;
470 
471 	if (group != NULL)
472 		return group->id % range;
473 
474 	return (uint32)key->id % range;
475 }
476 
477 
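/*!	Inserts \a team as a child of \a parent.
	Note: must have team lock held.
*/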
478 static void
479 insert_team_into_parent(struct team* parent, struct team* team)
480 {
481 	ASSERT(parent != NULL);
482 
483 	team->siblings_next = parent->children;
484 	parent->children = team;
485 	team->parent = parent;
486 }
487 
488 
489 /*!	Note: must have team lock held */
490 static void
491 remove_team_from_parent(struct team* parent, struct team* team)
492 {
493 	struct team* child;
494 	struct team* last = NULL;
495 
496 	for (child = parent->children; child != NULL;
497 			child = child->siblings_next) {
498 		if (child == team) {
499 			if (last == NULL)
500 				parent->children = child->siblings_next;
501 			else
502 				last->siblings_next = child->siblings_next;
503 
504 			team->parent = NULL;
505 			break;
506 		}
507 		last = child;
508 	}
509 }
510 
511 
512 /*!	Reparent each of our children
513 	Note: must have team lock held
514 */
515 static void
516 reparent_children(struct team* team)
517 {
518 	struct team* child;
519 
520 	while ((child = team->children) != NULL) {
521 		// remove the child from the current proc and add to the parent
522 		remove_team_from_parent(team, child);
523 		insert_team_into_parent(sKernelTeam, child);
524 	}
525 
526 	// move job control entries too
527 	sKernelTeam->stopped_children->entries.MoveFrom(
528 		&team->stopped_children->entries);
529 	sKernelTeam->continued_children->entries.MoveFrom(
530 		&team->continued_children->entries);
531 
532 	// Note, we don't move the dead children entries. Those will be deleted
533 	// when the team structure is deleted.
534 }
535 
536 
537 static bool
538 is_session_leader(struct team* team)
539 {
540 	return team->session_id == team->id;
541 }
542 
543 
544 static bool
545 is_process_group_leader(struct team* team)
546 {
547 	return team->group_id == team->id;
548 }
549 
550 
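/*!	Frees the given process group via deferred deletion. If
	remove_group_from_session() left the group's session pointer in place
	(i.e. the group was the session's last member), the session is freed
	as well.
*/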
551 static void
552 deferred_delete_process_group(struct process_group* group)
553 {
554 	if (group == NULL)
555 		return;
556 
557 	// remove_group_from_session() keeps this pointer around
558 	// only if the session can be freed as well
559 	if (group->session) {
560 		TRACE(("deferred_delete_process_group(): frees session %ld\n",
561 			group->session->id));
562 		deferred_free(group->session);
563 	}
564 
565 	deferred_free(group);
566 }
567 
568 
569 /*!	Removes a group from a session. If no other group uses the session
570 	anymore, the session will be freed together with the group later on.
571 	You must hold the team lock when calling this function.
572 */
573 static void
574 remove_group_from_session(struct process_group* group)
575 {
576 	struct process_session* session = group->session;
577 
578 	// the group must be in a session for this function to have any effect
579 	if (session == NULL)
580 		return;
581 
582 	hash_remove(sGroupHash, group);
583 
584 	// we cannot free the session here, so we're keeping the link around -
585 	// this way it'll be freed by deferred_delete_process_group()
586 	if (--session->group_count > 0)
587 		group->session = NULL;
588 }
589 
590 
591 /*!	Team lock must be held.
592 */
593 static void
594 acquire_process_group_ref(pid_t groupID)
595 {
596 	process_group* group = team_get_process_group_locked(NULL, groupID);
597 	if (group == NULL) {
598 		panic("acquire_process_group_ref(): unknown group ID: %ld", groupID);
599 		return;
600 	}
601 
602 	group->refs++;
603 }
604 
605 
606 /*!	Team lock must be held.
607 */
608 static void
609 release_process_group_ref(pid_t groupID)
610 {
611 	process_group* group = team_get_process_group_locked(NULL, groupID);
612 	if (group == NULL) {
613 		panic("release_process_group_ref(): unknown group ID: %ld", groupID);
614 		return;
615 	}
616 
617 	if (group->refs <= 0) {
618 		panic("release_process_group_ref(%ld): ref count already 0", groupID);
619 		return;
620 	}
621 
622 	if (--group->refs > 0)
623 		return;
624 
625 	// group is no longer used
626 
627 	remove_group_from_session(group);
628 	deferred_delete_process_group(group);
629 }
630 
631 
632 /*!	You must hold the team lock when calling this function. */
633 static void
634 insert_group_into_session(struct process_session* session,
635 	struct process_group* group)
636 {
637 	if (group == NULL)
638 		return;
639 
640 	group->session = session;
641 	hash_insert(sGroupHash, group);
642 	session->group_count++;
643 }
644 
645 
646 /*!	You must hold the team lock when calling this function. */
647 static void
648 insert_team_into_group(struct process_group* group, struct team* team)
649 {
650 	team->group = group;
651 	team->group_id = group->id;
652 	team->session_id = group->session->id;
653 
654 	team->group_next = group->teams;
655 	group->teams = team;
656 	acquire_process_group_ref(group->id);
657 }
658 
659 
660 /*!	Removes the team from the group.
661 
662 	\param team the team that'll be removed from its group
663 */
664 static void
665 remove_team_from_group(struct team* team)
666 {
667 	struct process_group* group = team->group;
668 	struct team* current;
669 	struct team* last = NULL;
670 
671 	// the team must be in a group for this function to have any effect
672 	if (group == NULL)
673 		return;
674 
675 	for (current = group->teams; current != NULL;
676 			current = current->group_next) {
677 		if (current == team) {
678 			if (last == NULL)
679 				group->teams = current->group_next;
680 			else
681 				last->group_next = current->group_next;
682 
683 			team->group = NULL;
684 			break;
685 		}
686 		last = current;
687 	}
688 
689 	team->group = NULL;
690 	team->group_next = NULL;
691 
692 	release_process_group_ref(group->id);
693 }
694 
695 
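/*!	Allocates and initializes a new process group with the given ID. The group
	starts out without a session or teams and with a reference count of 0.
	Returns NULL if out of memory.
*/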
696 static struct process_group*
697 create_process_group(pid_t id)
698 {
699 	struct process_group* group
700 		= (struct process_group*)malloc(sizeof(struct process_group));
701 	if (group == NULL)
702 		return NULL;
703 
704 	group->id = id;
705 	group->refs = 0;
706 	group->session = NULL;
707 	group->teams = NULL;
708 	group->orphaned = true;
709 	return group;
710 }
711 
712 
713 static struct process_session*
714 create_process_session(pid_t id)
715 {
716 	struct process_session* session
717 		= (struct process_session*)malloc(sizeof(struct process_session));
718 	if (session == NULL)
719 		return NULL;
720 
721 	session->id = id;
722 	session->group_count = 0;
723 	session->controlling_tty = -1;
724 	session->foreground_group = -1;
725 
726 	return session;
727 }
728 
729 
730 static void
731 set_team_name(struct team* team, const char* name)
732 {
733 	if (const char* lastSlash = strrchr(name, '/'))
734 		name = lastSlash + 1;
735 
736 	strlcpy(team->name, name, B_OS_NAME_LENGTH);
737 }
738 
739 
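/*!	Allocates and initializes a new team structure, including the bookkeeping
	for dead, stopped, and continued children and the team's job control entry.
	Returns NULL if any of the allocations fails; everything allocated up to
	that point is freed again.
*/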
740 static struct team*
741 create_team_struct(const char* name, bool kernel)
742 {
743 	struct team* team = (struct team*)malloc(sizeof(struct team));
744 	if (team == NULL)
745 		return NULL;
746 	MemoryDeleter teamDeleter(team);
747 
748 	team->next = team->siblings_next = team->children = team->parent = NULL;
749 	team->id = allocate_thread_id();
750 	set_team_name(team, name);
751 	team->args[0] = '\0';
752 	team->num_threads = 0;
753 	team->io_context = NULL;
754 	team->address_space = NULL;
755 	team->realtime_sem_context = NULL;
756 	team->xsi_sem_context = NULL;
757 	team->thread_list = NULL;
758 	team->main_thread = NULL;
759 	team->loading_info = NULL;
760 	team->state = TEAM_STATE_BIRTH;
761 	team->flags = 0;
762 	team->death_sem = -1;
763 	team->user_data_area = -1;
764 	team->user_data = 0;
765 	team->used_user_data = 0;
766 	team->user_data_size = 0;
767 	team->free_user_threads = NULL;
768 
769 	team->supplementary_groups = NULL;
770 	team->supplementary_group_count = 0;
771 
772 	team->dead_threads_kernel_time = 0;
773 	team->dead_threads_user_time = 0;
774 
775 	// dead threads
776 	list_init(&team->dead_threads);
777 	team->dead_threads_count = 0;
778 
779 	// dead children
780 	team->dead_children = new(nothrow) team_dead_children;
781 	if (team->dead_children == NULL)
782 		return NULL;
783 	ObjectDeleter<team_dead_children> deadChildrenDeleter(team->dead_children);
784 
785 	team->dead_children->count = 0;
786 	team->dead_children->kernel_time = 0;
787 	team->dead_children->user_time = 0;
788 
789 	// stopped children
790 	team->stopped_children = new(nothrow) team_job_control_children;
791 	if (team->stopped_children == NULL)
792 		return NULL;
793 	ObjectDeleter<team_job_control_children> stoppedChildrenDeleter(
794 		team->stopped_children);
795 
796 	// continued children
797 	team->continued_children = new(nothrow) team_job_control_children;
798 	if (team->continued_children == NULL)
799 		return NULL;
800 	ObjectDeleter<team_job_control_children> continuedChildrenDeleter(
801 		team->continued_children);
802 
803 	// job control entry
804 	team->job_control_entry = new(nothrow) job_control_entry;
805 	if (team->job_control_entry == NULL)
806 		return NULL;
807 	ObjectDeleter<job_control_entry> jobControlEntryDeleter(
808 		team->job_control_entry);
809 	team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
810 	team->job_control_entry->thread = team->id;
811 	team->job_control_entry->team = team;
812 
813 	list_init(&team->sem_list);
814 	list_init(&team->port_list);
815 	list_init(&team->image_list);
816 	list_init(&team->watcher_list);
817 
818 	clear_team_debug_info(&team->debug_info, true);
819 
820 	if (arch_team_init_team_struct(team, kernel) < 0)
821 		return NULL;
822 
823 	// publish the dead children condition variable
824 	team->dead_children->condition_variable.Init(team->dead_children,
825 		"team children");
826 
827 	// keep all allocated structures
828 	jobControlEntryDeleter.Detach();
829 	continuedChildrenDeleter.Detach();
830 	stoppedChildrenDeleter.Detach();
831 	deadChildrenDeleter.Detach();
832 	teamDeleter.Detach();
833 
834 	return team;
835 }
836 
837 
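/*!	Frees a team structure and everything it still owns: remaining dead thread
	entries, dead children entries, free user_thread entries, the supplementary
	groups, and the job control data.
*/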
838 static void
839 delete_team_struct(struct team* team)
840 {
841 	while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
842 			&team->dead_threads)) {
843 		free(threadDeathEntry);
844 	}
845 
846 	while (job_control_entry* entry = team->dead_children->entries.RemoveHead())
847 		delete entry;
848 
849 	while (free_user_thread* entry = team->free_user_threads) {
850 		team->free_user_threads = entry->next;
851 		free(entry);
852 	}
853 
854 	malloc_referenced_release(team->supplementary_groups);
855 
856 	delete team->job_control_entry;
857 		// usually already NULL and transferred to the parent
858 	delete team->continued_children;
859 	delete team->stopped_children;
860 	delete team->dead_children;
861 	free(team);
862 }
863 
864 
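/*!	Creates the team's user data area (initially four pages near
	KERNEL_USER_DATA_BASE), which is used to hand out the per-thread
	user_thread structures.
*/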
865 static status_t
866 create_team_user_data(struct team* team)
867 {
868 	void* address = (void*)KERNEL_USER_DATA_BASE;
869 	size_t size = 4 * B_PAGE_SIZE;
870 	team->user_data_area = create_area_etc(team->id, "user area", &address,
871 		B_BASE_ADDRESS, size, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0);
872 	if (team->user_data_area < 0)
873 		return team->user_data_area;
874 
875 	team->user_data = (addr_t)address;
876 	team->used_user_data = 0;
877 	team->user_data_size = size;
878 	team->free_user_threads = NULL;
879 
880 	return B_OK;
881 }
882 
883 
884 static void
885 delete_team_user_data(struct team* team)
886 {
887 	if (team->user_data_area >= 0) {
888 		vm_delete_area(team->id, team->user_data_area, true);
889 		team->user_data = 0;
890 		team->used_user_data = 0;
891 		team->user_data_size = 0;
892 		team->user_data_area = -1;
893 		while (free_user_thread* entry = team->free_user_threads) {
894 			team->free_user_threads = entry->next;
895 			free(entry);
896 		}
897 	}
898 }
899 
900 
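/*!	Copies the flat argument/environment array from userland into a kernel
	buffer, checks that the argument and environment vectors are properly
	NULL-terminated and that all strings lie within the buffer, and relocates
	the string pointers to the kernel copy. On success the caller takes
	ownership of the buffer returned in \a _flatArgs.
*/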
901 static status_t
902 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
903 	int32 argCount, int32 envCount, char**& _flatArgs)
904 {
905 	if (argCount < 0 || envCount < 0)
906 		return B_BAD_VALUE;
907 
908 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
909 		return B_TOO_MANY_ARGS;
910 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
911 		return B_BAD_VALUE;
912 
913 	if (!IS_USER_ADDRESS(userFlatArgs))
914 		return B_BAD_ADDRESS;
915 
916 	// allocate kernel memory
917 	char** flatArgs = (char**)malloc(flatArgsSize);
918 	if (flatArgs == NULL)
919 		return B_NO_MEMORY;
920 
921 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
922 		free(flatArgs);
923 		return B_BAD_ADDRESS;
924 	}
925 
926 	// check and relocate the array
927 	status_t error = B_OK;
928 	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
929 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
930 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
931 		if (i == argCount || i == argCount + envCount + 1) {
932 			// check array null termination
933 			if (flatArgs[i] != NULL) {
934 				error = B_BAD_VALUE;
935 				break;
936 			}
937 		} else {
938 			// check string
939 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
940 			size_t maxLen = stringEnd - arg;
941 			if (arg < stringBase || arg >= stringEnd
942 					|| strnlen(arg, maxLen) == maxLen) {
943 				error = B_BAD_VALUE;
944 				break;
945 			}
946 
947 			flatArgs[i] = arg;
948 		}
949 	}
950 
951 	if (error == B_OK)
952 		_flatArgs = flatArgs;
953 	else
954 		free(flatArgs);
955 
956 	return error;
957 }
958 
959 
960 static void
961 free_team_arg(struct team_arg* teamArg)
962 {
963 	if (teamArg != NULL) {
964 		free(teamArg->flat_args);
965 		free(teamArg->path);
966 		free(teamArg);
967 	}
968 }
969 
970 
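/*!	Allocates a team_arg structure and initializes it with a copy of \a path
	and the given flat arguments; the flat argument buffer is owned by the
	structure from now on and is released again by free_team_arg().
*/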
971 static status_t
972 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
973 	size_t flatArgsSize, int32 argCount, int32 envCount, port_id port,
974 	uint32 token)
975 {
976 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
977 	if (teamArg == NULL)
978 		return B_NO_MEMORY;
979 
980 	teamArg->path = strdup(path);
981 	if (teamArg->path == NULL) {
982 		free(teamArg);
983 		return B_NO_MEMORY;
984 	}
985 
986 	// copy the args over
987 
988 	teamArg->flat_args = flatArgs;
989 	teamArg->flat_args_size = flatArgsSize;
990 	teamArg->arg_count = argCount;
991 	teamArg->env_count = envCount;
992 	teamArg->error_port = port;
993 	teamArg->error_token = token;
994 
995 	*_teamArg = teamArg;
996 	return B_OK;
997 }
998 
999 
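/*!	Entry function of a new team's main thread. It sets up the thread's user
	stack and TLS, copies the program arguments and environment onto the
	stack, loads runtime_loader into the team, and finally enters userspace.
	It only returns in case of an error.
*/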
1000 static int32
1001 team_create_thread_start(void* args)
1002 {
1003 	status_t err;
1004 	struct thread* thread;
1005 	struct team* team;
1006 	struct team_arg* teamArgs = (struct team_arg*)args;
1007 	const char* path;
1008 	addr_t entry;
1009 	char userStackName[128];
1010 	uint32 sizeLeft;
1011 	char** userArgs;
1012 	char** userEnv;
1013 	struct user_space_program_args* programArgs;
1014 	uint32 argCount, envCount, i;
1015 
1016 	thread = thread_get_current_thread();
1017 	team = thread->team;
1018 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
1019 
1020 	TRACE(("team_create_thread_start: entry thread %ld\n", thread->id));
1021 
1022 	// get a user thread for the main thread
1023 	thread->user_thread = team_allocate_user_thread(team);
1024 
1025 	// create an initial primary stack area
1026 
1027 	// Main stack area layout is currently as follows (starting from 0):
1028 	//
1029 	// size								| usage
1030 	// ---------------------------------+--------------------------------
1031 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
1032 	// TLS_SIZE							| TLS data
1033 	// sizeof(user_space_program_args)	| argument structure for the runtime
1034 	//									| loader
1035 	// flat arguments size				| flat process arguments and environment
1036 
1037 	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
1038 	// the heap
1039 	// TODO: we could reserve the whole USER_STACK_REGION upfront...
1040 
1041 	sizeLeft = PAGE_ALIGN(USER_MAIN_THREAD_STACK_SIZE
1042 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE + TLS_SIZE
1043 		+ sizeof(struct user_space_program_args) + teamArgs->flat_args_size);
1044 	thread->user_stack_base
1045 		= USER_STACK_REGION + USER_STACK_REGION_SIZE - sizeLeft;
1046 	thread->user_stack_size = USER_MAIN_THREAD_STACK_SIZE
1047 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
1048 		// the exact location at the end of the user stack area
1049 
1050 	sprintf(userStackName, "%s_main_stack", team->name);
1051 	thread->user_stack_area = create_area_etc(team->id, userStackName,
1052 		(void**)&thread->user_stack_base, B_EXACT_ADDRESS, sizeLeft, B_NO_LOCK,
1053 		B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0, 0);
1054 	if (thread->user_stack_area < 0) {
1055 		dprintf("team_create_thread_start: could not create default user stack "
1056 			"region: %s\n", strerror(thread->user_stack_area));
1057 
1058 		free_team_arg(teamArgs);
1059 		return thread->user_stack_area;
1060 	}
1061 
1062 	// now that the TLS area is allocated, initialize TLS
1063 	arch_thread_init_tls(thread);
1064 
1065 	argCount = teamArgs->arg_count;
1066 	envCount = teamArgs->env_count;
1067 
1068 	programArgs = (struct user_space_program_args*)(thread->user_stack_base
1069 		+ thread->user_stack_size + TLS_SIZE);
1070 
1071 	userArgs = (char**)(programArgs + 1);
1072 	userEnv = userArgs + argCount + 1;
1073 	path = teamArgs->path;
1074 
1075 	if (user_strlcpy(programArgs->program_path, path,
1076 				sizeof(programArgs->program_path)) < B_OK
1077 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1078 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
1079 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1080 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
1081 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1082 				sizeof(port_id)) < B_OK
1083 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1084 				sizeof(uint32)) < B_OK
1085 		|| user_memcpy(userArgs, teamArgs->flat_args,
1086 				teamArgs->flat_args_size) < B_OK) {
1087 		// the team deletion process will clean this mess
1088 		return B_BAD_ADDRESS;
1089 	}
1090 
1091 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1092 
1093 	// add args to info member
1094 	team->args[0] = 0;
1095 	strlcpy(team->args, path, sizeof(team->args));
1096 	for (i = 1; i < argCount; i++) {
1097 		strlcat(team->args, " ", sizeof(team->args));
1098 		strlcat(team->args, teamArgs->flat_args[i], sizeof(team->args));
1099 	}
1100 
1101 	free_team_arg(teamArgs);
1102 		// the arguments are already on the user stack; we no longer need
1103 		// them in this form
1104 
1105 	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
1106 	// automatic variables with function scope will never be destroyed.
1107 	{
1108 		// find runtime_loader path
1109 		KPath runtimeLoaderPath;
1110 		err = find_directory(B_BEOS_SYSTEM_DIRECTORY, gBootDevice, false,
1111 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1112 		if (err < B_OK) {
1113 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1114 				strerror(err)));
1115 			return err;
1116 		}
1117 		runtimeLoaderPath.UnlockBuffer();
1118 		err = runtimeLoaderPath.Append("runtime_loader");
1119 
1120 		if (err == B_OK) {
1121 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
1122 				&entry);
1123 		}
1124 	}
1125 
1126 	if (err < B_OK) {
1127 		// Luckily, we don't have to clean up the mess we created - that's
1128 		// done for us by the normal team deletion process
1129 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1130 			"%s\n", strerror(err)));
1131 		return err;
1132 	}
1133 
1134 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1135 
1136 	team->state = TEAM_STATE_NORMAL;
1137 
1138 	// jump to the entry point in user space
1139 	return arch_thread_enter_userspace(thread, entry, programArgs, NULL);
1140 		// only returns in case of error
1141 }
1142 
1143 
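/*!	Creates a new team that runs the executable given by flatArgs[0] and
	spawns its main thread. Returns the ID of the main thread, or an error
	code. If B_WAIT_TILL_LOADED is set in \a flags, the call only returns
	after runtime_loader has finished (or failed) loading the image. On
	success ownership of the flat arguments is transferred and \a _flatArgs
	is set to NULL.
*/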
1144 static thread_id
1145 load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1146 	int32 envCount, int32 priority, team_id parentID, uint32 flags,
1147 	port_id errorPort, uint32 errorToken)
1148 {
1149 	char** flatArgs = _flatArgs;
1150 	struct team* team;
1151 	const char* threadName;
1152 	thread_id thread;
1153 	status_t status;
1154 	cpu_status state;
1155 	struct team_arg* teamArgs;
1156 	struct team_loading_info loadingInfo;
1157 	io_context* parentIOContext = NULL;
1158 
1159 	if (flatArgs == NULL || argCount == 0)
1160 		return B_BAD_VALUE;
1161 
1162 	const char* path = flatArgs[0];
1163 
1164 	TRACE(("load_image_internal: name '%s', args = %p, argCount = %ld\n",
1165 		path, flatArgs, argCount));
1166 
1167 	team = create_team_struct(path, false);
1168 	if (team == NULL)
1169 		return B_NO_MEMORY;
1170 
1171 	if (flags & B_WAIT_TILL_LOADED) {
1172 		loadingInfo.thread = thread_get_current_thread();
1173 		loadingInfo.result = B_ERROR;
1174 		loadingInfo.done = false;
1175 		team->loading_info = &loadingInfo;
1176 	}
1177 
1178  	InterruptsSpinLocker teamLocker(gTeamSpinlock);
1179 
1180 	// get the parent team
1181 	struct team* parent;
1182 
1183 	if (parentID == B_CURRENT_TEAM)
1184 		parent = thread_get_current_thread()->team;
1185 	else
1186 		parent = team_get_team_struct_locked(parentID);
1187 
1188 	if (parent == NULL) {
1189 		teamLocker.Unlock();
1190 		status = B_BAD_TEAM_ID;
1191 		goto err0;
1192 	}
1193 
1194 	// inherit the parent's user/group
1195 	inherit_parent_user_and_group_locked(team, parent);
1196 
1197 	hash_insert(sTeamHash, team);
1198 	insert_team_into_parent(parent, team);
1199 	insert_team_into_group(parent->group, team);
1200 	sUsedTeams++;
1201 
1202 	// get a reference to the parent's I/O context -- we need it to create ours
1203 	parentIOContext = parent->io_context;
1204 	vfs_get_io_context(parentIOContext);
1205 
1206 	teamLocker.Unlock();
1207 
1208 	// check the executable's set-user/group-id permission
1209 	update_set_id_user_and_group(team, path);
1210 
1211 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1212 		envCount, errorPort, errorToken);
1213 
1214 	if (status != B_OK)
1215 		goto err1;
1216 
1217 	_flatArgs = NULL;
1218 		// args are owned by the team_arg structure now
1219 
1220 	// create a new io_context for this team
1221 	team->io_context = vfs_new_io_context(parentIOContext, true);
1222 	if (!team->io_context) {
1223 		status = B_NO_MEMORY;
1224 		goto err2;
1225 	}
1226 
1227 	// We don't need the parent's I/O context any longer.
1228 	vfs_put_io_context(parentIOContext);
1229 	parentIOContext = NULL;
1230 
1231 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1232 	vfs_exec_io_context(team->io_context);
1233 
1234 	// create an address space for this team
1235 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
1236 		&team->address_space);
1237 	if (status != B_OK)
1238 		goto err3;
1239 
1240 	// cut the path from the main thread name
1241 	threadName = strrchr(path, '/');
1242 	if (threadName != NULL)
1243 		threadName++;
1244 	else
1245 		threadName = path;
1246 
1247 	// create the user data area
1248 	status = create_team_user_data(team);
1249 	if (status != B_OK)
1250 		goto err4;
1251 
1252 	// notify team listeners
1253 	sNotificationService.Notify(TEAM_ADDED, team);
1254 
1255 	// Create a kernel thread, but under the context of the new team
1256 	// The new thread will take over ownership of teamArgs
1257 	thread = spawn_kernel_thread_etc(team_create_thread_start, threadName,
1258 		B_NORMAL_PRIORITY, teamArgs, team->id, team->id);
1259 	if (thread < 0) {
1260 		status = thread;
1261 		goto err5;
1262 	}
1263 
1264 	// wait for the loader of the new team to finish its work
1265 	if ((flags & B_WAIT_TILL_LOADED) != 0) {
1266 		struct thread* mainThread;
1267 
1268 		state = disable_interrupts();
1269 		GRAB_THREAD_LOCK();
1270 
1271 		mainThread = thread_get_thread_struct_locked(thread);
1272 		if (mainThread) {
1273 			// resume the team's main thread
1274 			if (mainThread->state == B_THREAD_SUSPENDED)
1275 				scheduler_enqueue_in_run_queue(mainThread);
1276 
1277 			// Now suspend ourselves until loading is finished.
1278 			// We will be woken either by the thread, when it finished or
1279 			// aborted loading, or when the team is going to die (e.g. is
1280 			// killed). In either case the one setting `loadingInfo.done' is
1281 			// responsible for removing the info from the team structure.
1282 			while (!loadingInfo.done) {
1283 				thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
1284 				scheduler_reschedule();
1285 			}
1286 		} else {
1287 			// Impressive! Someone managed to kill the thread in this short
1288 			// time.
1289 		}
1290 
1291 		RELEASE_THREAD_LOCK();
1292 		restore_interrupts(state);
1293 
1294 		if (loadingInfo.result < B_OK)
1295 			return loadingInfo.result;
1296 	}
1297 
1298 	// notify the debugger
1299 	user_debug_team_created(team->id);
1300 
1301 	return thread;
1302 
1303 err5:
1304 	sNotificationService.Notify(TEAM_REMOVED, team);
1305 	delete_team_user_data(team);
1306 err4:
1307 	team->address_space->Put();
1308 err3:
1309 	vfs_put_io_context(team->io_context);
1310 err2:
1311 	free_team_arg(teamArgs);
1312 err1:
1313 	if (parentIOContext != NULL)
1314 		vfs_put_io_context(parentIOContext);
1315 
1316 	// Remove the team structure from the team hash table and delete the team
1317 	// structure
1318 	state = disable_interrupts();
1319 	GRAB_TEAM_LOCK();
1320 
1321 	remove_team_from_group(team);
1322 	remove_team_from_parent(team->parent, team);
1323 	hash_remove(sTeamHash, team);
1324 
1325 	RELEASE_TEAM_LOCK();
1326 	restore_interrupts(state);
1327 
1328 err0:
1329 	delete_team_struct(team);
1330 
1331 	return status;
1332 }
1333 
1334 
1335 /*!	Almost shuts down the current team and loads a new image into it.
1336 	If successful, this function does not return and will take over ownership of
1337 	the arguments provided.
1338 	This function may only be called from user space.
1339 */
1340 static status_t
1341 exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
1342 	int32 argCount, int32 envCount)
1343 {
1344 	// NOTE: Since this function normally doesn't return, don't use automatic
1345 	// variables that need destruction in the function scope.
1346 	char** flatArgs = _flatArgs;
1347 	struct team* team = thread_get_current_thread()->team;
1348 	struct team_arg* teamArgs;
1349 	const char* threadName;
1350 	status_t status = B_OK;
1351 	cpu_status state;
1352 	struct thread* thread;
1353 	thread_id nubThreadID = -1;
1354 
1355 	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
1356 		path, argCount, envCount, team->id));
1357 
1358 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1359 
1360 	// switching the kernel at run time is probably not a good idea :)
1361 	if (team == team_get_kernel_team())
1362 		return B_NOT_ALLOWED;
1363 
1364 	// we currently need to be single threaded here
1365 	// ToDo: maybe we should just kill all other threads and
1366 	//	make the current thread the team's main thread?
1367 	if (team->main_thread != thread_get_current_thread())
1368 		return B_NOT_ALLOWED;
1369 
1370 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1371 	// We iterate through the thread list to make sure that there's no other
1372 	// thread.
1373 	state = disable_interrupts();
1374 	GRAB_TEAM_LOCK();
1375 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1376 
1377 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1378 		nubThreadID = team->debug_info.nub_thread;
1379 
1380 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1381 
1382 	for (thread = team->thread_list; thread; thread = thread->team_next) {
1383 		if (thread != team->main_thread && thread->id != nubThreadID) {
1384 			status = B_NOT_ALLOWED;
1385 			break;
1386 		}
1387 	}
1388 
1389 	RELEASE_TEAM_LOCK();
1390 	restore_interrupts(state);
1391 
1392 	if (status != B_OK)
1393 		return status;
1394 
1395 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1396 		envCount, -1, 0);
1397 
1398 	if (status != B_OK)
1399 		return status;
1400 
1401 	_flatArgs = NULL;
1402 		// args are owned by the team_arg structure now
1403 
1404 	// ToDo: remove team resources if there are any left
1405 	// thread_atkernel_exit() might not be called at all
1406 
1407 	thread_reset_for_exec();
1408 
1409 	user_debug_prepare_for_exec();
1410 
1411 	delete_team_user_data(team);
1412 	vm_delete_areas(team->address_space, false);
1413 	xsi_sem_undo(team);
1414 	delete_owned_ports(team);
1415 	sem_delete_owned_sems(team);
1416 	remove_images(team);
1417 	vfs_exec_io_context(team->io_context);
1418 	delete_realtime_sem_context(team->realtime_sem_context);
1419 	team->realtime_sem_context = NULL;
1420 
1421 	status = create_team_user_data(team);
1422 	if (status != B_OK) {
1423 		// creating the user data failed -- we're toast
1424 		// TODO: We should better keep the old user area in the first place.
1425 		exit_thread(status);
1426 		return status;
1427 	}
1428 
1429 	user_debug_finish_after_exec();
1430 
1431 	// rename the team
1432 
1433 	set_team_name(team, path);
1434 
1435 	// cut the path from the team name and rename the main thread, too
1436 	threadName = strrchr(path, '/');
1437 	if (threadName != NULL)
1438 		threadName++;
1439 	else
1440 		threadName = path;
1441 	rename_thread(thread_get_current_thread_id(), threadName);
1442 
1443 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1444 
1445 	// Update user/group according to the executable's set-user/group-id
1446 	// permission.
1447 	update_set_id_user_and_group(team, path);
1448 
1449 	user_debug_team_exec();
1450 
1451 	// notify team listeners
1452 	sNotificationService.Notify(TEAM_EXEC, team);
1453 
1454 	status = team_create_thread_start(teamArgs);
1455 		// this one usually doesn't return...
1456 
1457 	// sorry, we have to kill ourselves, there is no way out anymore
1458 	// (without any areas left and all that)
1459 	exit_thread(status);
1460 
1461 	// we return a status here since the signal that is sent by the
1462 	// call above is not immediately handled
1463 	return B_ERROR;
1464 }
1465 
1466 
1467 /*! This is the first function to be called from the newly created
1468 	main child thread.
1469 	It will fill in everything that's left to do from fork_arg, and
1470 	return from the parent's fork() syscall to the child.
1471 */
1472 static int32
1473 fork_team_thread_start(void* _args)
1474 {
1475 	struct thread* thread = thread_get_current_thread();
1476 	struct fork_arg* forkArgs = (struct fork_arg*)_args;
1477 
1478 	struct arch_fork_arg archArgs = forkArgs->arch_info;
1479 		// we need a local copy of the arch dependent part
1480 
1481 	thread->user_stack_area = forkArgs->user_stack_area;
1482 	thread->user_stack_base = forkArgs->user_stack_base;
1483 	thread->user_stack_size = forkArgs->user_stack_size;
1484 	thread->user_local_storage = forkArgs->user_local_storage;
1485 	thread->sig_block_mask = forkArgs->sig_block_mask;
1486 	thread->user_thread = forkArgs->user_thread;
1487 	memcpy(thread->sig_action, forkArgs->sig_action,
1488 		sizeof(forkArgs->sig_action));
1489 	thread->signal_stack_base = forkArgs->signal_stack_base;
1490 	thread->signal_stack_size = forkArgs->signal_stack_size;
1491 	thread->signal_stack_enabled = forkArgs->signal_stack_enabled;
1492 
1493 	arch_thread_init_tls(thread);
1494 
1495 	free(forkArgs);
1496 
1497 	// set frame of the parent thread to this one, too
1498 
1499 	arch_restore_fork_frame(&archArgs);
1500 		// This one won't return here
1501 
1502 	return 0;
1503 }
1504 
1505 
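/*!	Kernel part of fork(): creates a copy of the current team, duplicating its
	I/O context, realtime semaphore context, areas (via vm_copy_area()), and
	image list, and spawns a main thread for the child that continues in
	fork_team_thread_start(). Returns the ID of the child's main thread.
*/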
1506 static thread_id
1507 fork_team(void)
1508 {
1509 	struct thread* parentThread = thread_get_current_thread();
1510 	struct team* parentTeam = parentThread->team;
1511 	struct team* team;
1512 	struct fork_arg* forkArgs;
1513 	struct area_info info;
1514 	thread_id threadID;
1515 	status_t status;
1516 	int32 cookie;
1517 
1518 	TRACE(("fork_team(): team %ld\n", parentTeam->id));
1519 
1520 	if (parentTeam == team_get_kernel_team())
1521 		return B_NOT_ALLOWED;
1522 
1523 	// create a new team
1524 	// TODO: this is very similar to load_image_internal() - maybe we can do
1525 	// something about it :)
1526 
1527 	team = create_team_struct(parentTeam->name, false);
1528 	if (team == NULL)
1529 		return B_NO_MEMORY;
1530 
1531 	strlcpy(team->args, parentTeam->args, sizeof(team->args));
1532 
1533 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
1534 
1535 	// Inherit the parent's user/group.
1536 	inherit_parent_user_and_group_locked(team, parentTeam);
1537 
1538 	hash_insert(sTeamHash, team);
1539 	insert_team_into_parent(parentTeam, team);
1540 	insert_team_into_group(parentTeam->group, team);
1541 	sUsedTeams++;
1542 
1543 	teamLocker.Unlock();
1544 
1545 	// inherit some team debug flags
1546 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
1547 		& B_TEAM_DEBUG_INHERITED_FLAGS;
1548 
1549 	forkArgs = (struct fork_arg*)malloc(sizeof(struct fork_arg));
1550 	if (forkArgs == NULL) {
1551 		status = B_NO_MEMORY;
1552 		goto err1;
1553 	}
1554 
1555 	// create a new io_context for this team
1556 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
1557 	if (!team->io_context) {
1558 		status = B_NO_MEMORY;
1559 		goto err2;
1560 	}
1561 
1562 	// duplicate the realtime sem context
1563 	if (parentTeam->realtime_sem_context) {
1564 		team->realtime_sem_context = clone_realtime_sem_context(
1565 			parentTeam->realtime_sem_context);
1566 		if (team->realtime_sem_context == NULL) {
1567 			status = B_NO_MEMORY;
1568 			goto err25;
1569 		}
1570 	}
1571 
1572 	// create an address space for this team
1573 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
1574 		&team->address_space);
1575 	if (status < B_OK)
1576 		goto err3;
1577 
1578 	// copy all areas of the team
1579 	// TODO: should be able to handle stack areas differently (i.e. don't have
1580 	// them copy-on-write)
1581 	// TODO: the stacks of all threads other than the current one could be left out
1582 
1583 	forkArgs->user_thread = NULL;
1584 
1585 	cookie = 0;
1586 	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
1587 		if (info.area == parentTeam->user_data_area) {
1588 			// don't clone the user area; just create a new one
1589 			status = create_team_user_data(team);
1590 			if (status != B_OK)
1591 				break;
1592 
1593 			forkArgs->user_thread = team_allocate_user_thread(team);
1594 		} else {
1595 			void* address;
1596 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
1597 				&address, B_CLONE_ADDRESS, info.protection, info.area);
1598 			if (area < B_OK) {
1599 				status = area;
1600 				break;
1601 			}
1602 
1603 			if (info.area == parentThread->user_stack_area)
1604 				forkArgs->user_stack_area = area;
1605 		}
1606 	}
1607 
1608 	if (status < B_OK)
1609 		goto err4;
1610 
1611 	if (forkArgs->user_thread == NULL) {
1612 #if KDEBUG
1613 		panic("user data area not found, parent area is %ld",
1614 			parentTeam->user_data_area);
1615 #endif
1616 		status = B_ERROR;
1617 		goto err4;
1618 	}
1619 
1620 	forkArgs->user_stack_base = parentThread->user_stack_base;
1621 	forkArgs->user_stack_size = parentThread->user_stack_size;
1622 	forkArgs->user_local_storage = parentThread->user_local_storage;
1623 	forkArgs->sig_block_mask = parentThread->sig_block_mask;
1624 	memcpy(forkArgs->sig_action, parentThread->sig_action,
1625 		sizeof(forkArgs->sig_action));
1626 	forkArgs->signal_stack_base = parentThread->signal_stack_base;
1627 	forkArgs->signal_stack_size = parentThread->signal_stack_size;
1628 	forkArgs->signal_stack_enabled = parentThread->signal_stack_enabled;
1629 
1630 	arch_store_fork_frame(&forkArgs->arch_info);
1631 
1632 	// copy image list
1633 	image_info imageInfo;
1634 	cookie = 0;
1635 	while (get_next_image_info(parentTeam->id, &cookie, &imageInfo) == B_OK) {
1636 		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
1637 		if (image < 0)
1638 			goto err5;
1639 	}
1640 
1641 	// notify team listeners
1642 	sNotificationService.Notify(TEAM_ADDED, team);
1643 
1644 	// create a kernel thread under the context of the new team
1645 	threadID = spawn_kernel_thread_etc(fork_team_thread_start,
1646 		parentThread->name, parentThread->priority, forkArgs,
1647 		team->id, team->id);
1648 	if (threadID < 0) {
1649 		status = threadID;
1650 		goto err5;
1651 	}
1652 
1653 	// notify the debugger
1654 	user_debug_team_created(team->id);
1655 
1656 	T(TeamForked(threadID));
1657 
1658 	resume_thread(threadID);
1659 	return threadID;
1660 
1661 err5:
1662 	sNotificationService.Notify(TEAM_REMOVED, team);
1663 	remove_images(team);
1664 err4:
1665 	team->address_space->RemoveAndPut();
1666 err3:
1667 	delete_realtime_sem_context(team->realtime_sem_context);
1668 err25:
1669 	vfs_put_io_context(team->io_context);
1670 err2:
1671 	free(forkArgs);
1672 err1:
1673 	// remove the team structure from the team hash table and delete the team
1674 	// structure
1675 	teamLocker.Lock();
1676 
1677 	remove_team_from_group(team);
1678 	remove_team_from_parent(parentTeam, team);
1679 	hash_remove(sTeamHash, team);
1680 
1681 	teamLocker.Unlock();
1682 
1683 	delete_team_struct(team);
1684 
1685 	return status;
1686 }
1687 
1688 
1689 /*!	Returns whether the specified \a team has any children belonging to the
1690 	specified \a group.
1691 	Must be called with the team lock held.
1692 */
1693 static bool
1694 has_children_in_group(struct team* parent, pid_t groupID)
1695 {
1696 	struct team* team;
1697 
1698 	struct process_group* group = team_get_process_group_locked(
1699 		parent->group->session, groupID);
1700 	if (group == NULL)
1701 		return false;
1702 
1703 	for (team = group->teams; team; team = team->group_next) {
1704 		if (team->parent == parent)
1705 			return true;
1706 	}
1707 
1708 	return false;
1709 }
1710 
1711 
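/*!	Searches \a children for an entry matching \a id, using waitpid()
	semantics: a positive \a id matches the entry with that thread ID, -1
	matches any entry, and otherwise entries belonging to the process group
	-id are matched.
*/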
1712 static job_control_entry*
1713 get_job_control_entry(team_job_control_children* children, pid_t id)
1714 {
1715 	for (JobControlEntryList::Iterator it = children->entries.GetIterator();
1716 		 job_control_entry* entry = it.Next();) {
1717 
1718 		if (id > 0) {
1719 			if (entry->thread == id)
1720 				return entry;
1721 		} else if (id == -1) {
1722 			return entry;
1723 		} else {
1724 			pid_t processGroup
1725 				= (entry->team ? entry->team->group_id : entry->group_id);
1726 			if (processGroup == -id)
1727 				return entry;
1728 		}
1729 	}
1730 
1731 	return NULL;
1732 }
1733 
1734 
1735 static job_control_entry*
1736 get_job_control_entry(struct team* team, pid_t id, uint32 flags)
1737 {
1738 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
1739 
1740 	if (entry == NULL && (flags & WCONTINUED) != 0)
1741 		entry = get_job_control_entry(team->continued_children, id);
1742 
1743 	if (entry == NULL && (flags & WUNTRACED) != 0)
1744 		entry = get_job_control_entry(team->stopped_children, id);
1745 
1746 	return entry;
1747 }
1748 
1749 
1750 job_control_entry::job_control_entry()
1751 	:
1752 	has_group_ref(false)
1753 {
1754 }
1755 
1756 
1757 job_control_entry::~job_control_entry()
1758 {
1759 	if (has_group_ref) {
1760 		InterruptsSpinLocker locker(gTeamSpinlock);
1761 		release_process_group_ref(group_id);
1762 	}
1763 }
1764 
1765 
1766 /*!	Team and thread lock must be held.
1767 */
1768 void
1769 job_control_entry::InitDeadState()
1770 {
1771 	if (team != NULL) {
1772 		struct thread* thread = team->main_thread;
1773 		group_id = team->group_id;
1774 		this->thread = thread->id;
1775 		status = thread->exit.status;
1776 		reason = thread->exit.reason;
1777 		signal = thread->exit.signal;
1778 		team = NULL;
1779 		acquire_process_group_ref(group_id);
1780 		has_group_ref = true;
1781 	}
1782 }
1783 
1784 
1785 job_control_entry&
1786 job_control_entry::operator=(const job_control_entry& other)
1787 {
1788 	state = other.state;
1789 	thread = other.thread;
1790 	has_group_ref = false;
1791 	team = other.team;
1792 	group_id = other.group_id;
1793 	status = other.status;
1794 	reason = other.reason;
1795 	signal = other.signal;
1796 
1797 	return *this;
1798 }
1799 
1800 
1801 /*! This is the kernel backend for waitpid(). It is a bit more powerful than
1802 	waitpid() when it comes to reporting the reason why a thread has died.
1803 */
1804 static thread_id
1805 wait_for_child(pid_t child, uint32 flags, int32* _reason,
1806 	status_t* _returnCode)
1807 {
1808 	struct thread* thread = thread_get_current_thread();
1809 	struct team* team = thread->team;
1810 	struct job_control_entry foundEntry;
1811 	struct job_control_entry* freeDeathEntry = NULL;
1812 	status_t status = B_OK;
1813 
1814 	TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));
1815 
1816 	T(WaitForChild(child, flags));
1817 
1818 	if (child == 0) {
1819 		// wait for all children in the process group of the calling team
1820 		child = -team->group_id;
1821 	}
1822 
1823 	bool ignoreFoundEntries = false;
1824 	bool ignoreFoundEntriesChecked = false;
1825 
1826 	while (true) {
1827 		InterruptsSpinLocker locker(gTeamSpinlock);
1828 
1829 		// check whether any condition holds
1830 		job_control_entry* entry = get_job_control_entry(team, child, flags);
1831 
1832 		// If we don't have an entry yet, check whether there are any children
1833 		// complying to the process group specification at all.
1834 		if (entry == NULL) {
1835 			// No success yet -- check whether there are any children we could
1836 			// wait for.
1837 			bool childrenExist = false;
1838 			if (child == -1) {
1839 				childrenExist = team->children != NULL;
1840 			} else if (child < -1) {
1841 				childrenExist = has_children_in_group(team, -child);
1842 			} else {
1843 				if (struct team* childTeam = team_get_team_struct_locked(child))
1844 					childrenExist = childTeam->parent == team;
1845 			}
1846 
1847 			if (!childrenExist) {
1848 				// there is no child we could wait for
1849 				status = ECHILD;
1850 			} else {
1851 				// the children we're waiting for are still running
1852 				status = B_WOULD_BLOCK;
1853 			}
1854 		} else {
1855 			// got something
1856 			foundEntry = *entry;
1857 			if (entry->state == JOB_CONTROL_STATE_DEAD) {
1858 				// The child is dead. Reap its death entry.
1859 				freeDeathEntry = entry;
1860 				team->dead_children->entries.Remove(entry);
1861 				team->dead_children->count--;
1862 			} else {
1863 				// The child is well. Reset its job control state.
1864 				team_set_job_control_state(entry->team,
1865 					JOB_CONTROL_STATE_NONE, 0, false);
1866 			}
1867 		}
1868 
1869 		// If we haven't got anything yet, prepare for waiting for the
1870 		// condition variable.
1871 		ConditionVariableEntry deadWaitEntry;
1872 
1873 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
1874 			team->dead_children->condition_variable.Add(&deadWaitEntry);
1875 
1876 		locker.Unlock();
1877 
1878 		// we got our entry and can return to our caller
1879 		if (status == B_OK) {
1880 			if (ignoreFoundEntries) {
1881 				// ... unless we shall ignore found entries
1882 				delete freeDeathEntry;
1883 				freeDeathEntry = NULL;
1884 				continue;
1885 			}
1886 
1887 			break;
1888 		}
1889 
1890 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
1891 			T(WaitForChildDone(status));
1892 			return status;
1893 		}
1894 
1895 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
1896 		if (status == B_INTERRUPTED) {
1897 			T(WaitForChildDone(status));
1898 			return status;
1899 		}
1900 
1901 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
1902 		// all our children are dead and fail with ECHILD. We check the
1903 		// condition at this point.
1904 		if (!ignoreFoundEntriesChecked) {
1905 			struct sigaction& handler = thread->sig_action[SIGCHLD - 1];
1906 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
1907 				|| handler.sa_handler == SIG_IGN) {
1908 				ignoreFoundEntries = true;
1909 			}
1910 
1911 			ignoreFoundEntriesChecked = true;
1912 		}
1913 	}
1914 
1915 	delete freeDeathEntry;
1916 
1917 	// When we get here, we have a valid death entry, and it has already been
1918 	// unregistered from the team or group.
1919 	int reason = 0;
1920 	switch (foundEntry.state) {
1921 		case JOB_CONTROL_STATE_DEAD:
1922 			reason = foundEntry.reason;
1923 			break;
1924 		case JOB_CONTROL_STATE_STOPPED:
1925 			reason = THREAD_STOPPED;
1926 			break;
1927 		case JOB_CONTROL_STATE_CONTINUED:
1928 			reason = THREAD_CONTINUED;
1929 			break;
1930 		case JOB_CONTROL_STATE_NONE:
1931 			// can't happen
1932 			break;
1933 	}
1934 
1935 	*_returnCode = foundEntry.status;
1936 	*_reason = (foundEntry.signal << 16) | reason;
1937 
1938 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
1939 	// status is available.
1940 	if (is_signal_blocked(SIGCHLD)) {
1941 		InterruptsSpinLocker locker(gTeamSpinlock);
1942 
1943 		if (get_job_control_entry(team, child, flags) == NULL)
1944 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));
1945 	}
1946 
1947 	// When the team is dead, the main thread continues to live in the kernel
1948 	// team for a very short time. To avoid surprises for the caller, we'd
1949 	// rather wait until the thread is really gone.
1950 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
1951 		wait_for_thread(foundEntry.thread, NULL);
1952 
1953 	T(WaitForChildDone(foundEntry));
1954 
1955 	return foundEntry.thread;
1956 }
1957 
1958 
1959 /*! Fills the team_info structure with information from the specified
1960 	team.
1961 	The team lock must be held when called.
1962 */
1963 static status_t
1964 fill_team_info(struct team* team, team_info* info, size_t size)
1965 {
1966 	if (size != sizeof(team_info))
1967 		return B_BAD_VALUE;
1968 
1969 	// ToDo: Set more information for team_info
1970 	memset(info, 0, size);
1971 
1972 	info->team = team->id;
1973 	info->thread_count = team->num_threads;
1974 	info->image_count = count_images(team);
1975 	//info->area_count =
1976 	info->debugger_nub_thread = team->debug_info.nub_thread;
1977 	info->debugger_nub_port = team->debug_info.nub_port;
1978 	//info->uid =
1979 	//info->gid =
1980 
1981 	strlcpy(info->args, team->args, sizeof(info->args));
1982 	info->argc = 1;
1983 
1984 	return B_OK;
1985 }
1986 
1987 
1988 /*!	Updates the \c orphaned field of a process_group and returns its new value.
1989 	Interrupts must be disabled and team lock be held.
1990 */
1991 static bool
1992 update_orphaned_process_group(process_group* group, pid_t dyingProcess)
1993 {
1994 	// Orphaned Process Group: "A process group in which the parent of every
1995 	// member is either itself a member of the group or is not a member of the
1996 	// group's session." (Open Group Base Specs Issue 6)
1997 
1998 	// once orphaned, things won't change (exception: cf. setpgid())
1999 	if (group->orphaned)
2000 		return true;
2001 
2002 	struct team* team = group->teams;
2003 	while (team != NULL) {
2004 		struct team* parent = team->parent;
2005 		if (team->id != dyingProcess && parent != NULL
2006 			&& parent->id != dyingProcess
2007 			&& parent->group_id != group->id
2008 			&& parent->session_id == group->session->id) {
2009 			return false;
2010 		}
2011 
2012 		team = team->group_next;
2013 	}
2014 
2015 	group->orphaned = true;
2016 	return true;
2017 }
2018 
2019 
2020 /*!	Returns whether the process group contains stopped processes.
2021 	Interrupts must be disabled and team lock be held.
2022 */
2023 static bool
2024 process_group_has_stopped_processes(process_group* group)
2025 {
2026 	SpinLocker _(gThreadSpinlock);
2027 
2028 	struct team* team = group->teams;
2029 	while (team != NULL) {
2030 		if (team->main_thread->state == B_THREAD_SUSPENDED)
2031 			return true;
2032 
2033 		team = team->group_next;
2034 	}
2035 
2036 	return false;
2037 }
2038 
2039 
2040 //	#pragma mark - Private kernel API
2041 
2042 
2043 status_t
2044 team_init(kernel_args* args)
2045 {
2046 	struct process_session* session;
2047 	struct process_group* group;
2048 
2049 	// create the team hash table
2050 	sTeamHash = hash_init(16, offsetof(struct team, next),
2051 		&team_struct_compare, &team_struct_hash);
2052 
2053 	sGroupHash = hash_init(16, offsetof(struct process_group, next),
2054 		&process_group_compare, &process_group_hash);
2055 
2056 	// create initial session and process groups
2057 
2058 	session = create_process_session(1);
2059 	if (session == NULL)
2060 		panic("Could not create initial session.\n");
2061 
2062 	group = create_process_group(1);
2063 	if (group == NULL)
2064 		panic("Could not create initial process group.\n");
2065 
2066 	insert_group_into_session(session, group);
2067 
2068 	// create the kernel team
2069 	sKernelTeam = create_team_struct("kernel_team", true);
2070 	if (sKernelTeam == NULL)
2071 		panic("could not create kernel team!\n");
2072 	strcpy(sKernelTeam->args, sKernelTeam->name);
2073 	sKernelTeam->state = TEAM_STATE_NORMAL;
2074 
2075 	sKernelTeam->saved_set_uid = 0;
2076 	sKernelTeam->real_uid = 0;
2077 	sKernelTeam->effective_uid = 0;
2078 	sKernelTeam->saved_set_gid = 0;
2079 	sKernelTeam->real_gid = 0;
2080 	sKernelTeam->effective_gid = 0;
2081 	sKernelTeam->supplementary_groups = NULL;
2082 	sKernelTeam->supplementary_group_count = 0;
2083 
2084 	insert_team_into_group(group, sKernelTeam);
2085 
2086 	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
2087 	if (sKernelTeam->io_context == NULL)
2088 		panic("could not create io_context for kernel team!\n");
2089 
2090 	// stick it in the team hash
2091 	hash_insert(sTeamHash, sKernelTeam);
2092 
2093 	add_debugger_command_etc("team", &dump_team_info,
2094 		"Dump info about a particular team",
2095 		"[ <id> | <address> | <name> ]\n"
2096 		"Prints information about the specified team. If no argument is given\n"
2097 		"the current team is selected.\n"
2098 		"  <id>       - The ID of the team.\n"
2099 		"  <address>  - The address of the team structure.\n"
2100 		"  <name>     - The team's name.\n", 0);
2101 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2102 		"\n"
2103 		"Prints a list of all existing teams.\n", 0);
2104 
2105 	new(&sNotificationService) TeamNotificationService();
2106 
2107 	return B_OK;
2108 }
2109 
2110 
2111 int32
2112 team_max_teams(void)
2113 {
2114 	return sMaxTeams;
2115 }
2116 
2117 
2118 int32
2119 team_used_teams(void)
2120 {
2121 	return sUsedTeams;
2122 }
2123 
2124 
2125 /*!	Iterates through the list of teams. The team spinlock must be held.
2126 */
2127 struct team*
2128 team_iterate_through_teams(team_iterator_callback callback, void* cookie)
2129 {
2130 	struct hash_iterator iterator;
2131 	hash_open(sTeamHash, &iterator);
2132 
2133 	struct team* team;
2134 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
2135 		if (callback(team, cookie))
2136 			break;
2137 	}
2138 
2139 	hash_close(sTeamHash, &iterator, false);
2140 
2141 	return team;
2142 }
2143 
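
/*!	A minimal usage sketch (illustration only, not part of the kernel API):
	a hypothetical helper that finds a team by name by means of
	team_iterate_through_teams(). It assumes the callback type is compatible
	with bool (*)(struct team*, void*); find_team_by_name() and its callback
	are made-up names.
*/
#if 0
static bool
match_team_name_callback(struct team* team, void* cookie)
{
	// returning true stops the iteration and makes this team the result
	return strcmp(team->name, (const char*)cookie) == 0;
}


static struct team*
find_team_by_name(const char* name)
{
	// the iteration requires the team spinlock to be held
	InterruptsSpinLocker _(gTeamSpinlock);
	return team_iterate_through_teams(&match_team_name_callback, (void*)name);
}
#endif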
2144 
2145 /*! Returns the team's death entry for the given child thread ID, if there
2146 	is one. You need to have the team lock held when calling this function.
2147 */
2148 job_control_entry*
2149 team_get_death_entry(struct team* team, thread_id child, bool* _deleteEntry)
2150 {
2151 	if (child <= 0)
2152 		return NULL;
2153 
2154 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2155 		child);
2156 	if (entry) {
2157 		// remove the entry only, if the caller is the parent of the found team
2158 		// only remove the entry if the caller is the parent of the found team
2159 			team->dead_children->entries.Remove(entry);
2160 			team->dead_children->count--;
2161 			*_deleteEntry = true;
2162 		} else {
2163 			*_deleteEntry = false;
2164 		}
2165 	}
2166 
2167 	return entry;
2168 }
2169 
2170 
2171 /*! Quick check to see if we have a valid team ID. */
2172 bool
2173 team_is_valid(team_id id)
2174 {
2175 	struct team* team;
2176 	cpu_status state;
2177 
2178 	if (id <= 0)
2179 		return false;
2180 
2181 	state = disable_interrupts();
2182 	GRAB_TEAM_LOCK();
2183 
2184 	team = team_get_team_struct_locked(id);
2185 
2186 	RELEASE_TEAM_LOCK();
2187 	restore_interrupts(state);
2188 
2189 	return team != NULL;
2190 }
2191 
2192 
2193 struct team*
2194 team_get_team_struct_locked(team_id id)
2195 {
2196 	struct team_key key;
2197 	key.id = id;
2198 
2199 	return (struct team*)hash_lookup(sTeamHash, &key);
2200 }
2201 
2202 
2203 /*! This searches the given session for the specified process group ID.
2204 	You must hold the team lock when you call this function.
2205 */
2206 struct process_group*
2207 team_get_process_group_locked(struct process_session* session, pid_t id)
2208 {
2209 	struct process_group* group;
2210 	struct team_key key;
2211 	key.id = id;
2212 
2213 	group = (struct process_group*)hash_lookup(sGroupHash, &key);
2214 	if (group != NULL && (session == NULL || session == group->session))
2215 		return group;
2216 
2217 	return NULL;
2218 }
2219 
2220 
2221 void
2222 team_delete_process_group(struct process_group* group)
2223 {
2224 	if (group == NULL)
2225 		return;
2226 
2227 	TRACE(("team_delete_process_group(id = %ld)\n", group->id));
2228 
2229 	// remove_group_from_session() keeps this pointer around
2230 	// only if the session can be freed as well
2231 	if (group->session) {
2232 		TRACE(("team_delete_process_group(): frees session %ld\n",
2233 			group->session->id));
2234 		free(group->session);
2235 	}
2236 
2237 	free(group);
2238 }
2239 
2240 
2241 void
2242 team_set_controlling_tty(int32 ttyIndex)
2243 {
2244 	struct team* team = thread_get_current_thread()->team;
2245 
2246 	InterruptsSpinLocker _(gTeamSpinlock);
2247 
2248 	team->group->session->controlling_tty = ttyIndex;
2249 	team->group->session->foreground_group = -1;
2250 }
2251 
2252 
2253 int32
2254 team_get_controlling_tty()
2255 {
2256 	struct team* team = thread_get_current_thread()->team;
2257 
2258 	InterruptsSpinLocker _(gTeamSpinlock);
2259 
2260 	return team->group->session->controlling_tty;
2261 }
2262 
2263 
2264 status_t
2265 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2266 {
2267 	struct thread* thread = thread_get_current_thread();
2268 	struct team* team = thread->team;
2269 
2270 	InterruptsSpinLocker locker(gTeamSpinlock);
2271 
2272 	process_session* session = team->group->session;
2273 
2274 	// must be the controlling tty of the calling process
2275 	if (session->controlling_tty != ttyIndex)
2276 		return ENOTTY;
2277 
2278 	// check process group -- must belong to our session
2279 	process_group* group = team_get_process_group_locked(session,
2280 		processGroupID);
2281 	if (group == NULL)
2282 		return B_BAD_VALUE;
2283 
2284 	// If we are a background group, we can't do that unharmed, only if we
2285 	// If we are a background group, we may only do that if we ignore or
2286 	// block SIGTTOU. Otherwise the group gets a SIGTTOU and the call fails.
2287 		&& session->foreground_group != team->group_id
2288 		&& thread->sig_action[SIGTTOU - 1].sa_handler != SIG_IGN
2289 		&& !is_signal_blocked(SIGTTOU)) {
2290 		pid_t groupID = team->group->id;
2291 		locker.Unlock();
2292 		send_signal(-groupID, SIGTTOU);
2293 		return B_INTERRUPTED;
2294 	}
2295 
2296 	team->group->session->foreground_group = processGroupID;
2297 
2298 	return B_OK;
2299 }
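
/*!	Usage sketch (illustration only): roughly how a tty driver's tcsetpgrp()
	path could sit on top of the function above; tty_set_foreground_group() is
	a made-up name. The error semantics follow from the code above.
*/
#if 0
static status_t
tty_set_foreground_group(int32 ttyIndex, pid_t groupID)
{
	// ENOTTY: ttyIndex is not the caller's controlling tty.
	// B_BAD_VALUE: groupID doesn't name a group in the caller's session.
	// B_INTERRUPTED: the caller belongs to a background group that neither
	// ignores nor blocks SIGTTOU -- the group has just been sent one.
	return team_set_foreground_process_group(ttyIndex, groupID);
}
#endif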
2300 
2301 
2302 /*!	Removes the specified team from the global team hash, and from its parent.
2303 	It also moves all of its children up to the parent.
2304 	You must hold the team lock when you call this function.
2305 */
2306 void
2307 team_remove_team(struct team* team)
2308 {
2309 	struct team* parent = team->parent;
2310 
2311 	// remember how long this team lasted
2312 	parent->dead_children->kernel_time += team->dead_threads_kernel_time
2313 		+ team->dead_children->kernel_time;
2314 	parent->dead_children->user_time += team->dead_threads_user_time
2315 		+ team->dead_children->user_time;
2316 
2317 	// Also grab the thread spinlock while removing the team from the hash.
2318 	// This makes the following sequence safe: grab teams lock, lookup team,
2319 	// grab threads lock, unlock teams lock,
2320 	// mutex_lock_threads_lock(<team related lock>), as used in the VFS code to
2321 	// lock another team's IO context.
2322 	GRAB_THREAD_LOCK();
2323 	hash_remove(sTeamHash, team);
2324 	RELEASE_THREAD_LOCK();
2325 	sUsedTeams--;
2326 
2327 	team->state = TEAM_STATE_DEATH;
2328 
2329 	// If we're a controlling process (i.e. a session leader with controlling
2330 	// terminal), there's a bit of signalling we have to do.
2331 	if (team->session_id == team->id
2332 		&& team->group->session->controlling_tty >= 0) {
2333 		process_session* session = team->group->session;
2334 
2335 		session->controlling_tty = -1;
2336 
2337 		// send SIGHUP to the foreground
2338 		if (session->foreground_group >= 0) {
2339 			send_signal_etc(-session->foreground_group, SIGHUP,
2340 				SIGNAL_FLAG_TEAMS_LOCKED);
2341 		}
2342 
2343 		// send SIGHUP + SIGCONT to all newly-orphaned process groups with
2344 		// stopped processes
2345 		struct team* child = team->children;
2346 		while (child != NULL) {
2347 			process_group* childGroup = child->group;
2348 			if (!childGroup->orphaned
2349 				&& update_orphaned_process_group(childGroup, team->id)
2350 				&& process_group_has_stopped_processes(childGroup)) {
2351 				send_signal_etc(-childGroup->id, SIGHUP,
2352 					SIGNAL_FLAG_TEAMS_LOCKED);
2353 				send_signal_etc(-childGroup->id, SIGCONT,
2354 					SIGNAL_FLAG_TEAMS_LOCKED);
2355 			}
2356 
2357 			child = child->siblings_next;
2358 		}
2359 	} else {
2360 		// update "orphaned" flags of all children's process groups
2361 		struct team* child = team->children;
2362 		while (child != NULL) {
2363 			process_group* childGroup = child->group;
2364 			if (!childGroup->orphaned)
2365 				update_orphaned_process_group(childGroup, team->id);
2366 
2367 			child = child->siblings_next;
2368 		}
2369 
2370 		// update "orphaned" flag of this team's process group
2371 		update_orphaned_process_group(team->group, team->id);
2372 	}
2373 
2374 	// reparent each of the team's children
2375 	reparent_children(team);
2376 
2377 	// remove us from our process group
2378 	remove_team_from_group(team);
2379 
2380 	// remove us from our parent
2381 	remove_team_from_parent(parent, team);
2382 }
2383 
2384 
2385 void
2386 team_delete_team(struct team* team)
2387 {
2388 	team_id teamID = team->id;
2389 	port_id debuggerPort = -1;
2390 	cpu_status state;
2391 
2392 	if (team->num_threads > 0) {
2393 		// there are other threads still in this team,
2394 		// cycle through and signal kill on each of the threads
2395 		// ToDo: this can be optimized. There's got to be a better solution.
2396 		struct thread* temp_thread;
2397 		char deathSemName[B_OS_NAME_LENGTH];
2398 		sem_id deathSem;
2399 		int32 threadCount;
2400 
2401 		sprintf(deathSemName, "team %ld death sem", teamID);
2402 		deathSem = create_sem(0, deathSemName);
2403 		if (deathSem < 0) {
2404 			panic("team_delete_team: cannot init death sem for team %ld\n",
2405 				teamID);
2406 		}
2407 
2408 		state = disable_interrupts();
2409 		GRAB_TEAM_LOCK();
2410 
2411 		team->death_sem = deathSem;
2412 		threadCount = team->num_threads;
2413 
2414 		// If the team was being debugged, that will stop with the termination
2415 		// of the nub thread. The team structure has already been removed from
2416 		// the team hash table at this point, so no one can install a debugger
2417 		// anymore. We fetch the debugger's port to send it a message at the
2418 		// bitter end.
2419 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2420 
2421 		if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2422 			debuggerPort = team->debug_info.debugger_port;
2423 
2424 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2425 
2426 		// We can safely walk the list because of the lock. No new threads can
2427 		// be created because the team is in the TEAM_STATE_DEATH state.
2428 		temp_thread = team->thread_list;
2429 		while (temp_thread) {
2430 			struct thread* next = temp_thread->team_next;
2431 
2432 			send_signal_etc(temp_thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2433 			temp_thread = next;
2434 		}
2435 
2436 		RELEASE_TEAM_LOCK();
2437 		restore_interrupts(state);
2438 
2439 		// wait until all threads in the team are dead.
2440 		acquire_sem_etc(team->death_sem, threadCount, 0, 0);
2441 		delete_sem(team->death_sem);
2442 	}
2443 
2444 	// If someone is waiting for this team to be loaded, but it dies
2445 	// unexpectedly before being done, we need to notify the waiting
2446 	// thread now.
2447 
2448 	state = disable_interrupts();
2449 	GRAB_TEAM_LOCK();
2450 
2451 	if (team->loading_info) {
2452 		// there's indeed someone waiting
2453 		struct team_loading_info* loadingInfo = team->loading_info;
2454 		team->loading_info = NULL;
2455 
2456 		loadingInfo->result = B_ERROR;
2457 		loadingInfo->done = true;
2458 
2459 		GRAB_THREAD_LOCK();
2460 
2461 		// wake up the waiting thread
2462 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
2463 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
2464 
2465 		RELEASE_THREAD_LOCK();
2466 	}
2467 
2468 	RELEASE_TEAM_LOCK();
2469 	restore_interrupts(state);
2470 
2471 	// notify team watchers
2472 
2473 	{
2474 		// we're not reachable from anyone anymore at this point, so we
2475 		// can safely access the list without any locking
2476 		struct team_watcher* watcher;
2477 		while ((watcher = (struct team_watcher*)list_remove_head_item(
2478 				&team->watcher_list)) != NULL) {
2479 			watcher->hook(teamID, watcher->data);
2480 			free(watcher);
2481 		}
2482 	}
2483 
2484 	sNotificationService.Notify(TEAM_REMOVED, team);
2485 
2486 	// free team resources
2487 
2488 	vfs_put_io_context(team->io_context);
2489 	delete_realtime_sem_context(team->realtime_sem_context);
2490 	xsi_sem_undo(team);
2491 	delete_owned_ports(team);
2492 	sem_delete_owned_sems(team);
2493 	remove_images(team);
2494 	team->address_space->RemoveAndPut();
2495 
2496 	delete_team_struct(team);
2497 
2498 	// notify the debugger that the team is gone
2499 	user_debug_team_deleted(teamID, debuggerPort);
2500 }
2501 
2502 
2503 struct team*
2504 team_get_kernel_team(void)
2505 {
2506 	return sKernelTeam;
2507 }
2508 
2509 
2510 team_id
2511 team_get_kernel_team_id(void)
2512 {
2513 	if (!sKernelTeam)
2514 		return 0;
2515 
2516 	return sKernelTeam->id;
2517 }
2518 
2519 
2520 team_id
2521 team_get_current_team_id(void)
2522 {
2523 	return thread_get_current_thread()->team->id;
2524 }
2525 
2526 
2527 status_t
2528 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
2529 {
2530 	cpu_status state;
2531 	struct team* team;
2532 	status_t status;
2533 
2534 	// ToDo: we need to do something about B_SYSTEM_TEAM vs. its real ID (1)
2535 	if (id == 1) {
2536 		// we're the kernel team, so we don't have to go through all
2537 		// the hassle (locking and hash lookup)
2538 		*_addressSpace = VMAddressSpace::GetKernel();
2539 		return B_OK;
2540 	}
2541 
2542 	state = disable_interrupts();
2543 	GRAB_TEAM_LOCK();
2544 
2545 	team = team_get_team_struct_locked(id);
2546 	if (team != NULL) {
2547 		team->address_space->Get();
2548 		*_addressSpace = team->address_space;
2549 		status = B_OK;
2550 	} else
2551 		status = B_BAD_VALUE;
2552 
2553 	RELEASE_TEAM_LOCK();
2554 	restore_interrupts(state);
2555 
2556 	return status;
2557 }
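
/*!	Usage sketch (illustration only): the address space returned by
	team_get_address_space() carries an extra reference that the caller has to
	drop again -- presumably via VMAddressSpace::Put(), mirroring the Get()
	above. inspect_address_space() is a made-up name.
*/
#if 0
static status_t
inspect_address_space(team_id id)
{
	VMAddressSpace* addressSpace;
	status_t status = team_get_address_space(id, &addressSpace);
	if (status != B_OK)
		return status;

	// ... use addressSpace without holding the team lock ...

	addressSpace->Put();
		// balances the Get() done by team_get_address_space()
	return B_OK;
}
#endif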
2558 
2559 
2560 /*!	Sets the team's job control state.
2561 	Interrupts must be disabled and the team lock be held.
2562 	\a threadsLocked indicates whether the thread lock is being held, too.
2563 */
2564 void
2565 team_set_job_control_state(struct team* team, job_control_state newState,
2566 	int signal, bool threadsLocked)
2567 {
2568 	if (team == NULL || team->job_control_entry == NULL)
2569 		return;
2570 
2571 	// don't touch anything if the state stays the same or the team is
2572 	// already dead
2573 	job_control_entry* entry = team->job_control_entry;
2574 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
2575 		return;
2576 
2577 	T(SetJobControlState(team->id, newState, signal));
2578 
2579 	// remove from the old list
2580 	switch (entry->state) {
2581 		case JOB_CONTROL_STATE_NONE:
2582 			// entry is in no list ATM
2583 			break;
2584 		case JOB_CONTROL_STATE_DEAD:
2585 			// can't get here
2586 			break;
2587 		case JOB_CONTROL_STATE_STOPPED:
2588 			team->parent->stopped_children->entries.Remove(entry);
2589 			break;
2590 		case JOB_CONTROL_STATE_CONTINUED:
2591 			team->parent->continued_children->entries.Remove(entry);
2592 			break;
2593 	}
2594 
2595 	entry->state = newState;
2596 	entry->signal = signal;
2597 
2598 	// add to new list
2599 	team_job_control_children* childList = NULL;
2600 	switch (entry->state) {
2601 		case JOB_CONTROL_STATE_NONE:
2602 			// entry doesn't get into any list
2603 			break;
2604 		case JOB_CONTROL_STATE_DEAD:
2605 			childList = team->parent->dead_children;
2606 			team->parent->dead_children->count++;
2607 			break;
2608 		case JOB_CONTROL_STATE_STOPPED:
2609 			childList = team->parent->stopped_children;
2610 			break;
2611 		case JOB_CONTROL_STATE_CONTINUED:
2612 			childList = team->parent->continued_children;
2613 			break;
2614 	}
2615 
2616 	if (childList != NULL) {
2617 		childList->entries.Add(entry);
2618 		team->parent->dead_children->condition_variable.NotifyAll(
2619 			threadsLocked);
2620 	}
2621 }
2622 
2623 
2624 /*! Adds a hook to the specified team that is called as soon as the team
2625 	goes away.
2626 	This call might become public in the future.
2627 */
2628 status_t
2629 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
2630 {
2631 	struct team_watcher* watcher;
2632 	struct team* team;
2633 	cpu_status state;
2634 
2635 	if (hook == NULL || teamID < B_OK)
2636 		return B_BAD_VALUE;
2637 
2638 	watcher = (struct team_watcher*)malloc(sizeof(struct team_watcher));
2639 	if (watcher == NULL)
2640 		return B_NO_MEMORY;
2641 
2642 	watcher->hook = hook;
2643 	watcher->data = data;
2644 
2645 	// find team and add watcher
2646 
2647 	state = disable_interrupts();
2648 	GRAB_TEAM_LOCK();
2649 
2650 	team = team_get_team_struct_locked(teamID);
2651 	if (team != NULL)
2652 		list_add_item(&team->watcher_list, watcher);
2653 
2654 	RELEASE_TEAM_LOCK();
2655 	restore_interrupts(state);
2656 
2657 	if (team == NULL) {
2658 		free(watcher);
2659 		return B_BAD_TEAM_ID;
2660 	}
2661 
2662 	return B_OK;
2663 }
2664 
2665 
2666 status_t
2667 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
2668 {
2669 	struct team_watcher* watcher = NULL;
2670 	struct team* team;
2671 	cpu_status state;
2672 
2673 	if (hook == NULL || teamID < B_OK)
2674 		return B_BAD_VALUE;
2675 
2676 	// find team and remove watcher (if present)
2677 
2678 	state = disable_interrupts();
2679 	GRAB_TEAM_LOCK();
2680 
2681 	team = team_get_team_struct_locked(teamID);
2682 	if (team != NULL) {
2683 		// search for watcher
2684 		while ((watcher = (struct team_watcher*)list_get_next_item(
2685 				&team->watcher_list, watcher)) != NULL) {
2686 			if (watcher->hook == hook && watcher->data == data) {
2687 				// got it!
2688 				list_remove_item(&team->watcher_list, watcher);
2689 				break;
2690 			}
2691 		}
2692 	}
2693 
2694 	RELEASE_TEAM_LOCK();
2695 	restore_interrupts(state);
2696 
2697 	if (watcher == NULL)
2698 		return B_ENTRY_NOT_FOUND;
2699 
2700 	free(watcher);
2701 	return B_OK;
2702 }
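
/*!	Usage sketch (illustration only): a component keeping per-team state that
	wants to be told when a team goes away. The hook is invoked from
	team_delete_team() once the team is no longer reachable, without any team
	lock held. my_team_gone() and my_attach_to_team() are made-up names.
*/
#if 0
static void
my_team_gone(team_id id, void* data)
{
	dprintf("team %ld is gone, releasing state %p\n", id, data);
}


static status_t
my_attach_to_team(team_id id, void* myState)
{
	status_t error = start_watching_team(id, &my_team_gone, myState);
	if (error != B_OK)
		return error;

	// If the state goes away before the team does, the watcher has to be
	// removed again:
	//	stop_watching_team(id, &my_team_gone, myState);
	return B_OK;
}
#endif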
2703 
2704 
2705 /*!	The team lock must be held or the team must still be single threaded.
2706 */
2707 struct user_thread*
2708 team_allocate_user_thread(struct team* team)
2709 {
2710 	if (team->user_data == 0)
2711 		return NULL;
2712 
2713 	user_thread* thread = NULL;
2714 
2715 	// take an entry from the free list, if any
2716 	if (struct free_user_thread* entry = team->free_user_threads) {
2717 		thread = entry->thread;
2718 		team->free_user_threads = entry->next;
2719 		deferred_free(entry);
2720 		return thread;
2721 	} else {
2722 		// enough space left?
2723 		size_t needed = _ALIGN(sizeof(user_thread));
2724 		if (team->user_data_size - team->used_user_data < needed)
2725 			return NULL;
2726 		// TODO: This imposes a per team thread limit! We should resize the
2727 		// area, if necessary. That's problematic at this point, though, since
2728 		// we've got the team lock.
2729 
2730 		thread = (user_thread*)(team->user_data + team->used_user_data);
2731 		team->used_user_data += needed;
2732 	}
2733 
2734 	thread->defer_signals = 0;
2735 	thread->pending_signals = 0;
2736 	thread->wait_status = B_OK;
2737 
2738 	return thread;
2739 }
2740 
2741 
2742 /*!	The team lock must not be held. \a thread must be the current thread.
2743 */
2744 void
2745 team_free_user_thread(struct thread* thread)
2746 {
2747 	user_thread* userThread = thread->user_thread;
2748 	if (userThread == NULL)
2749 		return;
2750 
2751 	// create a free list entry
2752 	free_user_thread* entry
2753 		= (free_user_thread*)malloc(sizeof(free_user_thread));
2754 	if (entry == NULL) {
2755 		// we have to leak the user thread :-/
2756 		return;
2757 	}
2758 
2759 	InterruptsSpinLocker _(gTeamSpinlock);
2760 
2761 	// detach from thread
2762 	SpinLocker threadLocker(gThreadSpinlock);
2763 	thread->user_thread = NULL;
2764 	threadLocker.Unlock();
2765 
2766 	entry->thread = userThread;
2767 	entry->next = thread->team->free_user_threads;
2768 	thread->team->free_user_threads = entry;
2769 }
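
/*!	Sketch (illustration only) of how the two functions above pair up in a
	thread's lifecycle: the user_thread slot is carved out of the team's user
	data area when the thread is set up and recycled via the free list when it
	goes away. attach_user_thread_example() is a made-up name.
*/
#if 0
static status_t
attach_user_thread_example(struct team* team, struct thread* thread)
{
	// requires the team lock to be held (or the team to still be single
	// threaded)
	thread->user_thread = team_allocate_user_thread(team);
	if (thread->user_thread == NULL)
		return B_NO_MEMORY;

	// ... when the thread exits (without the team lock held):
	//	team_free_user_thread(thread);
	return B_OK;
}
#endif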
2770 
2771 
2772 //	#pragma mark - Public kernel API
2773 
2774 
2775 thread_id
2776 load_image(int32 argCount, const char** args, const char** env)
2777 {
2778 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
2779 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
2780 }
2781 
2782 
2783 thread_id
2784 load_image_etc(int32 argCount, const char* const* args,
2785 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
2786 {
2787 	// we need to flatten the args and environment
2788 
2789 	if (args == NULL)
2790 		return B_BAD_VALUE;
2791 
2792 	// determine total needed size
2793 	int32 argSize = 0;
2794 	for (int32 i = 0; i < argCount; i++)
2795 		argSize += strlen(args[i]) + 1;
2796 
2797 	int32 envCount = 0;
2798 	int32 envSize = 0;
2799 	while (env != NULL && env[envCount] != NULL)
2800 		envSize += strlen(env[envCount++]) + 1;
2801 
2802 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
2803 	if (size > MAX_PROCESS_ARGS_SIZE)
2804 		return B_TOO_MANY_ARGS;
2805 
2806 	// allocate space
2807 	char** flatArgs = (char**)malloc(size);
2808 	if (flatArgs == NULL)
2809 		return B_NO_MEMORY;
2810 
2811 	char** slot = flatArgs;
2812 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
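	// The flat buffer is laid out as follows: the argCount + envCount + 2
	// pointers come first (argument pointers, NULL, environment pointers,
	// NULL), immediately followed by the string data they will point to.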
2813 
2814 	// copy arguments and environment
2815 	for (int32 i = 0; i < argCount; i++) {
2816 		int32 argSize = strlen(args[i]) + 1;
2817 		memcpy(stringSpace, args[i], argSize);
2818 		*slot++ = stringSpace;
2819 		stringSpace += argSize;
2820 	}
2821 
2822 	*slot++ = NULL;
2823 
2824 	for (int32 i = 0; i < envCount; i++) {
2825 		int32 envSize = strlen(env[i]) + 1;
2826 		memcpy(stringSpace, env[i], envSize);
2827 		*slot++ = stringSpace;
2828 		stringSpace += envSize;
2829 	}
2830 
2831 	*slot++ = NULL;
2832 
2833 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
2834 		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
2835 
2836 	free(flatArgs);
2837 		// load_image_internal() unset our variable if it took over ownership
2838 
2839 	return thread;
2840 }
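
/*!	Usage sketch (illustration only): starting a userland program from the
	kernel and waiting for its exit status, assuming the usual load_image()
	contract (the main thread is returned suspended). The program, arguments,
	and environment are made up; note that the environment array must be
	NULL-terminated, while the argument array is sized by argCount.
*/
#if 0
static status_t
run_program_example(void)
{
	const char* args[] = { "/bin/sh", "-c", "echo hello" };
	const char* env[] = { "HOME=/boot/home", NULL };

	thread_id team = load_image(3, args, env);
	if (team < 0)
		return team;

	// the team ID equals the ID of its main thread
	resume_thread(team);

	status_t returnCode;
	status_t error = wait_for_team(team, &returnCode);
	return error == B_OK ? returnCode : error;
}
#endif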
2841 
2842 
2843 status_t
2844 wait_for_team(team_id id, status_t* _returnCode)
2845 {
2846 	struct team* team;
2847 	thread_id thread;
2848 	cpu_status state;
2849 
2850 	// find main thread and wait for that
2851 
2852 	state = disable_interrupts();
2853 	GRAB_TEAM_LOCK();
2854 
2855 	team = team_get_team_struct_locked(id);
2856 	if (team != NULL && team->main_thread != NULL)
2857 		thread = team->main_thread->id;
2858 	else
2859 		thread = B_BAD_THREAD_ID;
2860 
2861 	RELEASE_TEAM_LOCK();
2862 	restore_interrupts(state);
2863 
2864 	if (thread < 0)
2865 		return thread;
2866 
2867 	return wait_for_thread(thread, _returnCode);
2868 }
2869 
2870 
2871 status_t
2872 kill_team(team_id id)
2873 {
2874 	status_t status = B_OK;
2875 	thread_id threadID = -1;
2876 	struct team* team;
2877 	cpu_status state;
2878 
2879 	state = disable_interrupts();
2880 	GRAB_TEAM_LOCK();
2881 
2882 	team = team_get_team_struct_locked(id);
2883 	if (team != NULL) {
2884 		if (team != sKernelTeam) {
2885 			threadID = team->id;
2886 				// the team ID is the same as the ID of its main thread
2887 		} else
2888 			status = B_NOT_ALLOWED;
2889 	} else
2890 		status = B_BAD_THREAD_ID;
2891 
2892 	RELEASE_TEAM_LOCK();
2893 	restore_interrupts(state);
2894 
2895 	if (status < B_OK)
2896 		return status;
2897 
2898 	// just kill the main thread in the team. The cleanup code there will
2899 	// take care of the team
2900 	return kill_thread(threadID);
2901 }
2902 
2903 
2904 status_t
2905 _get_team_info(team_id id, team_info* info, size_t size)
2906 {
2907 	cpu_status state;
2908 	status_t status = B_OK;
2909 	struct team* team;
2910 
2911 	state = disable_interrupts();
2912 	GRAB_TEAM_LOCK();
2913 
2914 	if (id == B_CURRENT_TEAM)
2915 		team = thread_get_current_thread()->team;
2916 	else
2917 		team = team_get_team_struct_locked(id);
2918 
2919 	if (team == NULL) {
2920 		status = B_BAD_TEAM_ID;
2921 		goto err;
2922 	}
2923 
2924 	status = fill_team_info(team, info, size);
2925 
2926 err:
2927 	RELEASE_TEAM_LOCK();
2928 	restore_interrupts(state);
2929 
2930 	return status;
2931 }
2932 
2933 
2934 status_t
2935 _get_next_team_info(int32* cookie, team_info* info, size_t size)
2936 {
2937 	status_t status = B_BAD_TEAM_ID;
2938 	struct team* team = NULL;
2939 	int32 slot = *cookie;
2940 	team_id lastTeamID;
2941 	cpu_status state;
2942 
2943 	if (slot < 1)
2944 		slot = 1;
2945 
2946 	state = disable_interrupts();
2947 	GRAB_TEAM_LOCK();
2948 
2949 	lastTeamID = peek_next_thread_id();
2950 	if (slot >= lastTeamID)
2951 		goto err;
2952 
2953 	// get next valid team
2954 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
2955 		slot++;
2956 
2957 	if (team) {
2958 		status = fill_team_info(team, info, size);
2959 		*cookie = ++slot;
2960 	}
2961 
2962 err:
2963 	RELEASE_TEAM_LOCK();
2964 	restore_interrupts(state);
2965 
2966 	return status;
2967 }
2968 
2969 
2970 status_t
2971 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
2972 {
2973 	bigtime_t kernelTime = 0, userTime = 0;
2974 	status_t status = B_OK;
2975 	struct team* team;
2976 	cpu_status state;
2977 
2978 	if (size != sizeof(team_usage_info)
2979 		|| (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN))
2980 		return B_BAD_VALUE;
2981 
2982 	state = disable_interrupts();
2983 	GRAB_TEAM_LOCK();
2984 
2985 	if (id == B_CURRENT_TEAM)
2986 		team = thread_get_current_thread()->team;
2987 	else
2988 		team = team_get_team_struct_locked(id);
2989 
2990 	if (team == NULL) {
2991 		status = B_BAD_TEAM_ID;
2992 		goto out;
2993 	}
2994 
2995 	switch (who) {
2996 		case B_TEAM_USAGE_SELF:
2997 		{
2998 			struct thread* thread = team->thread_list;
2999 
3000 			for (; thread != NULL; thread = thread->team_next) {
3001 				kernelTime += thread->kernel_time;
3002 				userTime += thread->user_time;
3003 			}
3004 
3005 			kernelTime += team->dead_threads_kernel_time;
3006 			userTime += team->dead_threads_user_time;
3007 			break;
3008 		}
3009 
3010 		case B_TEAM_USAGE_CHILDREN:
3011 		{
3012 			struct team* child = team->children;
3013 			for (; child != NULL; child = child->siblings_next) {
3014 				struct thread* thread = child->thread_list;
3015 
3016 				for (; thread != NULL; thread = thread->team_next) {
3017 					kernelTime += thread->kernel_time;
3018 					userTime += thread->user_time;
3019 				}
3020 
3021 				kernelTime += child->dead_threads_kernel_time;
3022 				userTime += child->dead_threads_user_time;
3023 			}
3024 
3025 			kernelTime += team->dead_children->kernel_time;
3026 			userTime += team->dead_children->user_time;
3027 			break;
3028 		}
3029 	}
3030 
3031 out:
3032 	RELEASE_TEAM_LOCK();
3033 	restore_interrupts(state);
3034 
3035 	if (status == B_OK) {
3036 		info->kernel_time = kernelTime;
3037 		info->user_time = userTime;
3038 	}
3039 
3040 	return status;
3041 }
3042 
3043 
3044 pid_t
3045 getpid(void)
3046 {
3047 	return thread_get_current_thread()->team->id;
3048 }
3049 
3050 
3051 pid_t
3052 getppid(void)
3053 {
3054 	struct team* team = thread_get_current_thread()->team;
3055 	cpu_status state;
3056 	pid_t parent;
3057 
3058 	state = disable_interrupts();
3059 	GRAB_TEAM_LOCK();
3060 
3061 	parent = team->parent->id;
3062 
3063 	RELEASE_TEAM_LOCK();
3064 	restore_interrupts(state);
3065 
3066 	return parent;
3067 }
3068 
3069 
3070 pid_t
3071 getpgid(pid_t process)
3072 {
3073 	struct thread* thread;
3074 	pid_t result = -1;
3075 	cpu_status state;
3076 
3077 	if (process == 0)
3078 		process = thread_get_current_thread()->team->id;
3079 
3080 	state = disable_interrupts();
3081 	GRAB_THREAD_LOCK();
3082 
3083 	thread = thread_get_thread_struct_locked(process);
3084 	if (thread != NULL)
3085 		result = thread->team->group_id;
3086 
3087 	RELEASE_THREAD_LOCK();
3088 	restore_interrupts(state);
3089 
3090 	return thread != NULL ? result : B_BAD_VALUE;
3091 }
3092 
3093 
3094 pid_t
3095 getsid(pid_t process)
3096 {
3097 	struct thread* thread;
3098 	pid_t result = -1;
3099 	cpu_status state;
3100 
3101 	if (process == 0)
3102 		process = thread_get_current_thread()->team->id;
3103 
3104 	state = disable_interrupts();
3105 	GRAB_THREAD_LOCK();
3106 
3107 	thread = thread_get_thread_struct_locked(process);
3108 	if (thread != NULL)
3109 		result = thread->team->session_id;
3110 
3111 	RELEASE_THREAD_LOCK();
3112 	restore_interrupts(state);
3113 
3114 	return thread != NULL ? result : B_BAD_VALUE;
3115 }
3116 
3117 
3118 //	#pragma mark - User syscalls
3119 
3120 
3121 status_t
3122 _user_exec(const char* userPath, const char* const* userFlatArgs,
3123 	size_t flatArgsSize, int32 argCount, int32 envCount)
3124 {
3125 	// NOTE: Since this function normally doesn't return, don't use automatic
3126 	// variables that need destruction in the function scope.
3127 	char path[B_PATH_NAME_LENGTH];
3128 
3129 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3130 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3131 		return B_BAD_ADDRESS;
3132 
3133 	// copy and relocate the flat arguments
3134 	char** flatArgs;
3135 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3136 		argCount, envCount, flatArgs);
3137 
3138 	if (error == B_OK) {
3139 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3140 			envCount);
3141 			// this one only returns in case of error
3142 	}
3143 
3144 	free(flatArgs);
3145 	return error;
3146 }
3147 
3148 
3149 thread_id
3150 _user_fork(void)
3151 {
3152 	return fork_team();
3153 }
3154 
3155 
3156 thread_id
3157 _user_wait_for_child(thread_id child, uint32 flags, int32* _userReason,
3158 	status_t* _userReturnCode)
3159 {
3160 	status_t returnCode;
3161 	int32 reason;
3162 	thread_id deadChild;
3163 
3164 	if ((_userReason != NULL && !IS_USER_ADDRESS(_userReason))
3165 		|| (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode)))
3166 		return B_BAD_ADDRESS;
3167 
3168 	deadChild = wait_for_child(child, flags, &reason, &returnCode);
3169 
3170 	if (deadChild >= B_OK) {
3171 		// copy result data on successful completion
3172 		if ((_userReason != NULL
3173 				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
3174 			|| (_userReturnCode != NULL
3175 				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
3176 					< B_OK)) {
3177 			return B_BAD_ADDRESS;
3178 		}
3179 
3180 		return deadChild;
3181 	}
3182 
3183 	return syscall_restart_handle_post(deadChild);
3184 }
3185 
3186 
3187 pid_t
3188 _user_process_info(pid_t process, int32 which)
3189 {
3190 	// we only allow returning the parent of the current process
3191 	if (which == PARENT_ID
3192 		&& process != 0 && process != thread_get_current_thread()->team->id)
3193 		return B_BAD_VALUE;
3194 
3195 	switch (which) {
3196 		case SESSION_ID:
3197 			return getsid(process);
3198 		case GROUP_ID:
3199 			return getpgid(process);
3200 		case PARENT_ID:
3201 			return getppid();
3202 	}
3203 
3204 	return B_BAD_VALUE;
3205 }
3206 
3207 
3208 pid_t
3209 _user_setpgid(pid_t processID, pid_t groupID)
3210 {
3211 	struct thread* thread = thread_get_current_thread();
3212 	struct team* currentTeam = thread->team;
3213 	struct team* team;
3214 
3215 	if (groupID < 0)
3216 		return B_BAD_VALUE;
3217 
3218 	if (processID == 0)
3219 		processID = currentTeam->id;
3220 
3221 	// if the group ID is not specified, use the target process' ID
3222 	if (groupID == 0)
3223 		groupID = processID;
3224 
3225 	if (processID == currentTeam->id) {
3226 		// we set our own group
3227 
3228 		// we must not change our process group ID if we're a session leader
3229 		if (is_session_leader(currentTeam))
3230 			return B_NOT_ALLOWED;
3231 	} else {
3232 		// another team is the target of the call -- check it out
3233 		InterruptsSpinLocker _(gTeamSpinlock);
3234 
3235 		team = team_get_team_struct_locked(processID);
3236 		if (team == NULL)
3237 			return ESRCH;
3238 
3239 		// The team must be a child of the calling team and in the same session.
3240 		// (If that's the case it isn't a session leader either.)
3241 		if (team->parent != currentTeam
3242 			|| team->session_id != currentTeam->session_id) {
3243 			return B_NOT_ALLOWED;
3244 		}
3245 
3246 		if (team->group_id == groupID)
3247 			return groupID;
3248 
3249 		// The call is also supposed to fail on a child that has already
3250 		// executed exec*() [EACCES].
3251 		if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
3252 			return EACCES;
3253 	}
3254 
3255 	struct process_group* group = NULL;
3256 	if (groupID == processID) {
3257 		// A new process group might be needed.
3258 		group = create_process_group(groupID);
3259 		if (group == NULL)
3260 			return B_NO_MEMORY;
3261 
3262 		// Assume orphaned. We consider the situation of the team's parent
3263 		// below.
3264 		group->orphaned = true;
3265 	}
3266 
3267 	status_t status = B_OK;
3268 	struct process_group* freeGroup = NULL;
3269 
3270 	InterruptsSpinLocker locker(gTeamSpinlock);
3271 
3272 	team = team_get_team_struct_locked(processID);
3273 	if (team != NULL) {
3274 		// check the conditions again -- they might have changed in the meantime
3275 		if (is_session_leader(team)
3276 			|| team->session_id != currentTeam->session_id) {
3277 			status = B_NOT_ALLOWED;
3278 		} else if (team != currentTeam
3279 				&& (team->flags & TEAM_FLAG_EXEC_DONE) != 0) {
3280 			status = EACCES;
3281 		} else if (team->group_id == groupID) {
3282 			// the team is already in the desired process group
3283 			freeGroup = group;
3284 		} else {
3285 			// Check if a process group with the requested ID already exists.
3286 			struct process_group* targetGroup
3287 				= team_get_process_group_locked(team->group->session, groupID);
3288 			if (targetGroup != NULL) {
3289 				// In case of processID == groupID we have to free the
3290 				// allocated group.
3291 				freeGroup = group;
3292 			} else if (processID == groupID) {
3293 				// We created a new process group, let us insert it into the
3294 				// team's session.
3295 				insert_group_into_session(team->group->session, group);
3296 				targetGroup = group;
3297 			}
3298 
3299 			if (targetGroup != NULL) {
3300 				// we got a group, let's move the team there
3301 				process_group* oldGroup = team->group;
3302 
3303 				remove_team_from_group(team);
3304 				insert_team_into_group(targetGroup, team);
3305 
3306 				// Update the "orphaned" flag of all potentially affected
3307 				// groups.
3308 
3309 				// the team's old group
3310 				if (oldGroup->teams != NULL) {
3311 					oldGroup->orphaned = false;
3312 					update_orphaned_process_group(oldGroup, -1);
3313 				}
3314 
3315 				// the team's new group
3316 				struct team* parent = team->parent;
3317 				targetGroup->orphaned &= parent == NULL
3318 					|| parent->group == targetGroup
3319 					|| team->parent->session_id != team->session_id;
3320 
3321 				// children's groups
3322 				struct team* child = team->children;
3323 				while (child != NULL) {
3324 					child->group->orphaned = false;
3325 					update_orphaned_process_group(child->group, -1);
3326 
3327 					child = child->siblings_next;
3328 				}
3329 			} else
3330 				status = B_NOT_ALLOWED;
3331 		}
3332 	} else
3333 		status = B_NOT_ALLOWED;
3334 
3335 	// Changing the process group might have changed the situation for a parent
3336 	// waiting in wait_for_child(). Hence we notify it.
3337 	if (status == B_OK)
3338 		team->parent->dead_children->condition_variable.NotifyAll(false);
3339 
3340 	locker.Unlock();
3341 
3342 	if (status != B_OK) {
3343 		// in case of error, the group hasn't been added into the hash
3344 		team_delete_process_group(group);
3345 	}
3346 
3347 	team_delete_process_group(freeGroup);
3348 
3349 	return status == B_OK ? groupID : status;
3350 }
3351 
3352 
3353 pid_t
3354 _user_setsid(void)
3355 {
3356 	struct team* team = thread_get_current_thread()->team;
3357 	struct process_session* session;
3358 	struct process_group* group;
3359 	cpu_status state;
3360 	bool failed = false;
3361 
3362 	// the team must not already be a process group leader
3363 	if (is_process_group_leader(team))
3364 		return B_NOT_ALLOWED;
3365 
3366 	group = create_process_group(team->id);
3367 	if (group == NULL)
3368 		return B_NO_MEMORY;
3369 
3370 	session = create_process_session(group->id);
3371 	if (session == NULL) {
3372 		team_delete_process_group(group);
3373 		return B_NO_MEMORY;
3374 	}
3375 
3376 	state = disable_interrupts();
3377 	GRAB_TEAM_LOCK();
3378 
3379 	// this may have changed since the check above
3380 	if (!is_process_group_leader(team)) {
3381 		remove_team_from_group(team);
3382 
3383 		insert_group_into_session(session, group);
3384 		insert_team_into_group(group, team);
3385 	} else
3386 		failed = true;
3387 
3388 	RELEASE_TEAM_LOCK();
3389 	restore_interrupts(state);
3390 
3391 	if (failed) {
3392 		team_delete_process_group(group);
3393 		free(session);
3394 		return B_NOT_ALLOWED;
3395 	}
3396 
3397 	return team->group_id;
3398 }
3399 
3400 
3401 status_t
3402 _user_wait_for_team(team_id id, status_t* _userReturnCode)
3403 {
3404 	status_t returnCode;
3405 	status_t status;
3406 
3407 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
3408 		return B_BAD_ADDRESS;
3409 
3410 	status = wait_for_team(id, &returnCode);
3411 	if (status >= B_OK && _userReturnCode != NULL) {
3412 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
3413 				!= B_OK)
3414 			return B_BAD_ADDRESS;
3415 		return B_OK;
3416 	}
3417 
3418 	return syscall_restart_handle_post(status);
3419 }
3420 
3421 
3422 thread_id
3423 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
3424 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
3425 	port_id errorPort, uint32 errorToken)
3426 {
3427 	TRACE(("_user_load_image: argc = %ld\n", argCount));
3428 
3429 	if (argCount < 1)
3430 		return B_BAD_VALUE;
3431 
3432 	// copy and relocate the flat arguments
3433 	char** flatArgs;
3434 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3435 		argCount, envCount, flatArgs);
3436 	if (error != B_OK)
3437 		return error;
3438 
3439 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
3440 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
3441 		errorToken);
3442 
3443 	free(flatArgs);
3444 		// load_image_internal() unset our variable if it took over ownership
3445 
3446 	return thread;
3447 }
3448 
3449 
3450 void
3451 _user_exit_team(status_t returnValue)
3452 {
3453 	struct thread* thread = thread_get_current_thread();
3454 
3455 	thread->exit.status = returnValue;
3456 	thread->exit.reason = THREAD_RETURN_EXIT;
3457 
3458 	send_signal(thread->id, SIGKILL);
3459 }
3460 
3461 
3462 status_t
3463 _user_kill_team(team_id team)
3464 {
3465 	return kill_team(team);
3466 }
3467 
3468 
3469 status_t
3470 _user_get_team_info(team_id id, team_info* userInfo)
3471 {
3472 	status_t status;
3473 	team_info info;
3474 
3475 	if (!IS_USER_ADDRESS(userInfo))
3476 		return B_BAD_ADDRESS;
3477 
3478 	status = _get_team_info(id, &info, sizeof(team_info));
3479 	if (status == B_OK) {
3480 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3481 			return B_BAD_ADDRESS;
3482 	}
3483 
3484 	return status;
3485 }
3486 
3487 
3488 status_t
3489 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
3490 {
3491 	status_t status;
3492 	team_info info;
3493 	int32 cookie;
3494 
3495 	if (!IS_USER_ADDRESS(userCookie)
3496 		|| !IS_USER_ADDRESS(userInfo)
3497 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3498 		return B_BAD_ADDRESS;
3499 
3500 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
3501 	if (status != B_OK)
3502 		return status;
3503 
3504 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3505 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3506 		return B_BAD_ADDRESS;
3507 
3508 	return status;
3509 }
3510 
3511 
3512 team_id
3513 _user_get_current_team(void)
3514 {
3515 	return team_get_current_team_id();
3516 }
3517 
3518 
3519 status_t
3520 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
3521 	size_t size)
3522 {
3523 	team_usage_info info;
3524 	status_t status;
3525 
3526 	if (!IS_USER_ADDRESS(userInfo))
3527 		return B_BAD_ADDRESS;
3528 
3529 	status = _get_team_usage_info(team, who, &info, size);
3530 	if (status != B_OK)
3531 		return status;
3532 
3533 	if (user_memcpy(userInfo, &info, size) < B_OK)
3534 		return B_BAD_ADDRESS;
3535 
3536 	return status;
3537 }
3538 
3539