xref: /haiku/src/system/kernel/team.cpp (revision 03187b607b2b5eec7ee059f1ead09bdba14991fb)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 /*!	Team functions */
11 
12 #include <stdio.h>
13 #include <stdlib.h>
14 #include <string.h>
15 #include <sys/wait.h>
16 
17 #include <OS.h>
18 
19 #include <AutoDeleter.h>
20 #include <FindDirectory.h>
21 
22 #include <boot_device.h>
23 #include <elf.h>
24 #include <file_cache.h>
25 #include <fs/KPath.h>
26 #include <heap.h>
27 #include <int.h>
28 #include <kernel.h>
29 #include <kimage.h>
30 #include <kscheduler.h>
31 #include <ksignal.h>
32 #include <Notifications.h>
33 #include <port.h>
34 #include <posix/realtime_sem.h>
35 #include <posix/xsi_semaphore.h>
36 #include <sem.h>
37 #include <syscall_process_info.h>
38 #include <syscall_restart.h>
39 #include <syscalls.h>
40 #include <team.h>
41 #include <tls.h>
42 #include <tracing.h>
43 #include <user_runtime.h>
44 #include <user_thread.h>
45 #include <usergroup.h>
46 #include <vfs.h>
47 #include <vm.h>
48 #include <vm_address_space.h>
49 #include <util/AutoLock.h>
50 #include <util/khash.h>
51 
52 //#define TRACE_TEAM
53 #ifdef TRACE_TEAM
54 #	define TRACE(x) dprintf x
55 #else
56 #	define TRACE(x) ;
57 #endif
58 
59 
60 struct team_key {
61 	team_id id;
62 };
63 
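// Arguments for a newly created team's main thread: the executable path, the
// flattened argument/environment block, and the error port/token that are
// passed on to the runtime loader via user_space_program_args. Created by
// create_team_arg(), released with free_team_arg().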
64 struct team_arg {
65 	char	*path;
66 	char	**flat_args;
67 	size_t	flat_args_size;
68 	uint32	arg_count;
69 	uint32	env_count;
70 	port_id	error_port;
71 	uint32	error_token;
72 };
73 
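// Snapshot of the parent thread's user-space state taken by fork_team():
// stack area, local storage, signal mask/actions/stack, user_thread pointer,
// and the architecture-specific register frame. fork_team_thread_start()
// applies it to the child's main thread before it returns to userland.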
74 struct fork_arg {
75 	area_id				user_stack_area;
76 	addr_t				user_stack_base;
77 	size_t				user_stack_size;
78 	addr_t				user_local_storage;
79 	sigset_t			sig_block_mask;
80 	struct sigaction	sig_action[32];
81 	addr_t				signal_stack_base;
82 	size_t				signal_stack_size;
83 	bool				signal_stack_enabled;
84 
85 	struct user_thread* user_thread;
86 
87 	struct arch_fork_arg arch_info;
88 };
89 
90 class TeamNotificationService : public DefaultNotificationService {
91 public:
92 							TeamNotificationService();
93 
94 			void			Notify(uint32 eventCode, struct team* team);
95 };
96 
97 
98 static hash_table *sTeamHash = NULL;
99 static hash_table *sGroupHash = NULL;
100 static struct team *sKernelTeam = NULL;
101 
102 // some arbitrarily chosen limits - these should probably depend on the available
103 // memory (the limit is not yet enforced)
104 static int32 sMaxTeams = 2048;
105 static int32 sUsedTeams = 1;
106 
107 static TeamNotificationService sNotificationService;
108 
109 spinlock gTeamSpinlock = B_SPINLOCK_INITIALIZER;
110 
111 
112 // #pragma mark - Tracing
113 
114 
115 #if TEAM_TRACING
116 namespace TeamTracing {
117 
118 class TeamForked : public AbstractTraceEntry {
119 public:
120 	TeamForked(thread_id forkedThread)
121 		:
122 		fForkedThread(forkedThread)
123 	{
124 		Initialized();
125 	}
126 
127 	virtual void AddDump(TraceOutput& out)
128 	{
129 		out.Print("team forked, new thread %ld", fForkedThread);
130 	}
131 
132 private:
133 	thread_id			fForkedThread;
134 };
135 
136 
137 class ExecTeam : public AbstractTraceEntry {
138 public:
139 	ExecTeam(const char* path, int32 argCount, const char* const* args,
140 			int32 envCount, const char* const* env)
141 		:
142 		fArgCount(argCount),
143 		fArgs(NULL)
144 	{
145 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
146 			false);
147 
148 		// determine the buffer size we need for the args
149 		size_t argBufferSize = 0;
150 		for (int32 i = 0; i < argCount; i++)
151 			argBufferSize += strlen(args[i]) + 1;
152 
153 		// allocate a buffer
154 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
155 		if (fArgs) {
156 			char* buffer = fArgs;
157 			for (int32 i = 0; i < argCount; i++) {
158 				size_t argSize = strlen(args[i]) + 1;
159 				memcpy(buffer, args[i], argSize);
160 				buffer += argSize;
161 			}
162 		}
163 
164 		// ignore env for the time being
165 		(void)envCount;
166 		(void)env;
167 
168 		Initialized();
169 	}
170 
171 	virtual void AddDump(TraceOutput& out)
172 	{
173 		out.Print("team exec, \"%p\", args:", fPath);
174 
175 		if (fArgs != NULL) {
176 			char* args = fArgs;
177 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
178 				out.Print(" \"%s\"", args);
179 				args += strlen(args) + 1;
180 			}
181 		} else
182 			out.Print(" <too long>");
183 	}
184 
185 private:
186 	char*	fPath;
187 	int32	fArgCount;
188 	char*	fArgs;
189 };
190 
191 
192 static const char*
193 job_control_state_name(job_control_state state)
194 {
195 	switch (state) {
196 		case JOB_CONTROL_STATE_NONE:
197 			return "none";
198 		case JOB_CONTROL_STATE_STOPPED:
199 			return "stopped";
200 		case JOB_CONTROL_STATE_CONTINUED:
201 			return "continued";
202 		case JOB_CONTROL_STATE_DEAD:
203 			return "dead";
204 		default:
205 			return "invalid";
206 	}
207 }
208 
209 
210 class SetJobControlState : public AbstractTraceEntry {
211 public:
212 	SetJobControlState(team_id team, job_control_state newState, int signal)
213 		:
214 		fTeam(team),
215 		fNewState(newState),
216 		fSignal(signal)
217 	{
218 		Initialized();
219 	}
220 
221 	virtual void AddDump(TraceOutput& out)
222 	{
223 		out.Print("team set job control state, team %ld, "
224 			"new state: %s, signal: %d",
225 			fTeam, job_control_state_name(fNewState), fSignal);
226 	}
227 
228 private:
229 	team_id				fTeam;
230 	job_control_state	fNewState;
231 	int					fSignal;
232 };
233 
234 
235 class WaitForChild : public AbstractTraceEntry {
236 public:
237 	WaitForChild(pid_t child, uint32 flags)
238 		:
239 		fChild(child),
240 		fFlags(flags)
241 	{
242 		Initialized();
243 	}
244 
245 	virtual void AddDump(TraceOutput& out)
246 	{
247 		out.Print("team wait for child, child: %ld, "
248 			"flags: 0x%lx", fChild, fFlags);
249 	}
250 
251 private:
252 	pid_t	fChild;
253 	uint32	fFlags;
254 };
255 
256 
257 class WaitForChildDone : public AbstractTraceEntry {
258 public:
259 	WaitForChildDone(const job_control_entry& entry)
260 		:
261 		fState(entry.state),
262 		fTeam(entry.thread),
263 		fStatus(entry.status),
264 		fReason(entry.reason),
265 		fSignal(entry.signal)
266 	{
267 		Initialized();
268 	}
269 
270 	WaitForChildDone(status_t error)
271 		:
272 		fTeam(error)
273 	{
274 		Initialized();
275 	}
276 
277 	virtual void AddDump(TraceOutput& out)
278 	{
279 		if (fTeam >= 0) {
280 			out.Print("team wait for child done, team: %ld, "
281 				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
282 				fTeam, job_control_state_name(fState), fStatus, fReason,
283 				fSignal);
284 		} else {
285 			out.Print("team wait for child failed, error: "
286 				"0x%lx, ", fTeam);
287 		}
288 	}
289 
290 private:
291 	job_control_state	fState;
292 	team_id				fTeam;
293 	status_t			fStatus;
294 	uint16				fReason;
295 	uint16				fSignal;
296 };
297 
298 }	// namespace TeamTracing
299 
300 #	define T(x) new(std::nothrow) TeamTracing::x;
301 #else
302 #	define T(x) ;
303 #endif
304 
305 
306 //	#pragma mark - TeamNotificationService
307 
308 
309 TeamNotificationService::TeamNotificationService()
310 	: DefaultNotificationService("teams")
311 {
312 }
313 
314 
315 void
316 TeamNotificationService::Notify(uint32 eventCode, struct team* team)
317 {
318 	char eventBuffer[128];
319 	KMessage event;
320 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
321 	event.AddInt32("event", eventCode);
322 	event.AddInt32("team", team->id);
323 	event.AddPointer("teamStruct", team);
324 
325 	DefaultNotificationService::Notify(event, eventCode);
326 }
327 
328 
329 //	#pragma mark - Private functions
330 
331 
332 static void
333 _dump_team_info(struct team *team)
334 {
335 	kprintf("TEAM: %p\n", team);
336 	kprintf("id:          %ld (%#lx)\n", team->id, team->id);
337 	kprintf("name:        '%s'\n", team->name);
338 	kprintf("args:        '%s'\n", team->args);
339 	kprintf("next:        %p\n", team->next);
340 	kprintf("parent:      %p", team->parent);
341 	if (team->parent != NULL) {
342 		kprintf(" (id = %ld)\n", team->parent->id);
343 	} else
344 		kprintf("\n");
345 
346 	kprintf("children:    %p\n", team->children);
347 	kprintf("num_threads: %d\n", team->num_threads);
348 	kprintf("state:       %d\n", team->state);
349 	kprintf("flags:       0x%lx\n", team->flags);
350 	kprintf("io_context:  %p\n", team->io_context);
351 	if (team->address_space)
352 		kprintf("address_space: %p\n", team->address_space);
353 	kprintf("main_thread: %p\n", team->main_thread);
354 	kprintf("thread_list: %p\n", team->thread_list);
355 	kprintf("group_id:    %ld\n", team->group_id);
356 	kprintf("session_id:  %ld\n", team->session_id);
357 }
358 
359 
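/*!	Kernel debugger command: dumps a single team, given either by ID, by team
	structure address, or by name; without an argument, the current thread's
	team is dumped.
*/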
360 static int
361 dump_team_info(int argc, char **argv)
362 {
363 	struct hash_iterator iterator;
364 	struct team *team;
365 	team_id id = -1;
366 	bool found = false;
367 
368 	if (argc < 2) {
369 		struct thread* thread = thread_get_current_thread();
370 		if (thread != NULL && thread->team != NULL)
371 			_dump_team_info(thread->team);
372 		else
373 			kprintf("No current team!\n");
374 		return 0;
375 	}
376 
377 	id = strtoul(argv[1], NULL, 0);
378 	if (IS_KERNEL_ADDRESS(id)) {
379 		// semi-hack
380 		_dump_team_info((struct team *)id);
381 		return 0;
382 	}
383 
384 	// walk through the team hash table, trying to match name or id
385 	hash_open(sTeamHash, &iterator);
386 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
387 		if ((team->name && strcmp(argv[1], team->name) == 0) || team->id == id) {
388 			_dump_team_info(team);
389 			found = true;
390 			break;
391 		}
392 	}
393 	hash_close(sTeamHash, &iterator, false);
394 
395 	if (!found)
396 		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
397 	return 0;
398 }
399 
400 
401 static int
402 dump_teams(int argc, char **argv)
403 {
404 	struct hash_iterator iterator;
405 	struct team *team;
406 
407 	kprintf("team           id  parent      name\n");
408 	hash_open(sTeamHash, &iterator);
409 
410 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
411 		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->name);
412 	}
413 
414 	hash_close(sTeamHash, &iterator, false);
415 	return 0;
416 }
417 
418 
419 static int
420 team_struct_compare(void *_p, const void *_key)
421 {
422 	struct team *p = (struct team*)_p;
423 	const struct team_key *key = (const struct team_key*)_key;
424 
425 	if (p->id == key->id)
426 		return 0;
427 
428 	return 1;
429 }
430 
431 
432 static uint32
433 team_struct_hash(void *_p, const void *_key, uint32 range)
434 {
435 	struct team *p = (struct team*)_p;
436 	const struct team_key *key = (const struct team_key*)_key;
437 
438 	if (p != NULL)
439 		return p->id % range;
440 
441 	return (uint32)key->id % range;
442 }
443 
444 
445 static int
446 process_group_compare(void *_group, const void *_key)
447 {
448 	struct process_group *group = (struct process_group*)_group;
449 	const struct team_key *key = (const struct team_key*)_key;
450 
451 	if (group->id == key->id)
452 		return 0;
453 
454 	return 1;
455 }
456 
457 
458 static uint32
459 process_group_hash(void *_group, const void *_key, uint32 range)
460 {
461 	struct process_group *group = (struct process_group*)_group;
462 	const struct team_key *key = (const struct team_key*)_key;
463 
464 	if (group != NULL)
465 		return group->id % range;
466 
467 	return (uint32)key->id % range;
468 }
469 
470 
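/*!	Inserts \a team at the head of \a parent's children list and sets its
	parent pointer.
	Note: must have team lock held
*/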
471 static void
472 insert_team_into_parent(struct team *parent, struct team *team)
473 {
474 	ASSERT(parent != NULL);
475 
476 	team->siblings_next = parent->children;
477 	parent->children = team;
478 	team->parent = parent;
479 }
480 
481 
482 /*!	Note: must have team lock held */
483 static void
484 remove_team_from_parent(struct team *parent, struct team *team)
485 {
486 	struct team *child, *last = NULL;
487 
488 	for (child = parent->children; child != NULL; child = child->siblings_next) {
489 		if (child == team) {
490 			if (last == NULL)
491 				parent->children = child->siblings_next;
492 			else
493 				last->siblings_next = child->siblings_next;
494 
495 			team->parent = NULL;
496 			break;
497 		}
498 		last = child;
499 	}
500 }
501 
502 
503 /*!	Reparents each of our children to the kernel team.
504 	Note: must have team lock held
505 */
506 static void
507 reparent_children(struct team *team)
508 {
509 	struct team *child;
510 
511 	while ((child = team->children) != NULL) {
512 		// remove the child from the current proc and add to the parent
513 		remove_team_from_parent(team, child);
514 		insert_team_into_parent(sKernelTeam, child);
515 	}
516 
517 	// move job control entries too
518 	sKernelTeam->stopped_children->entries.MoveFrom(
519 		&team->stopped_children->entries);
520 	sKernelTeam->continued_children->entries.MoveFrom(
521 		&team->continued_children->entries);
522 
523 	// Note, we don't move the dead children entries. Those will be deleted
524 	// when the team structure is deleted.
525 }
526 
527 
528 static bool
529 is_session_leader(struct team *team)
530 {
531 	return team->session_id == team->id;
532 }
533 
534 
535 static bool
536 is_process_group_leader(struct team *team)
537 {
538 	return team->group_id == team->id;
539 }
540 
541 
542 static void
543 deferred_delete_process_group(struct process_group *group)
544 {
545 	if (group == NULL)
546 		return;
547 
548 	// remove_group_from_session() keeps this pointer around
549 	// only if the session can be freed as well
550 	if (group->session) {
551 		TRACE(("deferred_delete_process_group(): frees session %ld\n",
552 			group->session->id));
553 		deferred_free(group->session);
554 	}
555 
556 	deferred_free(group);
557 }
558 
559 
560 /*!	Removes a group from a session, and puts the session object
561 	back into the session cache, if it's not used anymore.
562 	You must hold the team lock when calling this function.
563 */
564 static void
565 remove_group_from_session(struct process_group *group)
566 {
567 	struct process_session *session = group->session;
568 
569 	// the group must be in a session for this function to have any effect
570 	if (session == NULL)
571 		return;
572 
573 	hash_remove(sGroupHash, group);
574 
575 	// we cannot free the resource here, so we're keeping the group link
576 	// around - this way it'll be freed by free_process_group()
577 	if (--session->group_count > 0)
578 		group->session = NULL;
579 }
580 
581 
582 /*!	Team lock must be held.
583 */
584 static void
585 acquire_process_group_ref(pid_t groupID)
586 {
587 	process_group* group = team_get_process_group_locked(NULL, groupID);
588 	if (group == NULL) {
589 		panic("acquire_process_group_ref(): unknown group ID: %ld", groupID);
590 		return;
591 	}
592 
593 	group->refs++;
594 }
595 
596 
597 /*!	Team lock must be held.
598 */
599 static void
600 release_process_group_ref(pid_t groupID)
601 {
602 	process_group* group = team_get_process_group_locked(NULL, groupID);
603 	if (group == NULL) {
604 		panic("release_process_group_ref(): unknown group ID: %ld", groupID);
605 		return;
606 	}
607 
608 	if (group->refs <= 0) {
609 		panic("release_process_group_ref(%ld): ref count already 0", groupID);
610 		return;
611 	}
612 
613 	if (--group->refs > 0)
614 		return;
615 
616 	// group is no longer used
617 
618 	remove_group_from_session(group);
619 	deferred_delete_process_group(group);
620 }
621 
622 
623 /*!	You must hold the team lock when calling this function. */
624 static void
625 insert_group_into_session(struct process_session *session, struct process_group *group)
626 {
627 	if (group == NULL)
628 		return;
629 
630 	group->session = session;
631 	hash_insert(sGroupHash, group);
632 	session->group_count++;
633 }
634 
635 
636 /*!	You must hold the team lock when calling this function. */
637 static void
638 insert_team_into_group(struct process_group *group, struct team *team)
639 {
640 	team->group = group;
641 	team->group_id = group->id;
642 	team->session_id = group->session->id;
643 
644 	team->group_next = group->teams;
645 	group->teams = team;
646 	acquire_process_group_ref(group->id);
647 }
648 
649 
650 /*!	Removes the team from the group.
651 
652 	\param team the team that'll be removed from its group
653 */
654 static void
655 remove_team_from_group(struct team *team)
656 {
657 	struct process_group *group = team->group;
658 	struct team *current, *last = NULL;
659 
660 	// the team must be in a group for this function to have any effect
661 	if (group == NULL)
662 		return;
663 
664 	for (current = group->teams; current != NULL; current = current->group_next) {
665 		if (current == team) {
666 			if (last == NULL)
667 				group->teams = current->group_next;
668 			else
669 				last->group_next = current->group_next;
670 
671 			team->group = NULL;
672 			break;
673 		}
674 		last = current;
675 	}
676 
677 	team->group = NULL;
678 	team->group_next = NULL;
679 
680 	release_process_group_ref(group->id);
681 }
682 
683 
684 static struct process_group *
685 create_process_group(pid_t id)
686 {
687 	struct process_group *group = (struct process_group *)malloc(sizeof(struct process_group));
688 	if (group == NULL)
689 		return NULL;
690 
691 	group->id = id;
692 	group->refs = 0;
693 	group->session = NULL;
694 	group->teams = NULL;
695 	group->orphaned = true;
696 	return group;
697 }
698 
699 
700 static struct process_session *
701 create_process_session(pid_t id)
702 {
703 	struct process_session *session
704 		= (struct process_session *)malloc(sizeof(struct process_session));
705 	if (session == NULL)
706 		return NULL;
707 
708 	session->id = id;
709 	session->group_count = 0;
710 	session->controlling_tty = -1;
711 	session->foreground_group = -1;
712 
713 	return session;
714 }
715 
716 
717 static void
718 set_team_name(struct team* team, const char* name)
719 {
720 	if (const char* lastSlash = strrchr(name, '/'))
721 		name = lastSlash + 1;
722 
723 	strlcpy(team->name, name, B_OS_NAME_LENGTH);
724 }
725 
726 
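/*!	Allocates and initializes a new team structure, including its dead,
	stopped, and continued children bookkeeping and its job control entry.
	Returns NULL if any allocation fails; in that case everything allocated
	so far is released again via the stack-based deleters.
*/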
727 static struct team *
728 create_team_struct(const char *name, bool kernel)
729 {
730 	struct team *team = (struct team *)malloc(sizeof(struct team));
731 	if (team == NULL)
732 		return NULL;
733 	MemoryDeleter teamDeleter(team);
734 
735 	team->next = team->siblings_next = team->children = team->parent = NULL;
736 	team->id = allocate_thread_id();
737 	set_team_name(team, name);
738 	team->args[0] = '\0';
739 	team->num_threads = 0;
740 	team->io_context = NULL;
741 	team->address_space = NULL;
742 	team->realtime_sem_context = NULL;
743 	team->xsi_sem_context = NULL;
744 	team->thread_list = NULL;
745 	team->main_thread = NULL;
746 	team->loading_info = NULL;
747 	team->state = TEAM_STATE_BIRTH;
748 	team->flags = 0;
749 	team->death_sem = -1;
750 	team->user_data_area = -1;
751 	team->user_data = 0;
752 	team->used_user_data = 0;
753 	team->user_data_size = 0;
754 	team->free_user_threads = NULL;
755 
756 	team->supplementary_groups = NULL;
757 	team->supplementary_group_count = 0;
758 
759 	team->dead_threads_kernel_time = 0;
760 	team->dead_threads_user_time = 0;
761 
762 	// dead threads
763 	list_init(&team->dead_threads);
764 	team->dead_threads_count = 0;
765 
766 	// dead children
767 	team->dead_children = new(nothrow) team_dead_children;
768 	if (team->dead_children == NULL)
769 		return NULL;
770 	ObjectDeleter<team_dead_children> deadChildrenDeleter(team->dead_children);
771 
772 	team->dead_children->count = 0;
773 	team->dead_children->kernel_time = 0;
774 	team->dead_children->user_time = 0;
775 
776 	// stopped children
777 	team->stopped_children = new(nothrow) team_job_control_children;
778 	if (team->stopped_children == NULL)
779 		return NULL;
780 	ObjectDeleter<team_job_control_children> stoppedChildrenDeleter(
781 		team->stopped_children);
782 
783 	// continued children
784 	team->continued_children = new(nothrow) team_job_control_children;
785 	if (team->continued_children == NULL)
786 		return NULL;
787 	ObjectDeleter<team_job_control_children> continuedChildrenDeleter(
788 		team->continued_children);
789 
790 	// job control entry
791 	team->job_control_entry = new(nothrow) job_control_entry;
792 	if (team->job_control_entry == NULL)
793 		return NULL;
794 	ObjectDeleter<job_control_entry> jobControlEntryDeleter(
795 		team->job_control_entry);
796 	team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
797 	team->job_control_entry->thread = team->id;
798 	team->job_control_entry->team = team;
799 
800 	list_init(&team->image_list);
801 	list_init(&team->watcher_list);
802 
803 	clear_team_debug_info(&team->debug_info, true);
804 
805 	if (arch_team_init_team_struct(team, kernel) < 0)
806 		return NULL;
807 
808 	// publish dead/stopped/continued children condition vars
809 	team->dead_children->condition_variable.Init(team->dead_children,
810 		"team children");
811 
812 	// keep all allocated structures
813 	jobControlEntryDeleter.Detach();
814 	continuedChildrenDeleter.Detach();
815 	stoppedChildrenDeleter.Detach();
816 	deadChildrenDeleter.Detach();
817 	teamDeleter.Detach();
818 
819 	return team;
820 }
821 
822 
823 static void
824 delete_team_struct(struct team *team)
825 {
826 	while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
827 			&team->dead_threads)) {
828 		free(threadDeathEntry);
829 	}
830 
831 	while (job_control_entry* entry = team->dead_children->entries.RemoveHead())
832 		delete entry;
833 
834 	while (free_user_thread* entry = team->free_user_threads) {
835 		team->free_user_threads = entry->next;
836 		free(entry);
837 	}
838 
839 	malloc_referenced_release(team->supplementary_groups);
840 
841 	delete team->job_control_entry;
842 		// usually already NULL and transferred to the parent
843 	delete team->continued_children;
844 	delete team->stopped_children;
845 	delete team->dead_children;
846 	free(team);
847 }
848 
849 
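/*!	Creates the team's user data area: a small user read/writable area in the
	team's address space from which e.g. the per-thread user_thread
	structures are allocated.
*/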
850 static status_t
851 create_team_user_data(struct team* team)
852 {
853 	void* address = (void*)KERNEL_USER_DATA_BASE;
854 	size_t size = 4 * B_PAGE_SIZE;
855 	team->user_data_area = create_area_etc(team->id, "user area", &address,
856 		B_BASE_ADDRESS, size, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0);
857 	if (team->user_data_area < 0)
858 		return team->user_data_area;
859 
860 	team->user_data = (addr_t)address;
861 	team->used_user_data = 0;
862 	team->user_data_size = size;
863 	team->free_user_threads = NULL;
864 
865 	return B_OK;
866 }
867 
868 
869 static void
870 delete_team_user_data(struct team* team)
871 {
872 	if (team->user_data_area >= 0) {
873 		vm_delete_area(team->id, team->user_data_area, true);
874 		team->user_data = 0;
875 		team->used_user_data = 0;
876 		team->user_data_size = 0;
877 		team->user_data_area = -1;
878 		while (free_user_thread* entry = team->free_user_threads) {
879 			team->free_user_threads = entry->next;
880 			free(entry);
881 		}
882 	}
883 }
884 
885 
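/*!	Copies the flat argument/environment block from userland into kernel
	memory and verifies it: both pointer arrays must be NULL terminated and
	every string must lie completely within the buffer. The pointers are
	relocated to point into the kernel copy, which is returned via
	\a _flatArgs and owned by the caller.
*/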
886 static status_t
887 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
888 	int32 argCount, int32 envCount, char**& _flatArgs)
889 {
890 	if (argCount < 0 || envCount < 0)
891 		return B_BAD_VALUE;
892 
893 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
894 		return B_TOO_MANY_ARGS;
895 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
896 		return B_BAD_VALUE;
897 
898 	if (!IS_USER_ADDRESS(userFlatArgs))
899 		return B_BAD_ADDRESS;
900 
901 	// allocate kernel memory
902 	char** flatArgs = (char**)malloc(flatArgsSize);
903 	if (flatArgs == NULL)
904 		return B_NO_MEMORY;
905 
906 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
907 		free(flatArgs);
908 		return B_BAD_ADDRESS;
909 	}
910 
911 	// check and relocate the array
912 	status_t error = B_OK;
913 	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
914 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
915 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
916 		if (i == argCount || i == argCount + envCount + 1) {
917 			// check array null termination
918 			if (flatArgs[i] != NULL) {
919 				error = B_BAD_VALUE;
920 				break;
921 			}
922 		} else {
923 			// check string
924 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
925 			size_t maxLen = stringEnd - arg;
926 			if (arg < stringBase || arg >= stringEnd
927 					|| strnlen(arg, maxLen) == maxLen) {
928 				error = B_BAD_VALUE;
929 				break;
930 			}
931 
932 			flatArgs[i] = arg;
933 		}
934 	}
935 
936 	if (error == B_OK)
937 		_flatArgs = flatArgs;
938 	else
939 		free(flatArgs);
940 
941 	return error;
942 }
943 
944 
945 static void
946 free_team_arg(struct team_arg *teamArg)
947 {
948 	if (teamArg != NULL) {
949 		free(teamArg->flat_args);
950 		free(teamArg->path);
951 		free(teamArg);
952 	}
953 }
954 
955 
956 static status_t
957 create_team_arg(struct team_arg **_teamArg, const char *path, char** flatArgs,
958 	size_t flatArgsSize, int32 argCount, int32 envCount, port_id port,
959 	uint32 token)
960 {
961 	struct team_arg *teamArg = (struct team_arg*)malloc(sizeof(team_arg));
962 	if (teamArg == NULL)
963 		return B_NO_MEMORY;
964 
965 	teamArg->path = strdup(path);
966 	if (teamArg->path == NULL) {
967 		free(teamArg);
968 		return B_NO_MEMORY;
969 	}
970 
971 	// copy the args over
972 
973 	teamArg->flat_args = flatArgs;
974 	teamArg->flat_args_size = flatArgsSize;
975 	teamArg->arg_count = argCount;
976 	teamArg->env_count = envCount;
977 	teamArg->error_port = port;
978 	teamArg->error_token = token;
979 
980 	*_teamArg = teamArg;
981 	return B_OK;
982 }
983 
984 
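/*!	Entry function of a new team's main thread: sets up the main user stack
	(including TLS and the user_space_program_args/flat arguments block),
	loads runtime_loader into the team's address space, and enters userland
	at its entry point. Takes over ownership of the passed team_arg.
*/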
985 static int32
986 team_create_thread_start(void *args)
987 {
988 	status_t err;
989 	struct thread *t;
990 	struct team *team;
991 	struct team_arg *teamArgs = (struct team_arg*)args;
992 	const char *path;
993 	addr_t entry;
994 	char ustack_name[128];
995 	uint32 sizeLeft;
996 	char **userArgs;
997 	char **userEnv;
998 	struct user_space_program_args *programArgs;
999 	uint32 argCount, envCount, i;
1000 
1001 	t = thread_get_current_thread();
1002 	team = t->team;
1003 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
1004 
1005 	TRACE(("team_create_thread_start: entry thread %ld\n", t->id));
1006 
1007 	// get a user thread for the main thread
1008 	t->user_thread = team_allocate_user_thread(team);
1009 
1010 	// create an initial primary stack area
1011 
1012 	// Main stack area layout is currently as follows (starting from 0):
1013 	//
1014 	// size								| usage
1015 	// ---------------------------------+--------------------------------
1016 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
1017 	// TLS_SIZE							| TLS data
1018 	// sizeof(user_space_program_args)	| argument structure for the runtime
1019 	//									| loader
1020 	// flat arguments size				| flat process arguments and environment
1021 
1022 	// ToDo: ENV_SIZE is a) limited, and b) not used after libroot copied it to the heap
1023 	// ToDo: we could reserve the whole USER_STACK_REGION upfront...
1024 
1025 	sizeLeft = PAGE_ALIGN(USER_MAIN_THREAD_STACK_SIZE
1026 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE + TLS_SIZE
1027 		+ sizeof(struct user_space_program_args) + teamArgs->flat_args_size);
1028 	t->user_stack_base = USER_STACK_REGION + USER_STACK_REGION_SIZE - sizeLeft;
1029 	t->user_stack_size = USER_MAIN_THREAD_STACK_SIZE
1030 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
1031 		// the exact location at the end of the user stack area
1032 
1033 	sprintf(ustack_name, "%s_main_stack", team->name);
1034 	t->user_stack_area = create_area_etc(team->id, ustack_name,
1035 		(void **)&t->user_stack_base, B_EXACT_ADDRESS, sizeLeft, B_NO_LOCK,
1036 		B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0, 0);
1037 	if (t->user_stack_area < 0) {
1038 		dprintf("team_create_thread_start: could not create default user stack "
1039 			"region: %s\n", strerror(t->user_stack_area));
1040 
1041 		free_team_arg(teamArgs);
1042 		return t->user_stack_area;
1043 	}
1044 
1045 	// now that the TLS area is allocated, initialize TLS
1046 	arch_thread_init_tls(t);
1047 
1048 	argCount = teamArgs->arg_count;
1049 	envCount = teamArgs->env_count;
1050 
1051 	programArgs = (struct user_space_program_args *)(t->user_stack_base
1052 		+ t->user_stack_size + TLS_SIZE);
1053 
1054 	userArgs = (char**)(programArgs + 1);
1055 	userEnv = userArgs + argCount + 1;
1056 	path = teamArgs->path;
1057 
1058 	if (user_strlcpy(programArgs->program_path, path,
1059 				sizeof(programArgs->program_path)) < B_OK
1060 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1061 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char **)) < B_OK
1062 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1063 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char **)) < B_OK
1064 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1065 				sizeof(port_id)) < B_OK
1066 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1067 				sizeof(uint32)) < B_OK
1068 		|| user_memcpy(userArgs, teamArgs->flat_args,
1069 				teamArgs->flat_args_size) < B_OK) {
1070 		// the team deletion process will clean this mess
1071 		return B_BAD_ADDRESS;
1072 	}
1073 
1074 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1075 
1076 	// add args to info member
1077 	team->args[0] = 0;
1078 	strlcpy(team->args, path, sizeof(team->args));
1079 	for (i = 1; i < argCount; i++) {
1080 		strlcat(team->args, " ", sizeof(team->args));
1081 		strlcat(team->args, teamArgs->flat_args[i], sizeof(team->args));
1082 	}
1083 
1084 	free_team_arg(teamArgs);
1085 		// the arguments are already on the user stack, we no longer need
1086 		// them in this form
1087 
1088 	// NOTE: Normally arch_thread_enter_userspace() never returns, i.e.
1089 	// automatic variables with function scope will never be destroyed.
1090 	{
1091 		// find runtime_loader path
1092 		KPath runtimeLoaderPath;
1093 		err = find_directory(B_BEOS_SYSTEM_DIRECTORY, gBootDevice, false,
1094 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1095 		if (err < B_OK) {
1096 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1097 				strerror(err)));
1098 			return err;
1099 		}
1100 		runtimeLoaderPath.UnlockBuffer();
1101 		err = runtimeLoaderPath.Append("runtime_loader");
1102 
1103 		if (err == B_OK)
1104 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0, &entry);
1105 	}
1106 
1107 	if (err < B_OK) {
1108 		// Luckily, we don't have to clean up the mess we created - that's
1109 		// done for us by the normal team deletion process
1110 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1111 			"%s\n", strerror(err)));
1112 		return err;
1113 	}
1114 
1115 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1116 
1117 	team->state = TEAM_STATE_NORMAL;
1118 
1119 	// jump to the entry point in user space
1120 	return arch_thread_enter_userspace(t, entry, programArgs, NULL);
1121 		// only returns in case of error
1122 }
1123 
1124 
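/*!	Creates a new team running the executable given by \a _flatArgs[0]:
	allocates the team structure, inherits user/group and I/O context from
	the parent team, creates the address space and user data area, and spawns
	the team's main thread, which does the actual loading in
	team_create_thread_start(). If \a flags contains B_WAIT_TILL_LOADED, the
	call blocks until loading has finished or failed. On success the ID of
	the main thread is returned and ownership of \a _flatArgs has been taken
	over.
*/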
1125 static thread_id
1126 load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1127 	int32 envCount, int32 priority, team_id parentID, uint32 flags,
1128 	port_id errorPort, uint32 errorToken)
1129 {
1130 	char** flatArgs = _flatArgs;
1131 	struct team *team;
1132 	const char *threadName;
1133 	thread_id thread;
1134 	status_t status;
1135 	cpu_status state;
1136 	struct team_arg *teamArgs;
1137 	struct team_loading_info loadingInfo;
1138 	io_context* parentIOContext = NULL;
1139 
1140 	if (flatArgs == NULL || argCount == 0)
1141 		return B_BAD_VALUE;
1142 
1143 	const char* path = flatArgs[0];
1144 
1145 	TRACE(("load_image_internal: name '%s', args = %p, argCount = %ld\n",
1146 		path, flatArgs, argCount));
1147 
1148 	team = create_team_struct(path, false);
1149 	if (team == NULL)
1150 		return B_NO_MEMORY;
1151 
1152 	if (flags & B_WAIT_TILL_LOADED) {
1153 		loadingInfo.thread = thread_get_current_thread();
1154 		loadingInfo.result = B_ERROR;
1155 		loadingInfo.done = false;
1156 		team->loading_info = &loadingInfo;
1157 	}
1158 
1159  	InterruptsSpinLocker teamLocker(gTeamSpinlock);
1160 
1161 	// get the parent team
1162 	struct team* parent;
1163 
1164 	if (parentID == B_CURRENT_TEAM)
1165 		parent = thread_get_current_thread()->team;
1166 	else
1167 		parent = team_get_team_struct_locked(parentID);
1168 
1169 	if (parent == NULL) {
1170 		teamLocker.Unlock();
1171 		status = B_BAD_TEAM_ID;
1172 		goto err0;
1173 	}
1174 
1175 	// inherit the parent's user/group
1176 	inherit_parent_user_and_group_locked(team, parent);
1177 
1178 	hash_insert(sTeamHash, team);
1179 	insert_team_into_parent(parent, team);
1180 	insert_team_into_group(parent->group, team);
1181 	sUsedTeams++;
1182 
1183 	// get a reference to the parent's I/O context -- we need it to create ours
1184 	parentIOContext = parent->io_context;
1185 	vfs_get_io_context(parentIOContext);
1186 
1187 	teamLocker.Unlock();
1188 
1189 	// check the executable's set-user/group-id permission
1190 	update_set_id_user_and_group(team, path);
1191 
1192 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1193 		envCount, errorPort, errorToken);
1194 
1195 	if (status != B_OK)
1196 		goto err1;
1197 
1198 	_flatArgs = NULL;
1199 		// args are owned by the team_arg structure now
1200 
1201 	// create a new io_context for this team
1202 	team->io_context = vfs_new_io_context(parentIOContext);
1203 	if (!team->io_context) {
1204 		status = B_NO_MEMORY;
1205 		goto err2;
1206 	}
1207 
1208 	// We don't need the parent's I/O context any longer.
1209 	vfs_put_io_context(parentIOContext);
1210 	parentIOContext = NULL;
1211 
1212 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1213 	vfs_exec_io_context(team->io_context);
1214 
1215 	// create an address space for this team
1216 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1217 		&team->address_space);
1218 	if (status < B_OK)
1219 		goto err3;
1220 
1221 	// cut the path from the main thread name
1222 	threadName = strrchr(path, '/');
1223 	if (threadName != NULL)
1224 		threadName++;
1225 	else
1226 		threadName = path;
1227 
1228 	// create the user data area
1229 	status = create_team_user_data(team);
1230 	if (status != B_OK)
1231 		goto err4;
1232 
1233 	// notify team listeners
1234 	sNotificationService.Notify(TEAM_ADDED, team);
1235 
1236 	// Create a kernel thread, but under the context of the new team
1237 	// The new thread will take over ownership of teamArgs
1238 	thread = spawn_kernel_thread_etc(team_create_thread_start, threadName,
1239 		B_NORMAL_PRIORITY, teamArgs, team->id, team->id);
1240 	if (thread < 0) {
1241 		status = thread;
1242 		goto err5;
1243 	}
1244 
1245 	// wait for the loader of the new team to finish its work
1246 	if (flags & B_WAIT_TILL_LOADED) {
1247 		struct thread *mainThread;
1248 
1249 		state = disable_interrupts();
1250 		GRAB_THREAD_LOCK();
1251 
1252 		mainThread = thread_get_thread_struct_locked(thread);
1253 		if (mainThread) {
1254 			// resume the team's main thread
1255 			if (mainThread->state == B_THREAD_SUSPENDED)
1256 				scheduler_enqueue_in_run_queue(mainThread);
1257 
1258 			// Now suspend ourselves until loading is finished.
1259 			// We will be woken either by the thread, when it finished or
1260 			// aborted loading, or when the team is going to die (e.g. is
1261 			// killed). In either case the one setting `loadingInfo.done' is
1262 			// responsible for removing the info from the team structure.
1263 			while (!loadingInfo.done) {
1264 				thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
1265 				scheduler_reschedule();
1266 			}
1267 		} else {
1268 			// Impressive! Someone managed to kill the thread in this short
1269 			// time.
1270 		}
1271 
1272 		RELEASE_THREAD_LOCK();
1273 		restore_interrupts(state);
1274 
1275 		if (loadingInfo.result < B_OK)
1276 			return loadingInfo.result;
1277 	}
1278 
1279 	// notify the debugger
1280 	user_debug_team_created(team->id);
1281 
1282 	return thread;
1283 
1284 err5:
1285 	sNotificationService.Notify(TEAM_REMOVED, team);
1286 	delete_team_user_data(team);
1287 err4:
1288 	vm_put_address_space(team->address_space);
1289 err3:
1290 	vfs_put_io_context(team->io_context);
1291 err2:
1292 	free_team_arg(teamArgs);
1293 err1:
1294 	if (parentIOContext != NULL)
1295 		vfs_put_io_context(parentIOContext);
1296 
1297 	// remove the team structure from the team hash table and delete the team structure
1298 	state = disable_interrupts();
1299 	GRAB_TEAM_LOCK();
1300 
1301 	remove_team_from_group(team);
1302 	remove_team_from_parent(team->parent, team);
1303 	hash_remove(sTeamHash, team);
1304 
1305 	RELEASE_TEAM_LOCK();
1306 	restore_interrupts(state);
1307 
1308 err0:
1309 	delete_team_struct(team);
1310 
1311 	return status;
1312 }
1313 
1314 
1315 /*!	Almost shuts down the current team and loads a new image into it.
1316 	If successful, this function does not return and will take over ownership of
1317 	the arguments provided.
1318 	This function may only be called from user space.
1319 */
1320 static status_t
1321 exec_team(const char *path, char**& _flatArgs, size_t flatArgsSize,
1322 	int32 argCount, int32 envCount)
1323 {
1324 	// NOTE: Since this function normally doesn't return, don't use automatic
1325 	// variables that need destruction in the function scope.
1326 	char** flatArgs = _flatArgs;
1327 	struct team *team = thread_get_current_thread()->team;
1328 	struct team_arg *teamArgs;
1329 	const char *threadName;
1330 	status_t status = B_OK;
1331 	cpu_status state;
1332 	struct thread *thread;
1333 	thread_id nubThreadID = -1;
1334 
1335 	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
1336 		path, argCount, envCount, team->id));
1337 
1338 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1339 
1340 	// switching the kernel at run time is probably not a good idea :)
1341 	if (team == team_get_kernel_team())
1342 		return B_NOT_ALLOWED;
1343 
1344 	// we currently need to be single threaded here
1345 	// ToDo: maybe we should just kill all other threads and
1346 	//	make the current thread the team's main thread?
1347 	if (team->main_thread != thread_get_current_thread())
1348 		return B_NOT_ALLOWED;
1349 
1350 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1351 	// We iterate through the thread list to make sure that there's no other
1352 	// thread.
1353 	state = disable_interrupts();
1354 	GRAB_TEAM_LOCK();
1355 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1356 
1357 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1358 		nubThreadID = team->debug_info.nub_thread;
1359 
1360 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1361 
1362 	for (thread = team->thread_list; thread; thread = thread->team_next) {
1363 		if (thread != team->main_thread && thread->id != nubThreadID) {
1364 			status = B_NOT_ALLOWED;
1365 			break;
1366 		}
1367 	}
1368 
1369 	RELEASE_TEAM_LOCK();
1370 	restore_interrupts(state);
1371 
1372 	if (status != B_OK)
1373 		return status;
1374 
1375 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1376 		envCount, -1, 0);
1377 
1378 	if (status != B_OK)
1379 		return status;
1380 
1381 	_flatArgs = NULL;
1382 		// args are owned by the team_arg structure now
1383 
1384 	// ToDo: remove team resources if there are any left
1385 	// thread_atkernel_exit() might not be called at all
1386 
1387 	thread_reset_for_exec();
1388 
1389 	user_debug_prepare_for_exec();
1390 
1391 	delete_team_user_data(team);
1392 	vm_delete_areas(team->address_space);
1393 	xsi_sem_undo(team);
1394 	delete_owned_ports(team->id);
1395 	sem_delete_owned_sems(team->id);
1396 	remove_images(team);
1397 	vfs_exec_io_context(team->io_context);
1398 	delete_realtime_sem_context(team->realtime_sem_context);
1399 	team->realtime_sem_context = NULL;
1400 
1401 	status = create_team_user_data(team);
1402 	if (status != B_OK) {
1403 		// creating the user data failed -- we're toast
1404 		// TODO: We should better keep the old user area in the first place.
1405 		exit_thread(status);
1406 		return status;
1407 	}
1408 
1409 	user_debug_finish_after_exec();
1410 
1411 	// rename the team
1412 
1413 	set_team_name(team, path);
1414 
1415 	// cut the path from the team name and rename the main thread, too
1416 	threadName = strrchr(path, '/');
1417 	if (threadName != NULL)
1418 		threadName++;
1419 	else
1420 		threadName = path;
1421 	rename_thread(thread_get_current_thread_id(), threadName);
1422 
1423 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1424 
1425 	// Update user/group according to the executable's set-user/group-id
1426 	// permission.
1427 	update_set_id_user_and_group(team, path);
1428 
1429 	user_debug_team_exec();
1430 
1431 	// notify team listeners
1432 	sNotificationService.Notify(TEAM_EXEC, team);
1433 
1434 	status = team_create_thread_start(teamArgs);
1435 		// this one usually doesn't return...
1436 
1437 	// sorry, we have to kill ourselves, there is no way out anymore
1438 	// (without any areas left and all that)
1439 	exit_thread(status);
1440 
1441 	// we return a status here since the signal that is sent by the
1442 	// call above is not immediately handled
1443 	return B_ERROR;
1444 }
1445 
1446 
1447 /*! This is the first function to be called from the newly created
1448 	main child thread.
1449 	It will fill in everything that's left to do from fork_arg, and
1450 	return from the parent's fork() syscall to the child.
1451 */
1452 static int32
1453 fork_team_thread_start(void *_args)
1454 {
1455 	struct thread *thread = thread_get_current_thread();
1456 	struct fork_arg *forkArgs = (struct fork_arg *)_args;
1457 
1458 	struct arch_fork_arg archArgs = forkArgs->arch_info;
1459 		// we need a local copy of the arch dependent part
1460 
1461 	thread->user_stack_area = forkArgs->user_stack_area;
1462 	thread->user_stack_base = forkArgs->user_stack_base;
1463 	thread->user_stack_size = forkArgs->user_stack_size;
1464 	thread->user_local_storage = forkArgs->user_local_storage;
1465 	thread->sig_block_mask = forkArgs->sig_block_mask;
1466 	thread->user_thread = forkArgs->user_thread;
1467 	memcpy(thread->sig_action, forkArgs->sig_action,
1468 		sizeof(forkArgs->sig_action));
1469 	thread->signal_stack_base = forkArgs->signal_stack_base;
1470 	thread->signal_stack_size = forkArgs->signal_stack_size;
1471 	thread->signal_stack_enabled = forkArgs->signal_stack_enabled;
1472 
1473 	arch_thread_init_tls(thread);
1474 
1475 	free(forkArgs);
1476 
1477 	// set frame of the parent thread to this one, too
1478 
1479 	arch_restore_fork_frame(&archArgs);
1480 		// This one won't return here
1481 
1482 	return 0;
1483 }
1484 
1485 
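/*!	Kernel backend of fork(): creates a child team that is a copy of the
	current one. All areas of the parent are copied into the child's new
	address space (the user data area is recreated rather than cloned), the
	I/O context, realtime sem context, and image list are duplicated, and the
	child's main thread is started in fork_team_thread_start() with the
	parent thread's user-space state captured in a fork_arg.
*/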
1486 static thread_id
1487 fork_team(void)
1488 {
1489 	struct thread *parentThread = thread_get_current_thread();
1490 	struct team *parentTeam = parentThread->team, *team;
1491 	struct fork_arg *forkArgs;
1492 	struct area_info info;
1493 	thread_id threadID;
1494 	status_t status;
1495 	int32 cookie;
1496 
1497 	TRACE(("fork_team(): team %ld\n", parentTeam->id));
1498 
1499 	if (parentTeam == team_get_kernel_team())
1500 		return B_NOT_ALLOWED;
1501 
1502 	// create a new team
1503 	// TODO: this is very similar to load_image_internal() - maybe we can do
1504 	// something about it :)
1505 
1506 	team = create_team_struct(parentTeam->name, false);
1507 	if (team == NULL)
1508 		return B_NO_MEMORY;
1509 
1510 	strlcpy(team->args, parentTeam->args, sizeof(team->args));
1511 
1512 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
1513 
1514 	// Inherit the parent's user/group.
1515 	inherit_parent_user_and_group_locked(team, parentTeam);
1516 
1517 	hash_insert(sTeamHash, team);
1518 	insert_team_into_parent(parentTeam, team);
1519 	insert_team_into_group(parentTeam->group, team);
1520 	sUsedTeams++;
1521 
1522 	teamLocker.Unlock();
1523 
1524 	forkArgs = (struct fork_arg *)malloc(sizeof(struct fork_arg));
1525 	if (forkArgs == NULL) {
1526 		status = B_NO_MEMORY;
1527 		goto err1;
1528 	}
1529 
1530 	// create a new io_context for this team
1531 	team->io_context = vfs_new_io_context(parentTeam->io_context);
1532 	if (!team->io_context) {
1533 		status = B_NO_MEMORY;
1534 		goto err2;
1535 	}
1536 
1537 	// duplicate the realtime sem context
1538 	if (parentTeam->realtime_sem_context) {
1539 		team->realtime_sem_context = clone_realtime_sem_context(
1540 			parentTeam->realtime_sem_context);
1541 		if (team->realtime_sem_context == NULL) {
1542 			status = B_NO_MEMORY;
1543 			goto err25;
1544 		}
1545 	}
1546 
1547 	// create an address space for this team
1548 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1549 		&team->address_space);
1550 	if (status < B_OK)
1551 		goto err3;
1552 
1553 	// copy all areas of the team
1554 	// ToDo: should be able to handle stack areas differently (ie. don't have them copy-on-write)
1555 	// ToDo: all stacks of other threads than the current one could be left out
1556 
1557 	forkArgs->user_thread = NULL;
1558 
1559 	cookie = 0;
1560 	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
1561 		if (info.area == parentTeam->user_data_area) {
1562 			// don't clone the user area; just create a new one
1563 			status = create_team_user_data(team);
1564 			if (status != B_OK)
1565 				break;
1566 
1567 			forkArgs->user_thread = team_allocate_user_thread(team);
1568 		} else {
1569 			void *address;
1570 			area_id area = vm_copy_area(team->address_space->id, info.name,
1571 				&address, B_CLONE_ADDRESS, info.protection, info.area);
1572 			if (area < B_OK) {
1573 				status = area;
1574 				break;
1575 			}
1576 
1577 			if (info.area == parentThread->user_stack_area)
1578 				forkArgs->user_stack_area = area;
1579 		}
1580 	}
1581 
1582 	if (status < B_OK)
1583 		goto err4;
1584 
1585 	if (forkArgs->user_thread == NULL) {
1586 #if KDEBUG
1587 		panic("user data area not found, parent area is %ld",
1588 			parentTeam->user_data_area);
1589 #endif
1590 		status = B_ERROR;
1591 		goto err4;
1592 	}
1593 
1594 	forkArgs->user_stack_base = parentThread->user_stack_base;
1595 	forkArgs->user_stack_size = parentThread->user_stack_size;
1596 	forkArgs->user_local_storage = parentThread->user_local_storage;
1597 	forkArgs->sig_block_mask = parentThread->sig_block_mask;
1598 	memcpy(forkArgs->sig_action, parentThread->sig_action,
1599 		sizeof(forkArgs->sig_action));
1600 	forkArgs->signal_stack_base = parentThread->signal_stack_base;
1601 	forkArgs->signal_stack_size = parentThread->signal_stack_size;
1602 	forkArgs->signal_stack_enabled = parentThread->signal_stack_enabled;
1603 
1604 	arch_store_fork_frame(&forkArgs->arch_info);
1605 
1606 	// copy image list
1607 	image_info imageInfo;
1608 	cookie = 0;
1609 	while (get_next_image_info(parentTeam->id, &cookie, &imageInfo) == B_OK) {
1610 		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
1611 		if (image < 0)
1612 			goto err5;
1613 	}
1614 
1615 	// notify team listeners
1616 	sNotificationService.Notify(TEAM_ADDED, team);
1617 
1618 	// create a kernel thread under the context of the new team
1619 	threadID = spawn_kernel_thread_etc(fork_team_thread_start,
1620 		parentThread->name, parentThread->priority, forkArgs,
1621 		team->id, team->id);
1622 	if (threadID < 0) {
1623 		status = threadID;
1624 		goto err5;
1625 	}
1626 
1627 	// notify the debugger
1628 	user_debug_team_created(team->id);
1629 
1630 	T(TeamForked(threadID));
1631 
1632 	resume_thread(threadID);
1633 	return threadID;
1634 
1635 err5:
1636 	sNotificationService.Notify(TEAM_REMOVED, team);
1637 	remove_images(team);
1638 err4:
1639 	vm_delete_address_space(team->address_space);
1640 err3:
1641 	delete_realtime_sem_context(team->realtime_sem_context);
1642 err25:
1643 	vfs_put_io_context(team->io_context);
1644 err2:
1645 	free(forkArgs);
1646 err1:
1647 	// remove the team structure from the team hash table and delete the team structure
1648 	teamLocker.Lock();
1649 
1650 	remove_team_from_group(team);
1651 	remove_team_from_parent(parentTeam, team);
1652 	hash_remove(sTeamHash, team);
1653 
1654 	teamLocker.Unlock();
1655 
1656 	delete_team_struct(team);
1657 
1658 	return status;
1659 }
1660 
1661 
1662 /*!	Returns whether the specified \a team has any children belonging to the
1663 	specified \a group.
1664 	Must be called with the team lock held.
1665 */
1666 static bool
1667 has_children_in_group(struct team *parent, pid_t groupID)
1668 {
1669 	struct team *team;
1670 
1671 	struct process_group *group = team_get_process_group_locked(
1672 		parent->group->session, groupID);
1673 	if (group == NULL)
1674 		return false;
1675 
1676 	for (team = group->teams; team; team = team->group_next) {
1677 		if (team->parent == parent)
1678 			return true;
1679 	}
1680 
1681 	return false;
1682 }
1683 
1684 
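/*!	Returns the first entry in \a children matching the waitpid() style
	\a id: a positive \a id matches the child with exactly that ID, -1
	matches any entry, and an \a id < -1 matches any entry whose process
	group ID equals -\a id.
*/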
1685 static job_control_entry*
1686 get_job_control_entry(team_job_control_children* children, pid_t id)
1687 {
1688 	for (JobControlEntryList::Iterator it = children->entries.GetIterator();
1689 		 job_control_entry* entry = it.Next();) {
1690 
1691 		if (id > 0) {
1692 			if (entry->thread == id)
1693 				return entry;
1694 		} else if (id == -1) {
1695 			return entry;
1696 		} else {
1697 			pid_t processGroup
1698 				= (entry->team ? entry->team->group_id : entry->group_id);
1699 			if (processGroup == -id)
1700 				return entry;
1701 		}
1702 	}
1703 
1704 	return NULL;
1705 }
1706 
1707 
1708 static job_control_entry*
1709 get_job_control_entry(struct team* team, pid_t id, uint32 flags)
1710 {
1711 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
1712 
1713 	if (entry == NULL && (flags & WCONTINUED) != 0)
1714 		entry = get_job_control_entry(team->continued_children, id);
1715 
1716 	if (entry == NULL && (flags & WUNTRACED) != 0)
1717 		entry = get_job_control_entry(team->stopped_children, id);
1718 
1719 	return entry;
1720 }
1721 
1722 
1723 job_control_entry::job_control_entry()
1724 	:
1725 	has_group_ref(false)
1726 {
1727 }
1728 
1729 
1730 job_control_entry::~job_control_entry()
1731 {
1732 	if (has_group_ref) {
1733 		InterruptsSpinLocker locker(gTeamSpinlock);
1734 		release_process_group_ref(group_id);
1735 	}
1736 }
1737 
1738 
1739 /*!	Team and thread lock must be held.
1740 */
1741 void
1742 job_control_entry::InitDeadState()
1743 {
1744 	if (team != NULL) {
1745 		struct thread* thread = team->main_thread;
1746 		group_id = team->group_id;
1747 		this->thread = thread->id;
1748 		status = thread->exit.status;
1749 		reason = thread->exit.reason;
1750 		signal = thread->exit.signal;
1751 		team = NULL;
1752 		acquire_process_group_ref(group_id);
1753 		has_group_ref = true;
1754 	}
1755 }
1756 
1757 
1758 job_control_entry&
1759 job_control_entry::operator=(const job_control_entry& other)
1760 {
1761 	state = other.state;
1762 	thread = other.thread;
1763 	has_group_ref = false;
1764 	team = other.team;
1765 	group_id = other.group_id;
1766 	status = other.status;
1767 	reason = other.reason;
1768 	signal = other.signal;
1769 
1770 	return *this;
1771 }
1772 
1773 
1774 /*! This is the kernel backend for waitpid(). It is a bit more expressive than
1775 	waitpid() when it comes to reporting why a thread has died.
1776 */
1777 static thread_id
1778 wait_for_child(pid_t child, uint32 flags, int32 *_reason,
1779 	status_t *_returnCode)
1780 {
1781 	struct thread* thread = thread_get_current_thread();
1782 	struct team* team = thread->team;
1783 	struct job_control_entry foundEntry;
1784 	struct job_control_entry* freeDeathEntry = NULL;
1785 	status_t status = B_OK;
1786 
1787 	TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));
1788 
1789 	T(WaitForChild(child, flags));
1790 
1791 	if (child == 0) {
1792 		// wait for all children in the process group of the calling team
1793 		child = -team->group_id;
1794 	}
1795 
1796 	bool ignoreFoundEntries = false;
1797 	bool ignoreFoundEntriesChecked = false;
1798 
1799 	while (true) {
1800 		InterruptsSpinLocker locker(gTeamSpinlock);
1801 
1802 		// check whether any condition holds
1803 		job_control_entry* entry = get_job_control_entry(team, child, flags);
1804 
1805 		// If we don't have an entry yet, check whether there are any children
1806 		// complying to the process group specification at all.
1807 		if (entry == NULL) {
1808 			// No success yet -- check whether there are any children we could
1809 			// wait for.
1810 			bool childrenExist = false;
1811 			if (child == -1) {
1812 				childrenExist = team->children != NULL;
1813 			} else if (child < -1) {
1814 				childrenExist = has_children_in_group(team, -child);
1815 			} else {
1816 				if (struct team* childTeam = team_get_team_struct_locked(child))
1817 					childrenExist = childTeam->parent == team;
1818 			}
1819 
1820 			if (!childrenExist) {
1821 				// there is no child we could wait for
1822 				status = ECHILD;
1823 			} else {
1824 				// the children we're waiting for are still running
1825 				status = B_WOULD_BLOCK;
1826 			}
1827 		} else {
1828 			// got something
1829 			foundEntry = *entry;
1830 			if (entry->state == JOB_CONTROL_STATE_DEAD) {
1831 				// The child is dead. Reap its death entry.
1832 				freeDeathEntry = entry;
1833 				team->dead_children->entries.Remove(entry);
1834 				team->dead_children->count--;
1835 			} else {
1836 				// The child is well. Reset its job control state.
1837 				team_set_job_control_state(entry->team,
1838 					JOB_CONTROL_STATE_NONE, 0, false);
1839 			}
1840 		}
1841 
1842 		// If we haven't got anything yet, prepare for waiting for the
1843 		// condition variable.
1844 		ConditionVariableEntry deadWaitEntry;
1845 
1846 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
1847 			team->dead_children->condition_variable.Add(&deadWaitEntry);
1848 
1849 		locker.Unlock();
1850 
1851 		// we got our entry and can return to our caller
1852 		if (status == B_OK) {
1853 			if (ignoreFoundEntries) {
1854 				// ... unless we shall ignore found entries
1855 				delete freeDeathEntry;
1856 				freeDeathEntry = NULL;
1857 				continue;
1858 			}
1859 
1860 			break;
1861 		}
1862 
1863 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
1864 			T(WaitForChildDone(status));
1865 			return status;
1866 		}
1867 
1868 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
1869 		if (status == B_INTERRUPTED) {
1870 			T(WaitForChildDone(status));
1871 			return status;
1872 		}
1873 
1874 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
1875 		// all our children are dead and fail with ECHILD. We check the
1876 		// condition at this point.
1877 		if (!ignoreFoundEntriesChecked) {
1878 			struct sigaction& handler = thread->sig_action[SIGCHLD - 1];
1879 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
1880 				|| handler.sa_handler == SIG_IGN) {
1881 				ignoreFoundEntries = true;
1882 			}
1883 
1884 			ignoreFoundEntriesChecked = true;
1885 		}
1886 	}
1887 
1888 	delete freeDeathEntry;
1889 
1890 	// when we got here, we have a valid death entry, and
1891 	// already got unregistered from the team or group
1892 	int reason = 0;
1893 	switch (foundEntry.state) {
1894 		case JOB_CONTROL_STATE_DEAD:
1895 			reason = foundEntry.reason;
1896 			break;
1897 		case JOB_CONTROL_STATE_STOPPED:
1898 			reason = THREAD_STOPPED;
1899 			break;
1900 		case JOB_CONTROL_STATE_CONTINUED:
1901 			reason = THREAD_CONTINUED;
1902 			break;
1903 		case JOB_CONTROL_STATE_NONE:
1904 			// can't happen
1905 			break;
1906 	}
1907 
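	// hand the results back to the caller: the low 16 bits of the reason
	// word hold the THREAD_* reason code, the high 16 bits the signal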
1908 	*_returnCode = foundEntry.status;
1909 	*_reason = (foundEntry.signal << 16) | reason;
1910 
1911 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
1912 	// status is available.
1913 	if (is_signal_blocked(SIGCHLD)) {
1914 		InterruptsSpinLocker locker(gTeamSpinlock);
1915 
1916 		if (get_job_control_entry(team, child, flags) == NULL)
1917 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));
1918 	}
1919 
1920 	// When the team is dead, the main thread continues to live in the kernel
1921 	// team for a very short time. To avoid surprises for the caller we'd rather
1922 	// wait until the thread is really gone.
1923 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
1924 		wait_for_thread(foundEntry.thread, NULL);
1925 
1926 	T(WaitForChildDone(foundEntry));
1927 
1928 	return foundEntry.thread;
1929 }
1930 
1931 
1932 /*! Fills the team_info structure with information from the specified
1933 	team.
1934 	The team lock must be held when called.
1935 */
1936 static status_t
1937 fill_team_info(struct team *team, team_info *info, size_t size)
1938 {
1939 	if (size != sizeof(team_info))
1940 		return B_BAD_VALUE;
1941 
1942 	// ToDo: Set more information in team_info
1943 	memset(info, 0, size);
1944 
1945 	info->team = team->id;
1946 	info->thread_count = team->num_threads;
1947 	info->image_count = count_images(team);
1948 	//info->area_count =
1949 	info->debugger_nub_thread = team->debug_info.nub_thread;
1950 	info->debugger_nub_port = team->debug_info.nub_port;
1951 	//info->uid =
1952 	//info->gid =
1953 
1954 	strlcpy(info->args, team->args, sizeof(info->args));
1955 	info->argc = 1;
1956 
1957 	return B_OK;
1958 }
1959 
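// From userland this function is typically reached via the public
// get_team_info() API; a minimal calling sketch (illustrative only):
//
//	team_info info;
//	if (get_team_info(B_CURRENT_TEAM, &info) == B_OK)
//		printf("team %ld has %ld threads\n", info.team, info.thread_count);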
1960 
1961 /*!	Updates the \c orphaned field of a process_group and returns its new value.
1962 	Interrupts must be disabled and team lock be held.
1963 */
1964 static bool
1965 update_orphaned_process_group(process_group* group, pid_t dyingProcess)
1966 {
1967 	// Orphaned Process Group: "A process group in which the parent of every
1968 	// member is either itself a member of the group or is not a member of the
1969 	// group's session." (Open Group Base Specs Issue 6)
1970 
1971 	// once orphaned, things won't change (exception: cf. setpgid())
1972 	if (group->orphaned)
1973 		return true;
1974 
1975 	struct team* team = group->teams;
1976 	while (team != NULL) {
1977 		struct team* parent = team->parent;
1978 		if (team->id != dyingProcess && parent != NULL
1979 			&& parent->id != dyingProcess
1980 			&& parent->group_id != group->id
1981 			&& parent->session_id == group->session->id) {
1982 			return false;
1983 		}
1984 
1985 		team = team->group_next;
1986 	}
1987 
1988 	group->orphaned = true;
1989 	return true;
1990 }
1991 
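// Illustration (not part of the algorithm): when a controlling process (a
// session leader with a controlling tty) exits, team_remove_team() below
// re-evaluates this predicate for each child's process group and sends
// SIGHUP + SIGCONT to groups that have just become orphaned and still
// contain stopped processes.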
1992 
1993 /*!	Returns whether the process group contains stopped processes.
1994 	Interrupts must be disabled and team lock be held.
1995 */
1996 static bool
1997 process_group_has_stopped_processes(process_group* group)
1998 {
1999 	SpinLocker _(gThreadSpinlock);
2000 
2001 	struct team* team = group->teams;
2002 	while (team != NULL) {
2003 		if (team->main_thread->state == B_THREAD_SUSPENDED)
2004 			return true;
2005 
2006 		team = team->group_next;
2007 	}
2008 
2009 	return false;
2010 }
2011 
2012 
2013 //	#pragma mark - Private kernel API
2014 
2015 
2016 status_t
2017 team_init(kernel_args *args)
2018 {
2019 	struct process_session *session;
2020 	struct process_group *group;
2021 
2022 	// create the team hash table
2023 	sTeamHash = hash_init(16, offsetof(struct team, next),
2024 		&team_struct_compare, &team_struct_hash);
2025 
2026 	sGroupHash = hash_init(16, offsetof(struct process_group, next),
2027 		&process_group_compare, &process_group_hash);
2028 
2029 	// create initial session and process groups
2030 
2031 	session = create_process_session(1);
2032 	if (session == NULL)
2033 		panic("Could not create initial session.\n");
2034 
2035 	group = create_process_group(1);
2036 	if (group == NULL)
2037 		panic("Could not create initial process group.\n");
2038 
2039 	insert_group_into_session(session, group);
2040 
2041 	// create the kernel team
2042 	sKernelTeam = create_team_struct("kernel_team", true);
2043 	if (sKernelTeam == NULL)
2044 		panic("could not create kernel team!\n");
2045 	strcpy(sKernelTeam->args, sKernelTeam->name);
2046 	sKernelTeam->state = TEAM_STATE_NORMAL;
2047 
2048 	sKernelTeam->saved_set_uid = 0;
2049 	sKernelTeam->real_uid = 0;
2050 	sKernelTeam->effective_uid = 0;
2051 	sKernelTeam->saved_set_gid = 0;
2052 	sKernelTeam->real_gid = 0;
2053 	sKernelTeam->effective_gid = 0;
2054 	sKernelTeam->supplementary_groups = NULL;
2055 	sKernelTeam->supplementary_group_count = 0;
2056 
2057 	insert_team_into_group(group, sKernelTeam);
2058 
2059 	sKernelTeam->io_context = vfs_new_io_context(NULL);
2060 	if (sKernelTeam->io_context == NULL)
2061 		panic("could not create io_context for kernel team!\n");
2062 
2063 	// stick it in the team hash
2064 	hash_insert(sTeamHash, sKernelTeam);
2065 
2066 	add_debugger_command_etc("team", &dump_team_info,
2067 		"Dump info about a particular team",
2068 		"[ <id> | <address> | <name> ]\n"
2069 		"Prints information about the specified team. If no argument is given\n"
2070 		"the current team is selected.\n"
2071 		"  <id>       - The ID of the team.\n"
2072 		"  <address>  - The address of the team structure.\n"
2073 		"  <name>     - The team's name.\n", 0);
2074 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2075 		"\n"
2076 		"Prints a list of all existing teams.\n", 0);
2077 
2078 	new(&sNotificationService) TeamNotificationService();
2079 
2080 	return B_OK;
2081 }
2082 
2083 
2084 int32
2085 team_max_teams(void)
2086 {
2087 	return sMaxTeams;
2088 }
2089 
2090 
2091 int32
2092 team_used_teams(void)
2093 {
2094 	return sUsedTeams;
2095 }
2096 
2097 
2098 /*!	Iterates through the list of teams. The team spinlock must be held.
2099  */
2100 struct team*
2101 team_iterate_through_teams(team_iterator_callback callback, void* cookie)
2102 {
2103 	struct hash_iterator iterator;
2104 	hash_open(sTeamHash, &iterator);
2105 
2106 	struct team* team;
2107 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
2108 		if (callback(team, cookie))
2109 			break;
2110 	}
2111 
2112 	hash_close(sTeamHash, &iterator, false);
2113 
2114 	return team;
2115 }
2116 
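// A minimal usage sketch (illustrative only; the callback is an assumption
// following the callback(team, cookie) convention above, where returning
// true stops the iteration):
//
//	static bool
//	count_teams_callback(struct team* team, void* cookie)
//	{
//		(*(int32*)cookie)++;
//		return false;	// keep iterating
//	}
//
//	// with gTeamSpinlock held (interrupts disabled):
//	int32 count = 0;
//	team_iterate_through_teams(&count_teams_callback, &count);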
2117 
2118 /*! Returns the team's death entry for the given child, if any, and sets
2119 	\a _deleteEntry to whether the caller must delete it. The team lock must be held.
2120 */
2121 job_control_entry*
2122 team_get_death_entry(struct team *team, thread_id child, bool* _deleteEntry)
2123 {
2124 	if (child <= 0)
2125 		return NULL;
2126 
2127 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2128 		child);
2129 	if (entry) {
2130 		// only remove the entry if the caller is the parent of the found team
2131 		if (team_get_current_team_id() == entry->thread) {
2132 			team->dead_children->entries.Remove(entry);
2133 			team->dead_children->count--;
2134 			*_deleteEntry = true;
2135 		} else {
2136 			*_deleteEntry = false;
2137 		}
2138 	}
2139 
2140 	return entry;
2141 }
2142 
2143 
2144 /*! Quick check to see if we have a valid team ID. */
2145 bool
2146 team_is_valid(team_id id)
2147 {
2148 	struct team *team;
2149 	cpu_status state;
2150 
2151 	if (id <= 0)
2152 		return false;
2153 
2154 	state = disable_interrupts();
2155 	GRAB_TEAM_LOCK();
2156 
2157 	team = team_get_team_struct_locked(id);
2158 
2159 	RELEASE_TEAM_LOCK();
2160 	restore_interrupts(state);
2161 
2162 	return team != NULL;
2163 }
2164 
2165 
2166 struct team *
2167 team_get_team_struct_locked(team_id id)
2168 {
2169 	struct team_key key;
2170 	key.id = id;
2171 
2172 	return (struct team*)hash_lookup(sTeamHash, &key);
2173 }
2174 
2175 
2176 /*! This searches the session of the team for the specified group ID.
2177 	You must hold the team lock when you call this function.
2178 */
2179 struct process_group *
2180 team_get_process_group_locked(struct process_session *session, pid_t id)
2181 {
2182 	struct process_group *group;
2183 	struct team_key key;
2184 	key.id = id;
2185 
2186 	group = (struct process_group *)hash_lookup(sGroupHash, &key);
2187 	if (group != NULL && (session == NULL || session == group->session))
2188 		return group;
2189 
2190 	return NULL;
2191 }
2192 
2193 
2194 void
2195 team_delete_process_group(struct process_group *group)
2196 {
2197 	if (group == NULL)
2198 		return;
2199 
2200 	TRACE(("team_delete_process_group(id = %ld)\n", group->id));
2201 
2202 	// remove_group_from_session() keeps this pointer around
2203 	// only if the session can be freed as well
2204 	if (group->session) {
2205 		TRACE(("team_delete_process_group(): frees session %ld\n", group->session->id));
2206 		free(group->session);
2207 	}
2208 
2209 	free(group);
2210 }
2211 
2212 
2213 void
2214 team_set_controlling_tty(int32 ttyIndex)
2215 {
2216 	struct team* team = thread_get_current_thread()->team;
2217 
2218 	InterruptsSpinLocker _(gTeamSpinlock);
2219 
2220 	team->group->session->controlling_tty = ttyIndex;
2221 	team->group->session->foreground_group = -1;
2222 }
2223 
2224 
2225 int32
2226 team_get_controlling_tty()
2227 {
2228 	struct team* team = thread_get_current_thread()->team;
2229 
2230 	InterruptsSpinLocker _(gTeamSpinlock);
2231 
2232 	return team->group->session->controlling_tty;
2233 }
2234 
2235 
2236 status_t
2237 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2238 {
2239 	struct thread* thread = thread_get_current_thread();
2240 	struct team* team = thread->team;
2241 
2242 	InterruptsSpinLocker locker(gTeamSpinlock);
2243 
2244 	process_session* session = team->group->session;
2245 
2246 	// must be the controlling tty of the calling process
2247 	if (session->controlling_tty != ttyIndex)
2248 		return ENOTTY;
2249 
2250 	// check process group -- must belong to our session
2251 	process_group* group = team_get_process_group_locked(session,
2252 		processGroupID);
2253 	if (group == NULL)
2254 		return B_BAD_VALUE;
2255 
2256 	// If we are in a background group, we may only do this if we ignore or
2257 	// block SIGTTOU. Otherwise our group is sent a SIGTTOU.
2258 	if (session->foreground_group != -1
2259 		&& session->foreground_group != team->group_id
2260 		&& thread->sig_action[SIGTTOU - 1].sa_handler != SIG_IGN
2261 		&& !is_signal_blocked(SIGTTOU)) {
2262 		pid_t groupID = team->group->id;
2263 		locker.Unlock();
2264 		send_signal(-groupID, SIGTTOU);
2265 		return B_INTERRUPTED;
2266 	}
2267 
2268 	team->group->session->foreground_group = processGroupID;
2269 
2270 	return B_OK;
2271 }
2272 
2273 
2274 /*!	Removes the specified team from the global team hash, and from its parent.
2275 	It also moves all of its children up to the parent.
2276 	You must hold the team lock when you call this function.
2277 */
2278 void
2279 team_remove_team(struct team *team)
2280 {
2281 	struct team *parent = team->parent;
2282 
2283 	// remember how long this team lasted
2284 	parent->dead_children->kernel_time += team->dead_threads_kernel_time
2285 		+ team->dead_children->kernel_time;
2286 	parent->dead_children->user_time += team->dead_threads_user_time
2287 		+ team->dead_children->user_time;
2288 
2289 	// Also grab the thread spinlock while removing the team from the hash.
2290 	// This makes the following sequence safe: grab the team lock, look up
2291 	// the team, grab the thread lock, release the team lock,
2292 	// mutex_lock_threads_lock(<team related lock>), as used in the VFS code
2293 	// to lock another team's IO context.
2294 	GRAB_THREAD_LOCK();
2295 	hash_remove(sTeamHash, team);
2296 	RELEASE_THREAD_LOCK();
2297 	sUsedTeams--;
2298 
2299 	team->state = TEAM_STATE_DEATH;
2300 
2301 	// If we're a controlling process (i.e. a session leader with controlling
2302 	// terminal), there's a bit of signalling we have to do.
2303 	if (team->session_id == team->id
2304 		&& team->group->session->controlling_tty >= 0) {
2305 		process_session* session = team->group->session;
2306 
2307 		session->controlling_tty = -1;
2308 
2309 		// send SIGHUP to the foreground
2310 		if (session->foreground_group >= 0) {
2311 			send_signal_etc(-session->foreground_group, SIGHUP,
2312 				SIGNAL_FLAG_TEAMS_LOCKED);
2313 		}
2314 
2315 		// send SIGHUP + SIGCONT to all newly-orphaned process groups with
2316 		// stopped processes
2317 		struct team* child = team->children;
2318 		while (child != NULL) {
2319 			process_group* childGroup = child->group;
2320 			if (!childGroup->orphaned
2321 				&& update_orphaned_process_group(childGroup, team->id)
2322 				&& process_group_has_stopped_processes(childGroup)) {
2323 				send_signal_etc(-childGroup->id, SIGHUP,
2324 					SIGNAL_FLAG_TEAMS_LOCKED);
2325 				send_signal_etc(-childGroup->id, SIGCONT,
2326 					SIGNAL_FLAG_TEAMS_LOCKED);
2327 			}
2328 
2329 			child = child->siblings_next;
2330 		}
2331 	} else {
2332 		// update "orphaned" flags of all children's process groups
2333 		struct team* child = team->children;
2334 		while (child != NULL) {
2335 			process_group* childGroup = child->group;
2336 			if (!childGroup->orphaned)
2337 				update_orphaned_process_group(childGroup, team->id);
2338 
2339 			child = child->siblings_next;
2340 		}
2341 
2342 		// update "orphaned" flag of this team's process group
2343 		update_orphaned_process_group(team->group, team->id);
2344 	}
2345 
2346 	// reparent each of the team's children
2347 	reparent_children(team);
2348 
2349 	// remove us from our process group
2350 	remove_team_from_group(team);
2351 
2352 	// remove us from our parent
2353 	remove_team_from_parent(parent, team);
2354 }
2355 
2356 
2357 void
2358 team_delete_team(struct team *team)
2359 {
2360 	team_id teamID = team->id;
2361 	port_id debuggerPort = -1;
2362 	cpu_status state;
2363 
2364 	if (team->num_threads > 0) {
2365 		// There are still other threads in this team; cycle through and
2366 		// signal kill on each of them.
2367 		// ToDo: this can be optimized. There's got to be a better solution.
2368 		struct thread *temp_thread;
2369 		char death_sem_name[B_OS_NAME_LENGTH];
2370 		sem_id deathSem;
2371 		int32 threadCount;
2372 
2373 		sprintf(death_sem_name, "team %ld death sem", teamID);
2374 		deathSem = create_sem(0, death_sem_name);
2375 		if (deathSem < 0)
2376 			panic("team_delete_team: cannot init death sem for team %ld\n", teamID);
2377 
2378 		state = disable_interrupts();
2379 		GRAB_TEAM_LOCK();
2380 
2381 		team->death_sem = deathSem;
2382 		threadCount = team->num_threads;
2383 
2384 		// If the team was being debugged, that will stop with the termination
2385 		// of the nub thread. The team structure has already been removed from
2386 		// the team hash table at this point, so no one can install a debugger
2387 		// anymore. We fetch the debugger's port to send it a message at the
2388 		// bitter end.
2389 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2390 
2391 		if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2392 			debuggerPort = team->debug_info.debugger_port;
2393 
2394 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2395 
2396 		// We can safely walk the list because of the lock; no new threads can
2397 		// be created because of the TEAM_STATE_DEATH flag on the team.
2398 		temp_thread = team->thread_list;
2399 		while (temp_thread) {
2400 			struct thread *next = temp_thread->team_next;
2401 
2402 			send_signal_etc(temp_thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2403 			temp_thread = next;
2404 		}
2405 
2406 		RELEASE_TEAM_LOCK();
2407 		restore_interrupts(state);
2408 
2409 		// wait until all threads in the team are dead.
2410 		acquire_sem_etc(team->death_sem, threadCount, 0, 0);
2411 		delete_sem(team->death_sem);
2412 	}
2413 
2414 	// If someone is waiting for this team to be loaded, but it dies
2415 	// unexpectedly before being done, we need to notify the waiting
2416 	// thread now.
2417 
2418 	state = disable_interrupts();
2419 	GRAB_TEAM_LOCK();
2420 
2421 	if (team->loading_info) {
2422 		// there's indeed someone waiting
2423 		struct team_loading_info *loadingInfo = team->loading_info;
2424 		team->loading_info = NULL;
2425 
2426 		loadingInfo->result = B_ERROR;
2427 		loadingInfo->done = true;
2428 
2429 		GRAB_THREAD_LOCK();
2430 
2431 		// wake up the waiting thread
2432 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
2433 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
2434 
2435 		RELEASE_THREAD_LOCK();
2436 	}
2437 
2438 	RELEASE_TEAM_LOCK();
2439 	restore_interrupts(state);
2440 
2441 	// notify team watchers
2442 
2443 	{
2444 		// we're not reachable from anyone anymore at this point, so we
2445 		// can safely access the list without any locking
2446 		struct team_watcher *watcher;
2447 		while ((watcher = (struct team_watcher*)list_remove_head_item(
2448 				&team->watcher_list)) != NULL) {
2449 			watcher->hook(teamID, watcher->data);
2450 			free(watcher);
2451 		}
2452 	}
2453 
2454 	sNotificationService.Notify(TEAM_REMOVED, team);
2455 
2456 	// free team resources
2457 
2458 	vfs_put_io_context(team->io_context);
2459 	delete_realtime_sem_context(team->realtime_sem_context);
2460 	xsi_sem_undo(team);
2461 	delete_owned_ports(teamID);
2462 	sem_delete_owned_sems(teamID);
2463 	remove_images(team);
2464 	vm_delete_address_space(team->address_space);
2465 
2466 	delete_team_struct(team);
2467 
2468 	// notify the debugger that the team is gone
2469 	user_debug_team_deleted(teamID, debuggerPort);
2470 }
2471 
2472 
2473 struct team *
2474 team_get_kernel_team(void)
2475 {
2476 	return sKernelTeam;
2477 }
2478 
2479 
2480 team_id
2481 team_get_kernel_team_id(void)
2482 {
2483 	if (!sKernelTeam)
2484 		return 0;
2485 
2486 	return sKernelTeam->id;
2487 }
2488 
2489 
2490 team_id
2491 team_get_current_team_id(void)
2492 {
2493 	return thread_get_current_thread()->team->id;
2494 }
2495 
2496 
2497 status_t
2498 team_get_address_space(team_id id, vm_address_space **_addressSpace)
2499 {
2500 	cpu_status state;
2501 	struct team *team;
2502 	status_t status;
2503 
2504 	// ToDo: we need to do something about B_SYSTEM_TEAM vs. its real ID (1)
2505 	if (id == 1) {
2506 		// we're the kernel team, so we don't have to go through all
2507 		// the hassle (locking and hash lookup)
2508 		*_addressSpace = vm_get_kernel_address_space();
2509 		return B_OK;
2510 	}
2511 
2512 	state = disable_interrupts();
2513 	GRAB_TEAM_LOCK();
2514 
2515 	team = team_get_team_struct_locked(id);
2516 	if (team != NULL) {
2517 		atomic_add(&team->address_space->ref_count, 1);
2518 		*_addressSpace = team->address_space;
2519 		status = B_OK;
2520 	} else
2521 		status = B_BAD_VALUE;
2522 
2523 	RELEASE_TEAM_LOCK();
2524 	restore_interrupts(state);
2525 
2526 	return status;
2527 }
2528 
2529 
2530 /*!	Sets the team's job control state.
2531 	Interrupts must be disabled and the team lock be held.
2532 	\a threadsLocked indicates whether the thread lock is being held, too.
2533 */
2534 void
2535 team_set_job_control_state(struct team* team, job_control_state newState,
2536 	int signal, bool threadsLocked)
2537 {
2538 	if (team == NULL || team->job_control_entry == NULL)
2539 		return;
2540 
2541 	// don't touch anything, if the state stays the same or the team is already
2542 	// dead
2543 	job_control_entry* entry = team->job_control_entry;
2544 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
2545 		return;
2546 
2547 	T(SetJobControlState(team->id, newState, signal));
2548 
2549 	// remove from the old list
2550 	switch (entry->state) {
2551 		case JOB_CONTROL_STATE_NONE:
2552 			// entry is in no list ATM
2553 			break;
2554 		case JOB_CONTROL_STATE_DEAD:
2555 			// can't get here
2556 			break;
2557 		case JOB_CONTROL_STATE_STOPPED:
2558 			team->parent->stopped_children->entries.Remove(entry);
2559 			break;
2560 		case JOB_CONTROL_STATE_CONTINUED:
2561 			team->parent->continued_children->entries.Remove(entry);
2562 			break;
2563 	}
2564 
2565 	entry->state = newState;
2566 	entry->signal = signal;
2567 
2568 	// add to new list
2569 	team_job_control_children* childList = NULL;
2570 	switch (entry->state) {
2571 		case JOB_CONTROL_STATE_NONE:
2572 			// entry doesn't get into any list
2573 			break;
2574 		case JOB_CONTROL_STATE_DEAD:
2575 			childList = team->parent->dead_children;
2576 			team->parent->dead_children->count++;
2577 			break;
2578 		case JOB_CONTROL_STATE_STOPPED:
2579 			childList = team->parent->stopped_children;
2580 			break;
2581 		case JOB_CONTROL_STATE_CONTINUED:
2582 			childList = team->parent->continued_children;
2583 			break;
2584 	}
2585 
2586 	if (childList != NULL) {
2587 		childList->entries.Add(entry);
2588 		team->parent->dead_children->condition_variable.NotifyAll(
2589 			threadsLocked);
2590 	}
2591 }
2592 
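// Summary of the bookkeeping above: the team's job control entry is moved
// from the parent's stopped_children/continued_children list (or from no
// list at all) to the list matching the new state; dead entries additionally
// bump dead_children->count. Whenever the entry lands on a list, the
// parent's dead_children condition variable is notified, so a parent blocked
// in wait_for_child() re-checks its children.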
2593 
2594 /*! Adds a hook to the team; the hook is called as soon as the
2595 	team goes away.
2596 	This call might get public in the future.
2597 */
2598 status_t
2599 start_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2600 {
2601 	struct team_watcher *watcher;
2602 	struct team *team;
2603 	cpu_status state;
2604 
2605 	if (hook == NULL || teamID < B_OK)
2606 		return B_BAD_VALUE;
2607 
2608 	watcher = (struct team_watcher*)malloc(sizeof(struct team_watcher));
2609 	if (watcher == NULL)
2610 		return B_NO_MEMORY;
2611 
2612 	watcher->hook = hook;
2613 	watcher->data = data;
2614 
2615 	// find team and add watcher
2616 
2617 	state = disable_interrupts();
2618 	GRAB_TEAM_LOCK();
2619 
2620 	team = team_get_team_struct_locked(teamID);
2621 	if (team != NULL)
2622 		list_add_item(&team->watcher_list, watcher);
2623 
2624 	RELEASE_TEAM_LOCK();
2625 	restore_interrupts(state);
2626 
2627 	if (team == NULL) {
2628 		free(watcher);
2629 		return B_BAD_TEAM_ID;
2630 	}
2631 
2632 	return B_OK;
2633 }
2634 
2635 
2636 status_t
2637 stop_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2638 {
2639 	struct team_watcher *watcher = NULL;
2640 	struct team *team;
2641 	cpu_status state;
2642 
2643 	if (hook == NULL || teamID < B_OK)
2644 		return B_BAD_VALUE;
2645 
2646 	// find team and remove watcher (if present)
2647 
2648 	state = disable_interrupts();
2649 	GRAB_TEAM_LOCK();
2650 
2651 	team = team_get_team_struct_locked(teamID);
2652 	if (team != NULL) {
2653 		// search for watcher
2654 		while ((watcher = (struct team_watcher*)list_get_next_item(
2655 				&team->watcher_list, watcher)) != NULL) {
2656 			if (watcher->hook == hook && watcher->data == data) {
2657 				// got it!
2658 				list_remove_item(&team->watcher_list, watcher);
2659 				break;
2660 			}
2661 		}
2662 	}
2663 
2664 	RELEASE_TEAM_LOCK();
2665 	restore_interrupts(state);
2666 
2667 	if (watcher == NULL)
2668 		return B_ENTRY_NOT_FOUND;
2669 
2670 	free(watcher);
2671 	return B_OK;
2672 }
2673 
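// A minimal usage sketch for the watcher API above (illustrative only; the
// hook and its cookie are assumptions, not existing kernel code):
//
//	static void
//	my_team_gone_hook(team_id id, void* data)
//	{
//		dprintf("team %ld is gone\n", id);
//	}
//
//	status_t error = start_watching_team(teamID, &my_team_gone_hook, NULL);
//	...
//	if (error == B_OK)
//		stop_watching_team(teamID, &my_team_gone_hook, NULL);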
2674 
2675 /*!	The team lock must be held or the team must still be single threaded.
2676 */
2677 struct user_thread*
2678 team_allocate_user_thread(struct team* team)
2679 {
2680 	if (team->user_data == 0)
2681 		return NULL;
2682 
2683 	user_thread* thread = NULL;
2684 
2685 	// take an entry from the free list, if any
2686 	if (struct free_user_thread* entry = team->free_user_threads) {
2687 		thread = entry->thread;
2688 		team->free_user_threads = entry->next;
2689 		deferred_free(entry);
2690 		return thread;
2691 	} else {
2692 		// enough space left?
2693 		size_t needed = _ALIGN(sizeof(user_thread));
2694 		if (team->user_data_size - team->used_user_data < needed)
2695 			return NULL;
2696 		// TODO: This imposes a per team thread limit! We should resize the
2697 		// area, if necessary. That's problematic at this point, though, since
2698 		// we've got the team lock.
2699 
2700 		thread = (user_thread*)(team->user_data + team->used_user_data);
2701 		team->used_user_data += needed;
2702 	}
2703 
2704 	thread->defer_signals = 0;
2705 	thread->pending_signals = 0;
2706 	thread->wait_status = B_OK;
2707 
2708 	return thread;
2709 }
2710 
2711 
2712 /*!	The team lock must not be held. \a thread must be the current thread.
2713 */
2714 void
2715 team_free_user_thread(struct thread* thread)
2716 {
2717 	user_thread* userThread = thread->user_thread;
2718 	if (userThread == NULL)
2719 		return;
2720 
2721 	// create a free list entry
2722 	free_user_thread* entry
2723 		= (free_user_thread*)malloc(sizeof(free_user_thread));
2724 	if (entry == NULL) {
2725 		// we have to leak the user thread :-/
2726 		return;
2727 	}
2728 
2729 	InterruptsSpinLocker _(gTeamSpinlock);
2730 
2731 	// detach from thread
2732 	SpinLocker threadLocker(gThreadSpinlock);
2733 	thread->user_thread = NULL;
2734 	threadLocker.Unlock();
2735 
2736 	entry->thread = userThread;
2737 	entry->next = thread->team->free_user_threads;
2738 	thread->team->free_user_threads = entry;
2739 }
2740 
2741 
2742 //	#pragma mark - Public kernel API
2743 
2744 
2745 thread_id
2746 load_image(int32 argCount, const char **args, const char **env)
2747 {
2748 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
2749 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
2750 }
2751 
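// A minimal calling sketch (illustrative only; the path and environment are
// assumptions). The new team's main thread is typically left suspended, so
// the caller resumes it or waits for it:
//
//	const char* args[] = { "/bin/ls", "-l" };
//	const char* env[] = { "HOME=/boot/home", NULL };
//	thread_id thread = load_image(2, args, env);
//	if (thread >= B_OK)
//		resume_thread(thread);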
2752 
2753 thread_id
2754 load_image_etc(int32 argCount, const char* const* args,
2755 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
2756 {
2757 	// we need to flatten the args and environment
2758 
2759 	if (args == NULL)
2760 		return B_BAD_VALUE;
2761 
2762 	// determine total needed size
2763 	int32 argSize = 0;
2764 	for (int32 i = 0; i < argCount; i++)
2765 		argSize += strlen(args[i]) + 1;
2766 
2767 	int32 envCount = 0;
2768 	int32 envSize = 0;
2769 	while (env != NULL && env[envCount] != NULL)
2770 		envSize += strlen(env[envCount++]) + 1;
2771 
2772 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
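	// The flat buffer built below is laid out as: argCount argument pointers,
	// a NULL terminator, envCount environment pointers, another NULL
	// terminator, and then the string data itself -- which is what the
	// (argCount + envCount + 2) pointer slots account for.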
2773 	if (size > MAX_PROCESS_ARGS_SIZE)
2774 		return B_TOO_MANY_ARGS;
2775 
2776 	// allocate space
2777 	char** flatArgs = (char**)malloc(size);
2778 	if (flatArgs == NULL)
2779 		return B_NO_MEMORY;
2780 
2781 	char** slot = flatArgs;
2782 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
2783 
2784 	// copy arguments and environment
2785 	for (int32 i = 0; i < argCount; i++) {
2786 		int32 argSize = strlen(args[i]) + 1;
2787 		memcpy(stringSpace, args[i], argSize);
2788 		*slot++ = stringSpace;
2789 		stringSpace += argSize;
2790 	}
2791 
2792 	*slot++ = NULL;
2793 
2794 	for (int32 i = 0; i < envCount; i++) {
2795 		int32 envSize = strlen(env[i]) + 1;
2796 		memcpy(stringSpace, env[i], envSize);
2797 		*slot++ = stringSpace;
2798 		stringSpace += envSize;
2799 	}
2800 
2801 	*slot++ = NULL;
2802 
2803 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
2804 		priority, parentID, flags, -1, 0);
2805 
2806 	free(flatArgs);
2807 		// load_image_internal() unset our variable if it took over ownership
2808 
2809 	return thread;
2810 }
2811 
2812 
2813 status_t
2814 wait_for_team(team_id id, status_t *_returnCode)
2815 {
2816 	struct team *team;
2817 	thread_id thread;
2818 	cpu_status state;
2819 
2820 	// find main thread and wait for that
2821 
2822 	state = disable_interrupts();
2823 	GRAB_TEAM_LOCK();
2824 
2825 	team = team_get_team_struct_locked(id);
2826 	if (team != NULL && team->main_thread != NULL)
2827 		thread = team->main_thread->id;
2828 	else
2829 		thread = B_BAD_THREAD_ID;
2830 
2831 	RELEASE_TEAM_LOCK();
2832 	restore_interrupts(state);
2833 
2834 	if (thread < 0)
2835 		return thread;
2836 
2837 	return wait_for_thread(thread, _returnCode);
2838 }
2839 
2840 
2841 status_t
2842 kill_team(team_id id)
2843 {
2844 	status_t status = B_OK;
2845 	thread_id threadID = -1;
2846 	struct team *team;
2847 	cpu_status state;
2848 
2849 	state = disable_interrupts();
2850 	GRAB_TEAM_LOCK();
2851 
2852 	team = team_get_team_struct_locked(id);
2853 	if (team != NULL) {
2854 		if (team != sKernelTeam) {
2855 			threadID = team->id;
2856 				// the team ID is the same as the ID of its main thread
2857 		} else
2858 			status = B_NOT_ALLOWED;
2859 	} else
2860 		status = B_BAD_THREAD_ID;
2861 
2862 	RELEASE_TEAM_LOCK();
2863 	restore_interrupts(state);
2864 
2865 	if (status < B_OK)
2866 		return status;
2867 
2868 	// just kill the main thread in the team. The cleanup code there will
2869 	// take care of the team
2870 	return kill_thread(threadID);
2871 }
2872 
2873 
2874 status_t
2875 _get_team_info(team_id id, team_info *info, size_t size)
2876 {
2877 	cpu_status state;
2878 	status_t status = B_OK;
2879 	struct team *team;
2880 
2881 	state = disable_interrupts();
2882 	GRAB_TEAM_LOCK();
2883 
2884 	if (id == B_CURRENT_TEAM)
2885 		team = thread_get_current_thread()->team;
2886 	else
2887 		team = team_get_team_struct_locked(id);
2888 
2889 	if (team == NULL) {
2890 		status = B_BAD_TEAM_ID;
2891 		goto err;
2892 	}
2893 
2894 	status = fill_team_info(team, info, size);
2895 
2896 err:
2897 	RELEASE_TEAM_LOCK();
2898 	restore_interrupts(state);
2899 
2900 	return status;
2901 }
2902 
2903 
2904 status_t
2905 _get_next_team_info(int32 *cookie, team_info *info, size_t size)
2906 {
2907 	status_t status = B_BAD_TEAM_ID;
2908 	struct team *team = NULL;
2909 	int32 slot = *cookie;
2910 	team_id lastTeamID;
2911 	cpu_status state;
2912 
2913 	if (slot < 1)
2914 		slot = 1;
2915 
2916 	state = disable_interrupts();
2917 	GRAB_TEAM_LOCK();
2918 
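	// Team IDs are taken from the thread ID space (a team's ID equals the ID
	// of its main thread, cf. kill_team()), so the next thread ID is an upper
	// bound for the team IDs we need to consider.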
2919 	lastTeamID = peek_next_thread_id();
2920 	if (slot >= lastTeamID)
2921 		goto err;
2922 
2923 	// get next valid team
2924 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
2925 		slot++;
2926 
2927 	if (team) {
2928 		status = fill_team_info(team, info, size);
2929 		*cookie = ++slot;
2930 	}
2931 
2932 err:
2933 	RELEASE_TEAM_LOCK();
2934 	restore_interrupts(state);
2935 
2936 	return status;
2937 }
2938 
2939 
2940 status_t
2941 _get_team_usage_info(team_id id, int32 who, team_usage_info *info, size_t size)
2942 {
2943 	bigtime_t kernelTime = 0, userTime = 0;
2944 	status_t status = B_OK;
2945 	struct team *team;
2946 	cpu_status state;
2947 
2948 	if (size != sizeof(team_usage_info)
2949 		|| (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN))
2950 		return B_BAD_VALUE;
2951 
2952 	state = disable_interrupts();
2953 	GRAB_TEAM_LOCK();
2954 
2955 	if (id == B_CURRENT_TEAM)
2956 		team = thread_get_current_thread()->team;
2957 	else
2958 		team = team_get_team_struct_locked(id);
2959 
2960 	if (team == NULL) {
2961 		status = B_BAD_TEAM_ID;
2962 		goto out;
2963 	}
2964 
2965 	switch (who) {
2966 		case B_TEAM_USAGE_SELF:
2967 		{
2968 			struct thread *thread = team->thread_list;
2969 
2970 			for (; thread != NULL; thread = thread->team_next) {
2971 				kernelTime += thread->kernel_time;
2972 				userTime += thread->user_time;
2973 			}
2974 
2975 			kernelTime += team->dead_threads_kernel_time;
2976 			userTime += team->dead_threads_user_time;
2977 			break;
2978 		}
2979 
2980 		case B_TEAM_USAGE_CHILDREN:
2981 		{
2982 			struct team *child = team->children;
2983 			for (; child != NULL; child = child->siblings_next) {
2984 				struct thread *thread = child->thread_list;
2985 
2986 				for (; thread != NULL; thread = thread->team_next) {
2987 					kernelTime += thread->kernel_time;
2988 					userTime += thread->user_time;
2989 				}
2990 
2991 				kernelTime += child->dead_threads_kernel_time;
2992 				userTime += child->dead_threads_user_time;
2993 			}
2994 
2995 			kernelTime += team->dead_children->kernel_time;
2996 			userTime += team->dead_children->user_time;
2997 			break;
2998 		}
2999 	}
3000 
3001 out:
3002 	RELEASE_TEAM_LOCK();
3003 	restore_interrupts(state);
3004 
3005 	if (status == B_OK) {
3006 		info->kernel_time = kernelTime;
3007 		info->user_time = userTime;
3008 	}
3009 
3010 	return status;
3011 }
3012 
3013 
3014 pid_t
3015 getpid(void)
3016 {
3017 	return thread_get_current_thread()->team->id;
3018 }
3019 
3020 
3021 pid_t
3022 getppid(void)
3023 {
3024 	struct team *team = thread_get_current_thread()->team;
3025 	cpu_status state;
3026 	pid_t parent;
3027 
3028 	state = disable_interrupts();
3029 	GRAB_TEAM_LOCK();
3030 
3031 	parent = team->parent->id;
3032 
3033 	RELEASE_TEAM_LOCK();
3034 	restore_interrupts(state);
3035 
3036 	return parent;
3037 }
3038 
3039 
3040 pid_t
3041 getpgid(pid_t process)
3042 {
3043 	struct thread *thread;
3044 	pid_t result = -1;
3045 	cpu_status state;
3046 
3047 	if (process == 0)
3048 		process = thread_get_current_thread()->team->id;
3049 
3050 	state = disable_interrupts();
3051 	GRAB_THREAD_LOCK();
3052 
3053 	thread = thread_get_thread_struct_locked(process);
3054 	if (thread != NULL)
3055 		result = thread->team->group_id;
3056 
3057 	RELEASE_THREAD_LOCK();
3058 	restore_interrupts(state);
3059 
3060 	return thread != NULL ? result : B_BAD_VALUE;
3061 }
3062 
3063 
3064 pid_t
3065 getsid(pid_t process)
3066 {
3067 	struct thread *thread;
3068 	pid_t result = -1;
3069 	cpu_status state;
3070 
3071 	if (process == 0)
3072 		process = thread_get_current_thread()->team->id;
3073 
3074 	state = disable_interrupts();
3075 	GRAB_THREAD_LOCK();
3076 
3077 	thread = thread_get_thread_struct_locked(process);
3078 	if (thread != NULL)
3079 		result = thread->team->session_id;
3080 
3081 	RELEASE_THREAD_LOCK();
3082 	restore_interrupts(state);
3083 
3084 	return thread != NULL ? result : B_BAD_VALUE;
3085 }
3086 
3087 
3088 //	#pragma mark - User syscalls
3089 
3090 
3091 status_t
3092 _user_exec(const char *userPath, const char* const* userFlatArgs,
3093 	size_t flatArgsSize, int32 argCount, int32 envCount)
3094 {
3095 	// NOTE: Since this function normally doesn't return, don't use automatic
3096 	// variables that need destruction in the function scope.
3097 	char path[B_PATH_NAME_LENGTH];
3098 
3099 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3100 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3101 		return B_BAD_ADDRESS;
3102 
3103 	// copy and relocate the flat arguments
3104 	char** flatArgs;
3105 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3106 		argCount, envCount, flatArgs);
3107 
3108 	if (error == B_OK) {
3109 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3110 			envCount);
3111 			// this one only returns in case of error
3112 	}
3113 
3114 	free(flatArgs);
3115 	return error;
3116 }
3117 
3118 
3119 thread_id
3120 _user_fork(void)
3121 {
3122 	return fork_team();
3123 }
3124 
3125 
3126 thread_id
3127 _user_wait_for_child(thread_id child, uint32 flags, int32 *_userReason, status_t *_userReturnCode)
3128 {
3129 	status_t returnCode;
3130 	int32 reason;
3131 	thread_id deadChild;
3132 
3133 	if ((_userReason != NULL && !IS_USER_ADDRESS(_userReason))
3134 		|| (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode)))
3135 		return B_BAD_ADDRESS;
3136 
3137 	deadChild = wait_for_child(child, flags, &reason, &returnCode);
3138 
3139 	if (deadChild >= B_OK) {
3140 		// copy result data on successful completion
3141 		if ((_userReason != NULL
3142 				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
3143 			|| (_userReturnCode != NULL
3144 				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
3145 					< B_OK)) {
3146 			return B_BAD_ADDRESS;
3147 		}
3148 
3149 		return deadChild;
3150 	}
3151 
3152 	return syscall_restart_handle_post(deadChild);
3153 }
3154 
3155 
3156 pid_t
3157 _user_process_info(pid_t process, int32 which)
3158 {
3159 	// we only allow returning the parent of the current process
3160 	if (which == PARENT_ID
3161 		&& process != 0 && process != thread_get_current_thread()->team->id)
3162 		return B_BAD_VALUE;
3163 
3164 	switch (which) {
3165 		case SESSION_ID:
3166 			return getsid(process);
3167 		case GROUP_ID:
3168 			return getpgid(process);
3169 		case PARENT_ID:
3170 			return getppid();
3171 	}
3172 
3173 	return B_BAD_VALUE;
3174 }
3175 
3176 
3177 pid_t
3178 _user_setpgid(pid_t processID, pid_t groupID)
3179 {
3180 	struct thread *thread = thread_get_current_thread();
3181 	struct team *currentTeam = thread->team;
3182 	struct team *team;
3183 
3184 	if (groupID < 0)
3185 		return B_BAD_VALUE;
3186 
3187 	if (processID == 0)
3188 		processID = currentTeam->id;
3189 
3190 	// if the group ID is not specified, use the target process' ID
3191 	if (groupID == 0)
3192 		groupID = processID;
3193 
3194 	if (processID == currentTeam->id) {
3195 		// we set our own group
3196 
3197 		// we must not change our process group ID if we're a session leader
3198 		if (is_session_leader(currentTeam))
3199 			return B_NOT_ALLOWED;
3200 	} else {
3201 		// another team is the target of the call -- check it out
3202 		InterruptsSpinLocker _(gTeamSpinlock);
3203 
3204 		team = team_get_team_struct_locked(processID);
3205 		if (team == NULL)
3206 			return ESRCH;
3207 
3208 		// The team must be a child of the calling team and in the same session.
3209 		// (If that's the case it isn't a session leader either.)
3210 		if (team->parent != currentTeam
3211 			|| team->session_id != currentTeam->session_id) {
3212 			return B_NOT_ALLOWED;
3213 		}
3214 
3215 		if (team->group_id == groupID)
3216 			return groupID;
3217 
3218 		// The call is also supposed to fail on a child that has already
3219 		// executed exec*() [EACCES].
3220 		if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
3221 			return EACCES;
3222 	}
3223 
3224 	struct process_group *group = NULL;
3225 	if (groupID == processID) {
3226 		// A new process group might be needed.
3227 		group = create_process_group(groupID);
3228 		if (group == NULL)
3229 			return B_NO_MEMORY;
3230 
3231 		// Assume orphaned. We consider the situation of the team's parent
3232 		// below.
3233 		group->orphaned = true;
3234 	}
3235 
3236 	status_t status = B_OK;
3237 	struct process_group *freeGroup = NULL;
3238 
3239 	InterruptsSpinLocker locker(gTeamSpinlock);
3240 
3241 	team = team_get_team_struct_locked(processID);
3242 	if (team != NULL) {
3243 		// check the conditions again -- they might have changed in the meantime
3244 		if (is_session_leader(team)
3245 			|| team->session_id != currentTeam->session_id) {
3246 			status = B_NOT_ALLOWED;
3247 		} else if (team != currentTeam
3248 				&& (team->flags & TEAM_FLAG_EXEC_DONE) != 0) {
3249 			status = EACCES;
3250 		} else if (team->group_id == groupID) {
3251 			// the team is already in the desired process group
3252 			freeGroup = group;
3253 		} else {
3254 			// Check if a process group with the requested ID already exists.
3255 			struct process_group *targetGroup
3256 				= team_get_process_group_locked(team->group->session, groupID);
3257 			if (targetGroup != NULL) {
3258 				// In case of processID == groupID we have to free the
3259 				// allocated group.
3260 				freeGroup = group;
3261 			} else if (processID == groupID) {
3262 				// We created a new process group; insert it into the
3263 				// team's session.
3264 				insert_group_into_session(team->group->session, group);
3265 				targetGroup = group;
3266 			}
3267 
3268 			if (targetGroup != NULL) {
3269 				// we got a group, let's move the team there
3270 				process_group* oldGroup = team->group;
3271 
3272 				remove_team_from_group(team);
3273 				insert_team_into_group(targetGroup, team);
3274 
3275 				// Update the "orphaned" flag of all potentially affected
3276 				// groups.
3277 
3278 				// the team's old group
3279 				if (oldGroup->teams != NULL) {
3280 					oldGroup->orphaned = false;
3281 					update_orphaned_process_group(oldGroup, -1);
3282 				}
3283 
3284 				// the team's new group
3285 				struct team* parent = team->parent;
3286 				targetGroup->orphaned &= parent == NULL
3287 					|| parent->group == targetGroup
3288 					|| team->parent->session_id != team->session_id;
3289 
3290 				// children's groups
3291 				struct team* child = team->children;
3292 				while (child != NULL) {
3293 					child->group->orphaned = false;
3294 					update_orphaned_process_group(child->group, -1);
3295 
3296 					child = child->siblings_next;
3297 				}
3298 			} else
3299 				status = B_NOT_ALLOWED;
3300 		}
3301 	} else
3302 		status = B_NOT_ALLOWED;
3303 
3304 	// Changing the process group might have changed the situation for a parent
3305 	// waiting in wait_for_child(). Hence we notify it.
3306 	if (status == B_OK)
3307 		team->parent->dead_children->condition_variable.NotifyAll(false);
3308 
3309 	locker.Unlock();
3310 
3311 	if (status != B_OK) {
3312 		// in case of error, the group hasn't been added into the hash
3313 		team_delete_process_group(group);
3314 	}
3315 
3316 	team_delete_process_group(freeGroup);
3317 
3318 	return status == B_OK ? groupID : status;
3319 }
3320 
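// Typical use from userland, via setpgid() (illustrative only): a shell puts
// a newly forked job into its own process group, e.g.
//
//	pid_t child = fork();
//	if (child == 0)
//		setpgid(0, 0);		// 0, 0: use the caller's ID for process and group
//	else if (child > 0)
//		setpgid(child, child);	// parent does the same; whichever runs first wins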
3321 
3322 pid_t
3323 _user_setsid(void)
3324 {
3325 	struct team *team = thread_get_current_thread()->team;
3326 	struct process_session *session;
3327 	struct process_group *group;
3328 	cpu_status state;
3329 	bool failed = false;
3330 
3331 	// the team must not already be a process group leader
3332 	if (is_process_group_leader(team))
3333 		return B_NOT_ALLOWED;
3334 
3335 	group = create_process_group(team->id);
3336 	if (group == NULL)
3337 		return B_NO_MEMORY;
3338 
3339 	session = create_process_session(group->id);
3340 	if (session == NULL) {
3341 		team_delete_process_group(group);
3342 		return B_NO_MEMORY;
3343 	}
3344 
3345 	state = disable_interrupts();
3346 	GRAB_TEAM_LOCK();
3347 
3348 	// this may have changed since the check above
3349 	if (!is_process_group_leader(team)) {
3350 		remove_team_from_group(team);
3351 
3352 		insert_group_into_session(session, group);
3353 		insert_team_into_group(group, team);
3354 	} else
3355 		failed = true;
3356 
3357 	RELEASE_TEAM_LOCK();
3358 	restore_interrupts(state);
3359 
3360 	if (failed) {
3361 		team_delete_process_group(group);
3362 		free(session);
3363 		return B_NOT_ALLOWED;
3364 	}
3365 
3366 	return team->group_id;
3367 }
3368 
3369 
3370 status_t
3371 _user_wait_for_team(team_id id, status_t *_userReturnCode)
3372 {
3373 	status_t returnCode;
3374 	status_t status;
3375 
3376 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
3377 		return B_BAD_ADDRESS;
3378 
3379 	status = wait_for_team(id, &returnCode);
3380 	if (status >= B_OK && _userReturnCode != NULL) {
3381 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode)) < B_OK)
3382 			return B_BAD_ADDRESS;
3383 		return B_OK;
3384 	}
3385 
3386 	return syscall_restart_handle_post(status);
3387 }
3388 
3389 
3390 thread_id
3391 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
3392 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
3393 	port_id errorPort, uint32 errorToken)
3394 {
3395 	TRACE(("_user_load_image: argc = %ld\n", argCount));
3396 
3397 	if (argCount < 1)
3398 		return B_BAD_VALUE;
3399 
3400 	// copy and relocate the flat arguments
3401 	char** flatArgs;
3402 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3403 		argCount, envCount, flatArgs);
3404 	if (error != B_OK)
3405 		return error;
3406 
3407 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
3408 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
3409 		errorToken);
3410 
3411 	free(flatArgs);
3412 		// load_image_internal() unset our variable if it took over ownership
3413 
3414 	return thread;
3415 }
3416 
3417 
3418 void
3419 _user_exit_team(status_t returnValue)
3420 {
3421 	struct thread *thread = thread_get_current_thread();
3422 
3423 	thread->exit.status = returnValue;
3424 	thread->exit.reason = THREAD_RETURN_EXIT;
3425 
3426 	send_signal(thread->id, SIGKILL);
3427 }
3428 
3429 
3430 status_t
3431 _user_kill_team(team_id team)
3432 {
3433 	return kill_team(team);
3434 }
3435 
3436 
3437 status_t
3438 _user_get_team_info(team_id id, team_info *userInfo)
3439 {
3440 	status_t status;
3441 	team_info info;
3442 
3443 	if (!IS_USER_ADDRESS(userInfo))
3444 		return B_BAD_ADDRESS;
3445 
3446 	status = _get_team_info(id, &info, sizeof(team_info));
3447 	if (status == B_OK) {
3448 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3449 			return B_BAD_ADDRESS;
3450 	}
3451 
3452 	return status;
3453 }
3454 
3455 
3456 status_t
3457 _user_get_next_team_info(int32 *userCookie, team_info *userInfo)
3458 {
3459 	status_t status;
3460 	team_info info;
3461 	int32 cookie;
3462 
3463 	if (!IS_USER_ADDRESS(userCookie)
3464 		|| !IS_USER_ADDRESS(userInfo)
3465 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3466 		return B_BAD_ADDRESS;
3467 
3468 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
3469 	if (status != B_OK)
3470 		return status;
3471 
3472 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3473 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3474 		return B_BAD_ADDRESS;
3475 
3476 	return status;
3477 }
3478 
3479 
3480 team_id
3481 _user_get_current_team(void)
3482 {
3483 	return team_get_current_team_id();
3484 }
3485 
3486 
3487 status_t
3488 _user_get_team_usage_info(team_id team, int32 who, team_usage_info *userInfo, size_t size)
3489 {
3490 	team_usage_info info;
3491 	status_t status;
3492 
3493 	if (!IS_USER_ADDRESS(userInfo))
3494 		return B_BAD_ADDRESS;
3495 
3496 	status = _get_team_usage_info(team, who, &info, size);
3497 	if (status != B_OK)
3498 		return status;
3499 
3500 	if (user_memcpy(userInfo, &info, size) < B_OK)
3501 		return B_BAD_ADDRESS;
3502 
3503 	return status;
3504 }
3505 
3506