xref: /haiku/src/system/kernel/team.cpp (revision abb72bec4b50661dc326a98824ca0a26500805f7)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*!	Team functions */
12 
13 
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <sys/wait.h>
18 
19 #include <OS.h>
20 
21 #include <AutoDeleter.h>
22 #include <FindDirectory.h>
23 
24 #include <boot_device.h>
25 #include <elf.h>
26 #include <file_cache.h>
27 #include <fs/KPath.h>
28 #include <heap.h>
29 #include <int.h>
30 #include <kernel.h>
31 #include <kimage.h>
32 #include <kscheduler.h>
33 #include <ksignal.h>
34 #include <Notifications.h>
35 #include <port.h>
36 #include <posix/realtime_sem.h>
37 #include <posix/xsi_semaphore.h>
38 #include <sem.h>
39 #include <syscall_process_info.h>
40 #include <syscall_restart.h>
41 #include <syscalls.h>
42 #include <team.h>
43 #include <tls.h>
44 #include <tracing.h>
45 #include <user_runtime.h>
46 #include <user_thread.h>
47 #include <usergroup.h>
48 #include <vfs.h>
49 #include <vm.h>
50 #include <vm_address_space.h>
51 #include <util/AutoLock.h>
52 #include <util/khash.h>
53 
54 //#define TRACE_TEAM
55 #ifdef TRACE_TEAM
56 #	define TRACE(x) dprintf x
57 #else
58 #	define TRACE(x) ;
59 #endif
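// Note: since TRACE(x) expands to `dprintf x`, call sites must wrap the whole
// argument list in an extra pair of parentheses, for example
// TRACE(("team_create_thread_start: entry thread %ld\n", t->id)).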
60 
61 
62 struct team_key {
63 	team_id id;
64 };
65 
66 struct team_arg {
67 	char	*path;
68 	char	**flat_args;
69 	size_t	flat_args_size;
70 	uint32	arg_count;
71 	uint32	env_count;
72 	port_id	error_port;
73 	uint32	error_token;
74 };
75 
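// Snapshot of the parent thread's user-space state, filled in by fork_team();
// the child's main thread applies it and frees it again in
// fork_team_thread_start().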
76 struct fork_arg {
77 	area_id				user_stack_area;
78 	addr_t				user_stack_base;
79 	size_t				user_stack_size;
80 	addr_t				user_local_storage;
81 	sigset_t			sig_block_mask;
82 	struct sigaction	sig_action[32];
83 	addr_t				signal_stack_base;
84 	size_t				signal_stack_size;
85 	bool				signal_stack_enabled;
86 
87 	struct user_thread* user_thread;
88 
89 	struct arch_fork_arg arch_info;
90 };
91 
92 class TeamNotificationService : public DefaultNotificationService {
93 public:
94 							TeamNotificationService();
95 
96 			void			Notify(uint32 eventCode, struct team* team);
97 };
98 
99 
100 static hash_table *sTeamHash = NULL;
101 static hash_table *sGroupHash = NULL;
102 static struct team *sKernelTeam = NULL;
103 
104 // some arbitrarily chosen limits - should probably depend on the available
105 // memory (the limit is not yet enforced)
106 static int32 sMaxTeams = 2048;
107 static int32 sUsedTeams = 1;
108 
109 static TeamNotificationService sNotificationService;
110 
111 spinlock gTeamSpinlock = B_SPINLOCK_INITIALIZER;
112 
113 
114 // #pragma mark - Tracing
115 
116 
117 #if TEAM_TRACING
118 namespace TeamTracing {
119 
120 class TeamForked : public AbstractTraceEntry {
121 public:
122 	TeamForked(thread_id forkedThread)
123 		:
124 		fForkedThread(forkedThread)
125 	{
126 		Initialized();
127 	}
128 
129 	virtual void AddDump(TraceOutput& out)
130 	{
131 		out.Print("team forked, new thread %ld", fForkedThread);
132 	}
133 
134 private:
135 	thread_id			fForkedThread;
136 };
137 
138 
139 class ExecTeam : public AbstractTraceEntry {
140 public:
141 	ExecTeam(const char* path, int32 argCount, const char* const* args,
142 			int32 envCount, const char* const* env)
143 		:
144 		fArgCount(argCount),
145 		fArgs(NULL)
146 	{
147 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
148 			false);
149 
150 		// determine the buffer size we need for the args
151 		size_t argBufferSize = 0;
152 		for (int32 i = 0; i < argCount; i++)
153 			argBufferSize += strlen(args[i]) + 1;
154 
155 		// allocate a buffer
156 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
157 		if (fArgs) {
158 			char* buffer = fArgs;
159 			for (int32 i = 0; i < argCount; i++) {
160 				size_t argSize = strlen(args[i]) + 1;
161 				memcpy(buffer, args[i], argSize);
162 				buffer += argSize;
163 			}
164 		}
165 
166 		// ignore env for the time being
167 		(void)envCount;
168 		(void)env;
169 
170 		Initialized();
171 	}
172 
173 	virtual void AddDump(TraceOutput& out)
174 	{
175 		out.Print("team exec, \"%s\", args:", fPath);
176 
177 		if (fArgs != NULL) {
178 			char* args = fArgs;
179 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
180 				out.Print(" \"%s\"", args);
181 				args += strlen(args) + 1;
182 			}
183 		} else
184 			out.Print(" <too long>");
185 	}
186 
187 private:
188 	char*	fPath;
189 	int32	fArgCount;
190 	char*	fArgs;
191 };
192 
193 
194 static const char*
195 job_control_state_name(job_control_state state)
196 {
197 	switch (state) {
198 		case JOB_CONTROL_STATE_NONE:
199 			return "none";
200 		case JOB_CONTROL_STATE_STOPPED:
201 			return "stopped";
202 		case JOB_CONTROL_STATE_CONTINUED:
203 			return "continued";
204 		case JOB_CONTROL_STATE_DEAD:
205 			return "dead";
206 		default:
207 			return "invalid";
208 	}
209 }
210 
211 
212 class SetJobControlState : public AbstractTraceEntry {
213 public:
214 	SetJobControlState(team_id team, job_control_state newState, int signal)
215 		:
216 		fTeam(team),
217 		fNewState(newState),
218 		fSignal(signal)
219 	{
220 		Initialized();
221 	}
222 
223 	virtual void AddDump(TraceOutput& out)
224 	{
225 		out.Print("team set job control state, team %ld, "
226 			"new state: %s, signal: %d",
227 			fTeam, job_control_state_name(fNewState), fSignal);
228 	}
229 
230 private:
231 	team_id				fTeam;
232 	job_control_state	fNewState;
233 	int					fSignal;
234 };
235 
236 
237 class WaitForChild : public AbstractTraceEntry {
238 public:
239 	WaitForChild(pid_t child, uint32 flags)
240 		:
241 		fChild(child),
242 		fFlags(flags)
243 	{
244 		Initialized();
245 	}
246 
247 	virtual void AddDump(TraceOutput& out)
248 	{
249 		out.Print("team wait for child, child: %ld, "
250 			"flags: 0x%lx", fChild, fFlags);
251 	}
252 
253 private:
254 	pid_t	fChild;
255 	uint32	fFlags;
256 };
257 
258 
259 class WaitForChildDone : public AbstractTraceEntry {
260 public:
261 	WaitForChildDone(const job_control_entry& entry)
262 		:
263 		fState(entry.state),
264 		fTeam(entry.thread),
265 		fStatus(entry.status),
266 		fReason(entry.reason),
267 		fSignal(entry.signal)
268 	{
269 		Initialized();
270 	}
271 
272 	WaitForChildDone(status_t error)
273 		:
274 		fTeam(error)
275 	{
276 		Initialized();
277 	}
278 
279 	virtual void AddDump(TraceOutput& out)
280 	{
281 		if (fTeam >= 0) {
282 			out.Print("team wait for child done, team: %ld, "
283 				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
284 				fTeam, job_control_state_name(fState), fStatus, fReason,
285 				fSignal);
286 		} else {
287 			out.Print("team wait for child failed, error: "
288 				"0x%lx, ", fTeam);
289 		}
290 	}
291 
292 private:
293 	job_control_state	fState;
294 	team_id				fTeam;
295 	status_t			fStatus;
296 	uint16				fReason;
297 	uint16				fSignal;
298 };
299 
300 }	// namespace TeamTracing
301 
302 #	define T(x) new(std::nothrow) TeamTracing::x;
303 #else
304 #	define T(x) ;
305 #endif
306 
307 
308 //	#pragma mark - TeamNotificationService
309 
310 
311 TeamNotificationService::TeamNotificationService()
312 	: DefaultNotificationService("teams")
313 {
314 }
315 
316 
317 void
318 TeamNotificationService::Notify(uint32 eventCode, struct team* team)
319 {
320 	char eventBuffer[128];
321 	KMessage event;
322 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
323 	event.AddInt32("event", eventCode);
324 	event.AddInt32("team", team->id);
325 	event.AddPointer("teamStruct", team);
326 
327 	DefaultNotificationService::Notify(event, eventCode);
328 }
329 
330 
331 //	#pragma mark - Private functions
332 
333 
334 static void
335 _dump_team_info(struct team *team)
336 {
337 	kprintf("TEAM: %p\n", team);
338 	kprintf("id:          %ld (%#lx)\n", team->id, team->id);
339 	kprintf("name:        '%s'\n", team->name);
340 	kprintf("args:        '%s'\n", team->args);
341 	kprintf("next:        %p\n", team->next);
342 	kprintf("parent:      %p", team->parent);
343 	if (team->parent != NULL) {
344 		kprintf(" (id = %ld)\n", team->parent->id);
345 	} else
346 		kprintf("\n");
347 
348 	kprintf("children:    %p\n", team->children);
349 	kprintf("num_threads: %d\n", team->num_threads);
350 	kprintf("state:       %d\n", team->state);
351 	kprintf("flags:       0x%lx\n", team->flags);
352 	kprintf("io_context:  %p\n", team->io_context);
353 	if (team->address_space)
354 		kprintf("address_space: %p\n", team->address_space);
355 	kprintf("main_thread: %p\n", team->main_thread);
356 	kprintf("thread_list: %p\n", team->thread_list);
357 	kprintf("group_id:    %ld\n", team->group_id);
358 	kprintf("session_id:  %ld\n", team->session_id);
359 }
360 
361 
362 static int
363 dump_team_info(int argc, char **argv)
364 {
365 	struct hash_iterator iterator;
366 	struct team *team;
367 	team_id id = -1;
368 	bool found = false;
369 
370 	if (argc < 2) {
371 		struct thread* thread = thread_get_current_thread();
372 		if (thread != NULL && thread->team != NULL)
373 			_dump_team_info(thread->team);
374 		else
375 			kprintf("No current team!\n");
376 		return 0;
377 	}
378 
379 	id = strtoul(argv[1], NULL, 0);
380 	if (IS_KERNEL_ADDRESS(id)) {
381 		// semi-hack
382 		_dump_team_info((struct team *)id);
383 		return 0;
384 	}
385 
386 	// walk through the team list, trying to match name or id
387 	hash_open(sTeamHash, &iterator);
388 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
389 		if ((team->name && strcmp(argv[1], team->name) == 0) || team->id == id) {
390 			_dump_team_info(team);
391 			found = true;
392 			break;
393 		}
394 	}
395 	hash_close(sTeamHash, &iterator, false);
396 
397 	if (!found)
398 		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
399 	return 0;
400 }
401 
402 
403 static int
404 dump_teams(int argc, char **argv)
405 {
406 	struct hash_iterator iterator;
407 	struct team *team;
408 
409 	kprintf("team           id  parent      name\n");
410 	hash_open(sTeamHash, &iterator);
411 
412 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
413 		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->name);
414 	}
415 
416 	hash_close(sTeamHash, &iterator, false);
417 	return 0;
418 }
419 
420 
421 static int
422 team_struct_compare(void *_p, const void *_key)
423 {
424 	struct team *p = (struct team*)_p;
425 	const struct team_key *key = (const struct team_key*)_key;
426 
427 	if (p->id == key->id)
428 		return 0;
429 
430 	return 1;
431 }
432 
433 
434 static uint32
435 team_struct_hash(void *_p, const void *_key, uint32 range)
436 {
437 	struct team *p = (struct team*)_p;
438 	const struct team_key *key = (const struct team_key*)_key;
439 
440 	if (p != NULL)
441 		return p->id % range;
442 
443 	return (uint32)key->id % range;
444 }
445 
446 
447 static int
448 process_group_compare(void *_group, const void *_key)
449 {
450 	struct process_group *group = (struct process_group*)_group;
451 	const struct team_key *key = (const struct team_key*)_key;
452 
453 	if (group->id == key->id)
454 		return 0;
455 
456 	return 1;
457 }
458 
459 
460 static uint32
461 process_group_hash(void *_group, const void *_key, uint32 range)
462 {
463 	struct process_group *group = (struct process_group*)_group;
464 	const struct team_key *key = (const struct team_key*)_key;
465 
466 	if (group != NULL)
467 		return group->id % range;
468 
469 	return (uint32)key->id % range;
470 }
471 
472 
473 static void
474 insert_team_into_parent(struct team *parent, struct team *team)
475 {
476 	ASSERT(parent != NULL);
477 
478 	team->siblings_next = parent->children;
479 	parent->children = team;
480 	team->parent = parent;
481 }
482 
483 
484 /*!	Note: must have team lock held */
485 static void
486 remove_team_from_parent(struct team *parent, struct team *team)
487 {
488 	struct team *child, *last = NULL;
489 
490 	for (child = parent->children; child != NULL; child = child->siblings_next) {
491 		if (child == team) {
492 			if (last == NULL)
493 				parent->children = child->siblings_next;
494 			else
495 				last->siblings_next = child->siblings_next;
496 
497 			team->parent = NULL;
498 			break;
499 		}
500 		last = child;
501 	}
502 }
503 
504 
505 /*!	Reparent each of our children to the kernel team.
506 	Note: must have team lock held
507 */
508 static void
509 reparent_children(struct team *team)
510 {
511 	struct team *child;
512 
513 	while ((child = team->children) != NULL) {
514 		// remove the child from its current team and add it to the kernel team
515 		remove_team_from_parent(team, child);
516 		insert_team_into_parent(sKernelTeam, child);
517 	}
518 
519 	// move job control entries too
520 	sKernelTeam->stopped_children->entries.MoveFrom(
521 		&team->stopped_children->entries);
522 	sKernelTeam->continued_children->entries.MoveFrom(
523 		&team->continued_children->entries);
524 
525 	// Note, we don't move the dead children entries. Those will be deleted
526 	// when the team structure is deleted.
527 }
528 
529 
530 static bool
531 is_session_leader(struct team *team)
532 {
533 	return team->session_id == team->id;
534 }
535 
536 
537 static bool
538 is_process_group_leader(struct team *team)
539 {
540 	return team->group_id == team->id;
541 }
542 
543 
544 static void
545 deferred_delete_process_group(struct process_group *group)
546 {
547 	if (group == NULL)
548 		return;
549 
550 	// remove_group_from_session() keeps this pointer around
551 	// only if the session can be freed as well
552 	if (group->session) {
553 		TRACE(("deferred_delete_process_group(): frees session %ld\n",
554 			group->session->id));
555 		deferred_free(group->session);
556 	}
557 
558 	deferred_free(group);
559 }
560 
561 
562 /*!	Removes a group from its session. If this was the session's last group,
563 	the group keeps its session pointer so that the session is freed together
564 	with it (see deferred_delete_process_group()). You must hold the team lock.
565 */
566 static void
567 remove_group_from_session(struct process_group *group)
568 {
569 	struct process_session *session = group->session;
570 
571 	// the group must be in a session for this function to have any effect
572 	if (session == NULL)
573 		return;
574 
575 	hash_remove(sGroupHash, group);
576 
577 	// we cannot free the session here, so we keep the group's session pointer
578 	// around - this way it will be freed by deferred_delete_process_group()
579 	if (--session->group_count > 0)
580 		group->session = NULL;
581 }
582 
583 
584 /*!	Team lock must be held.
585 */
586 static void
587 acquire_process_group_ref(pid_t groupID)
588 {
589 	process_group* group = team_get_process_group_locked(NULL, groupID);
590 	if (group == NULL) {
591 		panic("acquire_process_group_ref(): unknown group ID: %ld", groupID);
592 		return;
593 	}
594 
595 	group->refs++;
596 }
597 
598 
599 /*!	Team lock must be held.
600 */
601 static void
602 release_process_group_ref(pid_t groupID)
603 {
604 	process_group* group = team_get_process_group_locked(NULL, groupID);
605 	if (group == NULL) {
606 		panic("release_process_group_ref(): unknown group ID: %ld", groupID);
607 		return;
608 	}
609 
610 	if (group->refs <= 0) {
611 		panic("release_process_group_ref(%ld): ref count already 0", groupID);
612 		return;
613 	}
614 
615 	if (--group->refs > 0)
616 		return;
617 
618 	// group is no longer used
619 
620 	remove_group_from_session(group);
621 	deferred_delete_process_group(group);
622 }
623 
624 
625 /*!	You must hold the team lock when calling this function. */
626 static void
627 insert_group_into_session(struct process_session *session, struct process_group *group)
628 {
629 	if (group == NULL)
630 		return;
631 
632 	group->session = session;
633 	hash_insert(sGroupHash, group);
634 	session->group_count++;
635 }
636 
637 
638 /*!	You must hold the team lock when calling this function. */
639 static void
640 insert_team_into_group(struct process_group *group, struct team *team)
641 {
642 	team->group = group;
643 	team->group_id = group->id;
644 	team->session_id = group->session->id;
645 
646 	team->group_next = group->teams;
647 	group->teams = team;
648 	acquire_process_group_ref(group->id);
649 }
650 
651 
652 /*!	Removes the team from the group.
653 
654 	\param team the team that'll be removed from its group
655 */
656 static void
657 remove_team_from_group(struct team *team)
658 {
659 	struct process_group *group = team->group;
660 	struct team *current, *last = NULL;
661 
662 	// the team must be in a group for this function to have any effect
663 	if (group == NULL)
664 		return;
665 
666 	for (current = group->teams; current != NULL; current = current->group_next) {
667 		if (current == team) {
668 			if (last == NULL)
669 				group->teams = current->group_next;
670 			else
671 				last->group_next = current->group_next;
672 
673 			team->group = NULL;
674 			break;
675 		}
676 		last = current;
677 	}
678 
679 	team->group = NULL;
680 	team->group_next = NULL;
681 
682 	release_process_group_ref(group->id);
683 }
684 
685 
686 static struct process_group *
687 create_process_group(pid_t id)
688 {
689 	struct process_group *group = (struct process_group *)malloc(sizeof(struct process_group));
690 	if (group == NULL)
691 		return NULL;
692 
693 	group->id = id;
694 	group->refs = 0;
695 	group->session = NULL;
696 	group->teams = NULL;
697 	group->orphaned = true;
698 	return group;
699 }
700 
701 
702 static struct process_session *
703 create_process_session(pid_t id)
704 {
705 	struct process_session *session
706 		= (struct process_session *)malloc(sizeof(struct process_session));
707 	if (session == NULL)
708 		return NULL;
709 
710 	session->id = id;
711 	session->group_count = 0;
712 	session->controlling_tty = -1;
713 	session->foreground_group = -1;
714 
715 	return session;
716 }
717 
718 
719 static void
720 set_team_name(struct team* team, const char* name)
721 {
722 	if (const char* lastSlash = strrchr(name, '/'))
723 		name = lastSlash + 1;
724 
725 	strlcpy(team->name, name, B_OS_NAME_LENGTH);
726 }
727 
728 
729 static struct team *
730 create_team_struct(const char *name, bool kernel)
731 {
732 	struct team *team = (struct team *)malloc(sizeof(struct team));
733 	if (team == NULL)
734 		return NULL;
735 	MemoryDeleter teamDeleter(team);
736 
737 	team->next = team->siblings_next = team->children = team->parent = NULL;
738 	team->id = allocate_thread_id();
739 	set_team_name(team, name);
740 	team->args[0] = '\0';
741 	team->num_threads = 0;
742 	team->io_context = NULL;
743 	team->address_space = NULL;
744 	team->realtime_sem_context = NULL;
745 	team->xsi_sem_context = NULL;
746 	team->thread_list = NULL;
747 	team->main_thread = NULL;
748 	team->loading_info = NULL;
749 	team->state = TEAM_STATE_BIRTH;
750 	team->flags = 0;
751 	team->death_sem = -1;
752 	team->user_data_area = -1;
753 	team->user_data = 0;
754 	team->used_user_data = 0;
755 	team->user_data_size = 0;
756 	team->free_user_threads = NULL;
757 
758 	team->supplementary_groups = NULL;
759 	team->supplementary_group_count = 0;
760 
761 	team->dead_threads_kernel_time = 0;
762 	team->dead_threads_user_time = 0;
763 
764 	// dead threads
765 	list_init(&team->dead_threads);
766 	team->dead_threads_count = 0;
767 
768 	// dead children
769 	team->dead_children = new(nothrow) team_dead_children;
770 	if (team->dead_children == NULL)
771 		return NULL;
772 	ObjectDeleter<team_dead_children> deadChildrenDeleter(team->dead_children);
773 
774 	team->dead_children->count = 0;
775 	team->dead_children->kernel_time = 0;
776 	team->dead_children->user_time = 0;
777 
778 	// stopped children
779 	team->stopped_children = new(nothrow) team_job_control_children;
780 	if (team->stopped_children == NULL)
781 		return NULL;
782 	ObjectDeleter<team_job_control_children> stoppedChildrenDeleter(
783 		team->stopped_children);
784 
785 	// continued children
786 	team->continued_children = new(nothrow) team_job_control_children;
787 	if (team->continued_children == NULL)
788 		return NULL;
789 	ObjectDeleter<team_job_control_children> continuedChildrenDeleter(
790 		team->continued_children);
791 
792 	// job control entry
793 	team->job_control_entry = new(nothrow) job_control_entry;
794 	if (team->job_control_entry == NULL)
795 		return NULL;
796 	ObjectDeleter<job_control_entry> jobControlEntryDeleter(
797 		team->job_control_entry);
798 	team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
799 	team->job_control_entry->thread = team->id;
800 	team->job_control_entry->team = team;
801 
802 	list_init(&team->sem_list);
803 	list_init(&team->port_list);
804 	list_init(&team->image_list);
805 	list_init(&team->watcher_list);
806 
807 	clear_team_debug_info(&team->debug_info, true);
808 
809 	if (arch_team_init_team_struct(team, kernel) < 0)
810 		return NULL;
811 
812 	// publish dead/stopped/continued children condition vars
813 	team->dead_children->condition_variable.Init(team->dead_children,
814 		"team children");
815 
816 	// keep all allocated structures
817 	jobControlEntryDeleter.Detach();
818 	continuedChildrenDeleter.Detach();
819 	stoppedChildrenDeleter.Detach();
820 	deadChildrenDeleter.Detach();
821 	teamDeleter.Detach();
822 
823 	return team;
824 }
825 
826 
827 static void
828 delete_team_struct(struct team *team)
829 {
830 	while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
831 			&team->dead_threads)) {
832 		free(threadDeathEntry);
833 	}
834 
835 	while (job_control_entry* entry = team->dead_children->entries.RemoveHead())
836 		delete entry;
837 
838 	while (free_user_thread* entry = team->free_user_threads) {
839 		team->free_user_threads = entry->next;
840 		free(entry);
841 	}
842 
843 	malloc_referenced_release(team->supplementary_groups);
844 
845 	delete team->job_control_entry;
846 		// usually already NULL and transferred to the parent
847 	delete team->continued_children;
848 	delete team->stopped_children;
849 	delete team->dead_children;
850 	free(team);
851 }
852 
853 
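/*!	Creates the team's "user data" area: a small (currently four page) area
	placed at (or above) KERNEL_USER_DATA_BASE, from which per-thread
	user_thread structures are handed out (cf. team_allocate_user_thread()).
*/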
854 static status_t
855 create_team_user_data(struct team* team)
856 {
857 	void* address = (void*)KERNEL_USER_DATA_BASE;
858 	size_t size = 4 * B_PAGE_SIZE;
859 	team->user_data_area = create_area_etc(team->id, "user area", &address,
860 		B_BASE_ADDRESS, size, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0);
861 	if (team->user_data_area < 0)
862 		return team->user_data_area;
863 
864 	team->user_data = (addr_t)address;
865 	team->used_user_data = 0;
866 	team->user_data_size = size;
867 	team->free_user_threads = NULL;
868 
869 	return B_OK;
870 }
871 
872 
873 static void
874 delete_team_user_data(struct team* team)
875 {
876 	if (team->user_data_area >= 0) {
877 		vm_delete_area(team->id, team->user_data_area, true);
878 		team->user_data = 0;
879 		team->used_user_data = 0;
880 		team->user_data_size = 0;
881 		team->user_data_area = -1;
882 		while (free_user_thread* entry = team->free_user_threads) {
883 			team->free_user_threads = entry->next;
884 			free(entry);
885 		}
886 	}
887 }
888 
889 
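/*!	Copies the flat argument/environment array from userland into kernel memory
	and relocates its pointers. The expected buffer layout is: \a argCount
	argument pointers plus a NULL terminator, \a envCount environment pointers
	plus a NULL terminator, followed by the string data -- \a flatArgsSize bytes
	in total. On success \a _flatArgs is set to the kernel copy, which the
	caller has to free().
*/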
890 static status_t
891 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
892 	int32 argCount, int32 envCount, char**& _flatArgs)
893 {
894 	if (argCount < 0 || envCount < 0)
895 		return B_BAD_VALUE;
896 
897 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
898 		return B_TOO_MANY_ARGS;
899 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
900 		return B_BAD_VALUE;
901 
902 	if (!IS_USER_ADDRESS(userFlatArgs))
903 		return B_BAD_ADDRESS;
904 
905 	// allocate kernel memory
906 	char** flatArgs = (char**)malloc(flatArgsSize);
907 	if (flatArgs == NULL)
908 		return B_NO_MEMORY;
909 
910 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
911 		free(flatArgs);
912 		return B_BAD_ADDRESS;
913 	}
914 
915 	// check and relocate the array
916 	status_t error = B_OK;
917 	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
918 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
919 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
920 		if (i == argCount || i == argCount + envCount + 1) {
921 			// check array null termination
922 			if (flatArgs[i] != NULL) {
923 				error = B_BAD_VALUE;
924 				break;
925 			}
926 		} else {
927 			// check string
928 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
929 			size_t maxLen = stringEnd - arg;
930 			if (arg < stringBase || arg >= stringEnd
931 					|| strnlen(arg, maxLen) == maxLen) {
932 				error = B_BAD_VALUE;
933 				break;
934 			}
935 
936 			flatArgs[i] = arg;
937 		}
938 	}
939 
940 	if (error == B_OK)
941 		_flatArgs = flatArgs;
942 	else
943 		free(flatArgs);
944 
945 	return error;
946 }
947 
948 
949 static void
950 free_team_arg(struct team_arg *teamArg)
951 {
952 	if (teamArg != NULL) {
953 		free(teamArg->flat_args);
954 		free(teamArg->path);
955 		free(teamArg);
956 	}
957 }
958 
959 
960 static status_t
961 create_team_arg(struct team_arg **_teamArg, const char *path, char** flatArgs,
962 	size_t flatArgsSize, int32 argCount, int32 envCount, port_id port,
963 	uint32 token)
964 {
965 	struct team_arg *teamArg = (struct team_arg*)malloc(sizeof(team_arg));
966 	if (teamArg == NULL)
967 		return B_NO_MEMORY;
968 
969 	teamArg->path = strdup(path);
970 	if (teamArg->path == NULL) {
971 		free(teamArg);
972 		return B_NO_MEMORY;
973 	}
974 
975 	// copy the args over
976 
977 	teamArg->flat_args = flatArgs;
978 	teamArg->flat_args_size = flatArgsSize;
979 	teamArg->arg_count = argCount;
980 	teamArg->env_count = envCount;
981 	teamArg->error_port = port;
982 	teamArg->error_token = token;
983 
984 	*_teamArg = teamArg;
985 	return B_OK;
986 }
987 
988 
989 static int32
990 team_create_thread_start(void *args)
991 {
992 	status_t err;
993 	struct thread *t;
994 	struct team *team;
995 	struct team_arg *teamArgs = (struct team_arg*)args;
996 	const char *path;
997 	addr_t entry;
998 	char ustack_name[128];
999 	uint32 sizeLeft;
1000 	char **userArgs;
1001 	char **userEnv;
1002 	struct user_space_program_args *programArgs;
1003 	uint32 argCount, envCount, i;
1004 
1005 	t = thread_get_current_thread();
1006 	team = t->team;
1007 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
1008 
1009 	TRACE(("team_create_thread_start: entry thread %ld\n", t->id));
1010 
1011 	// get a user thread for the main thread
1012 	t->user_thread = team_allocate_user_thread(team);
1013 
1014 	// create an initial primary stack area
1015 
1016 	// Main stack area layout is currently as follows (starting from 0):
1017 	//
1018 	// size								| usage
1019 	// ---------------------------------+--------------------------------
1020 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
1021 	// TLS_SIZE							| TLS data
1022 	// sizeof(user_space_program_args)	| argument structure for the runtime
1023 	//									| loader
1024 	// flat arguments size				| flat process arguments and environment
1025 
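	//
	// The area itself is created at the very end of USER_STACK_REGION (cf. the
	// user_stack_base computation below), so the program arguments end up at
	// the highest addresses of the area, just above the TLS block.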
1026 	// ToDo: ENV_SIZE is a) limited, and b) not used after libroot copied it to the heap
1027 	// ToDo: we could reserve the whole USER_STACK_REGION upfront...
1028 
1029 	sizeLeft = PAGE_ALIGN(USER_MAIN_THREAD_STACK_SIZE
1030 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE + TLS_SIZE
1031 		+ sizeof(struct user_space_program_args) + teamArgs->flat_args_size);
1032 	t->user_stack_base = USER_STACK_REGION + USER_STACK_REGION_SIZE - sizeLeft;
1033 	t->user_stack_size = USER_MAIN_THREAD_STACK_SIZE
1034 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
1035 		// the exact location at the end of the user stack area
1036 
1037 	sprintf(ustack_name, "%s_main_stack", team->name);
1038 	t->user_stack_area = create_area_etc(team->id, ustack_name,
1039 		(void **)&t->user_stack_base, B_EXACT_ADDRESS, sizeLeft, B_NO_LOCK,
1040 		B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0, 0);
1041 	if (t->user_stack_area < 0) {
1042 		dprintf("team_create_thread_start: could not create default user stack "
1043 			"region: %s\n", strerror(t->user_stack_area));
1044 
1045 		free_team_arg(teamArgs);
1046 		return t->user_stack_area;
1047 	}
1048 
1049 	// now that the TLS area is allocated, initialize TLS
1050 	arch_thread_init_tls(t);
1051 
1052 	argCount = teamArgs->arg_count;
1053 	envCount = teamArgs->env_count;
1054 
1055 	programArgs = (struct user_space_program_args *)(t->user_stack_base
1056 		+ t->user_stack_size + TLS_SIZE);
1057 
1058 	userArgs = (char**)(programArgs + 1);
1059 	userEnv = userArgs + argCount + 1;
1060 	path = teamArgs->path;
1061 
1062 	if (user_strlcpy(programArgs->program_path, path,
1063 				sizeof(programArgs->program_path)) < B_OK
1064 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1065 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char **)) < B_OK
1066 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1067 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char **)) < B_OK
1068 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1069 				sizeof(port_id)) < B_OK
1070 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1071 				sizeof(uint32)) < B_OK
1072 		|| user_memcpy(userArgs, teamArgs->flat_args,
1073 				teamArgs->flat_args_size) < B_OK) {
1074 		// the team deletion process will clean this mess
1075 		return B_BAD_ADDRESS;
1076 	}
1077 
1078 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1079 
1080 	// add args to info member
1081 	team->args[0] = 0;
1082 	strlcpy(team->args, path, sizeof(team->args));
1083 	for (i = 1; i < argCount; i++) {
1084 		strlcat(team->args, " ", sizeof(team->args));
1085 		strlcat(team->args, teamArgs->flat_args[i], sizeof(team->args));
1086 	}
1087 
1088 	free_team_arg(teamArgs);
1089 		// the arguments are already on the user stack, we no longer need
1090 		// them in this form
1091 
1092 	// NOTE: Normally arch_thread_enter_userspace() never returns, so automatic
1093 	// variables with function scope will never be destroyed.
1094 	{
1095 		// find runtime_loader path
1096 		KPath runtimeLoaderPath;
1097 		err = find_directory(B_BEOS_SYSTEM_DIRECTORY, gBootDevice, false,
1098 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1099 		if (err < B_OK) {
1100 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1101 				strerror(err)));
1102 			return err;
1103 		}
1104 		runtimeLoaderPath.UnlockBuffer();
1105 		err = runtimeLoaderPath.Append("runtime_loader");
1106 
1107 		if (err == B_OK)
1108 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0, &entry);
1109 	}
1110 
1111 	if (err < B_OK) {
1112 		// Luckily, we don't have to clean up the mess we created - that's
1113 		// done for us by the normal team deletion process
1114 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1115 			"%s\n", strerror(err)));
1116 		return err;
1117 	}
1118 
1119 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1120 
1121 	team->state = TEAM_STATE_NORMAL;
1122 
1123 	// jump to the entry point in user space
1124 	return arch_thread_enter_userspace(t, entry, programArgs, NULL);
1125 		// only returns in case of error
1126 }
1127 
1128 
1129 static thread_id
1130 load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1131 	int32 envCount, int32 priority, team_id parentID, uint32 flags,
1132 	port_id errorPort, uint32 errorToken)
1133 {
1134 	char** flatArgs = _flatArgs;
1135 	struct team *team;
1136 	const char *threadName;
1137 	thread_id thread;
1138 	status_t status;
1139 	cpu_status state;
1140 	struct team_arg *teamArgs;
1141 	struct team_loading_info loadingInfo;
1142 	io_context* parentIOContext = NULL;
1143 
1144 	if (flatArgs == NULL || argCount == 0)
1145 		return B_BAD_VALUE;
1146 
1147 	const char* path = flatArgs[0];
1148 
1149 	TRACE(("load_image_internal: name '%s', args = %p, argCount = %ld\n",
1150 		path, flatArgs, argCount));
1151 
1152 	team = create_team_struct(path, false);
1153 	if (team == NULL)
1154 		return B_NO_MEMORY;
1155 
1156 	if (flags & B_WAIT_TILL_LOADED) {
1157 		loadingInfo.thread = thread_get_current_thread();
1158 		loadingInfo.result = B_ERROR;
1159 		loadingInfo.done = false;
1160 		team->loading_info = &loadingInfo;
1161 	}
1162 
1163 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
1164 
1165 	// get the parent team
1166 	struct team* parent;
1167 
1168 	if (parentID == B_CURRENT_TEAM)
1169 		parent = thread_get_current_thread()->team;
1170 	else
1171 		parent = team_get_team_struct_locked(parentID);
1172 
1173 	if (parent == NULL) {
1174 		teamLocker.Unlock();
1175 		status = B_BAD_TEAM_ID;
1176 		goto err0;
1177 	}
1178 
1179 	// inherit the parent's user/group
1180 	inherit_parent_user_and_group_locked(team, parent);
1181 
1182 	hash_insert(sTeamHash, team);
1183 	insert_team_into_parent(parent, team);
1184 	insert_team_into_group(parent->group, team);
1185 	sUsedTeams++;
1186 
1187 	// get a reference to the parent's I/O context -- we need it to create ours
1188 	parentIOContext = parent->io_context;
1189 	vfs_get_io_context(parentIOContext);
1190 
1191 	teamLocker.Unlock();
1192 
1193 	// check the executable's set-user/group-id permission
1194 	update_set_id_user_and_group(team, path);
1195 
1196 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1197 		envCount, errorPort, errorToken);
1198 
1199 	if (status != B_OK)
1200 		goto err1;
1201 
1202 	_flatArgs = NULL;
1203 		// args are owned by the team_arg structure now
1204 
1205 	// create a new io_context for this team
1206 	team->io_context = vfs_new_io_context(parentIOContext, true);
1207 	if (!team->io_context) {
1208 		status = B_NO_MEMORY;
1209 		goto err2;
1210 	}
1211 
1212 	// We don't need the parent's I/O context any longer.
1213 	vfs_put_io_context(parentIOContext);
1214 	parentIOContext = NULL;
1215 
1216 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1217 	vfs_exec_io_context(team->io_context);
1218 
1219 	// create an address space for this team
1220 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1221 		&team->address_space);
1222 	if (status < B_OK)
1223 		goto err3;
1224 
1225 	// cut the path from the main thread name
1226 	threadName = strrchr(path, '/');
1227 	if (threadName != NULL)
1228 		threadName++;
1229 	else
1230 		threadName = path;
1231 
1232 	// create the user data area
1233 	status = create_team_user_data(team);
1234 	if (status != B_OK)
1235 		goto err4;
1236 
1237 	// notify team listeners
1238 	sNotificationService.Notify(TEAM_ADDED, team);
1239 
1240 	// Create a kernel thread, but under the context of the new team
1241 	// The new thread will take over ownership of teamArgs
1242 	thread = spawn_kernel_thread_etc(team_create_thread_start, threadName,
1243 		B_NORMAL_PRIORITY, teamArgs, team->id, team->id);
1244 	if (thread < 0) {
1245 		status = thread;
1246 		goto err5;
1247 	}
1248 
1249 	// wait for the loader of the new team to finish its work
1250 	if (flags & B_WAIT_TILL_LOADED) {
1251 		struct thread *mainThread;
1252 
1253 		state = disable_interrupts();
1254 		GRAB_THREAD_LOCK();
1255 
1256 		mainThread = thread_get_thread_struct_locked(thread);
1257 		if (mainThread) {
1258 			// resume the team's main thread
1259 			if (mainThread->state == B_THREAD_SUSPENDED)
1260 				scheduler_enqueue_in_run_queue(mainThread);
1261 
1262 			// Now suspend ourselves until loading is finished.
1263 			// We will be woken either by the thread, when it finished or
1264 			// aborted loading, or when the team is going to die (e.g. is
1265 			// killed). In either case the one setting `loadingInfo.done' is
1266 			// responsible for removing the info from the team structure.
1267 			while (!loadingInfo.done) {
1268 				thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
1269 				scheduler_reschedule();
1270 			}
1271 		} else {
1272 			// Impressive! Someone managed to kill the thread in this short
1273 			// time.
1274 		}
1275 
1276 		RELEASE_THREAD_LOCK();
1277 		restore_interrupts(state);
1278 
1279 		if (loadingInfo.result < B_OK)
1280 			return loadingInfo.result;
1281 	}
1282 
1283 	// notify the debugger
1284 	user_debug_team_created(team->id);
1285 
1286 	return thread;
1287 
1288 err5:
1289 	sNotificationService.Notify(TEAM_REMOVED, team);
1290 	delete_team_user_data(team);
1291 err4:
1292 	vm_put_address_space(team->address_space);
1293 err3:
1294 	vfs_put_io_context(team->io_context);
1295 err2:
1296 	free_team_arg(teamArgs);
1297 err1:
1298 	if (parentIOContext != NULL)
1299 		vfs_put_io_context(parentIOContext);
1300 
1301 	// remove the team from its group, its parent and the team hash table, then delete it
1302 	state = disable_interrupts();
1303 	GRAB_TEAM_LOCK();
1304 
1305 	remove_team_from_group(team);
1306 	remove_team_from_parent(team->parent, team);
1307 	hash_remove(sTeamHash, team);
1308 
1309 	RELEASE_TEAM_LOCK();
1310 	restore_interrupts(state);
1311 
1312 err0:
1313 	delete_team_struct(team);
1314 
1315 	return status;
1316 }
1317 
1318 
1319 /*!	Almost shuts down the current team and loads a new image into it.
1320 	If successful, this function does not return and takes over ownership of
1321 	the arguments provided.
1322 	This function may only be called from user space.
1323 */
1324 static status_t
1325 exec_team(const char *path, char**& _flatArgs, size_t flatArgsSize,
1326 	int32 argCount, int32 envCount)
1327 {
1328 	// NOTE: Since this function normally doesn't return, don't use automatic
1329 	// variables that need destruction in the function scope.
1330 	char** flatArgs = _flatArgs;
1331 	struct team *team = thread_get_current_thread()->team;
1332 	struct team_arg *teamArgs;
1333 	const char *threadName;
1334 	status_t status = B_OK;
1335 	cpu_status state;
1336 	struct thread *thread;
1337 	thread_id nubThreadID = -1;
1338 
1339 	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
1340 		path, argCount, envCount, team->id));
1341 
1342 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1343 
1344 	// switching the kernel at run time is probably not a good idea :)
1345 	if (team == team_get_kernel_team())
1346 		return B_NOT_ALLOWED;
1347 
1348 	// we currently need to be single threaded here
1349 	// ToDo: maybe we should just kill all other threads and
1350 	//	make the current thread the team's main thread?
1351 	if (team->main_thread != thread_get_current_thread())
1352 		return B_NOT_ALLOWED;
1353 
1354 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1355 	// We iterate through the thread list to make sure that there's no other
1356 	// thread.
1357 	state = disable_interrupts();
1358 	GRAB_TEAM_LOCK();
1359 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1360 
1361 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1362 		nubThreadID = team->debug_info.nub_thread;
1363 
1364 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1365 
1366 	for (thread = team->thread_list; thread; thread = thread->team_next) {
1367 		if (thread != team->main_thread && thread->id != nubThreadID) {
1368 			status = B_NOT_ALLOWED;
1369 			break;
1370 		}
1371 	}
1372 
1373 	RELEASE_TEAM_LOCK();
1374 	restore_interrupts(state);
1375 
1376 	if (status != B_OK)
1377 		return status;
1378 
1379 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1380 		envCount, -1, 0);
1381 
1382 	if (status != B_OK)
1383 		return status;
1384 
1385 	_flatArgs = NULL;
1386 		// args are owned by the team_arg structure now
1387 
1388 	// ToDo: remove team resources if there are any left
1389 	// thread_atkernel_exit() might not be called at all
1390 
1391 	thread_reset_for_exec();
1392 
1393 	user_debug_prepare_for_exec();
1394 
1395 	delete_team_user_data(team);
1396 	vm_delete_areas(team->address_space);
1397 	xsi_sem_undo(team);
1398 	delete_owned_ports(team);
1399 	sem_delete_owned_sems(team);
1400 	remove_images(team);
1401 	vfs_exec_io_context(team->io_context);
1402 	delete_realtime_sem_context(team->realtime_sem_context);
1403 	team->realtime_sem_context = NULL;
1404 
1405 	status = create_team_user_data(team);
1406 	if (status != B_OK) {
1407 		// creating the user data failed -- we're toast
1408 		// TODO: We'd better keep the old user area in the first place.
1409 		exit_thread(status);
1410 		return status;
1411 	}
1412 
1413 	user_debug_finish_after_exec();
1414 
1415 	// rename the team
1416 
1417 	set_team_name(team, path);
1418 
1419 	// cut the path from the team name and rename the main thread, too
1420 	threadName = strrchr(path, '/');
1421 	if (threadName != NULL)
1422 		threadName++;
1423 	else
1424 		threadName = path;
1425 	rename_thread(thread_get_current_thread_id(), threadName);
1426 
1427 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1428 
1429 	// Update user/group according to the executable's set-user/group-id
1430 	// permission.
1431 	update_set_id_user_and_group(team, path);
1432 
1433 	user_debug_team_exec();
1434 
1435 	// notify team listeners
1436 	sNotificationService.Notify(TEAM_EXEC, team);
1437 
1438 	status = team_create_thread_start(teamArgs);
1439 		// this one usually doesn't return...
1440 
1441 	// sorry, we have to kill ourselves, there is no way out anymore
1442 	// (without any areas left and all that)
1443 	exit_thread(status);
1444 
1445 	// we return a status here since the signal that is sent by the
1446 	// call above is not immediately handled
1447 	return B_ERROR;
1448 }
1449 
1450 
1451 /*! This is the first function to be called from the newly created
1452 	main child thread.
1453 	It fills in the remaining thread state from the fork_arg and then returns
1454 	to user space in the child, as if returning from the parent's fork() call.
1455 */
1456 static int32
1457 fork_team_thread_start(void *_args)
1458 {
1459 	struct thread *thread = thread_get_current_thread();
1460 	struct fork_arg *forkArgs = (struct fork_arg *)_args;
1461 
1462 	struct arch_fork_arg archArgs = forkArgs->arch_info;
1463 		// we need a local copy of the arch dependent part
1464 
1465 	thread->user_stack_area = forkArgs->user_stack_area;
1466 	thread->user_stack_base = forkArgs->user_stack_base;
1467 	thread->user_stack_size = forkArgs->user_stack_size;
1468 	thread->user_local_storage = forkArgs->user_local_storage;
1469 	thread->sig_block_mask = forkArgs->sig_block_mask;
1470 	thread->user_thread = forkArgs->user_thread;
1471 	memcpy(thread->sig_action, forkArgs->sig_action,
1472 		sizeof(forkArgs->sig_action));
1473 	thread->signal_stack_base = forkArgs->signal_stack_base;
1474 	thread->signal_stack_size = forkArgs->signal_stack_size;
1475 	thread->signal_stack_enabled = forkArgs->signal_stack_enabled;
1476 
1477 	arch_thread_init_tls(thread);
1478 
1479 	free(forkArgs);
1480 
1481 	// restore the register frame the parent saved with arch_store_fork_frame()
1482 
1483 	arch_restore_fork_frame(&archArgs);
1484 		// This one won't return here
1485 
1486 	return 0;
1487 }
1488 
1489 
1490 static thread_id
1491 fork_team(void)
1492 {
1493 	struct thread *parentThread = thread_get_current_thread();
1494 	struct team *parentTeam = parentThread->team, *team;
1495 	struct fork_arg *forkArgs;
1496 	struct area_info info;
1497 	thread_id threadID;
1498 	status_t status;
1499 	int32 cookie;
1500 
1501 	TRACE(("fork_team(): team %ld\n", parentTeam->id));
1502 
1503 	if (parentTeam == team_get_kernel_team())
1504 		return B_NOT_ALLOWED;
1505 
1506 	// create a new team
1507 	// TODO: this is very similar to load_image_internal() - maybe we can do
1508 	// something about it :)
1509 
1510 	team = create_team_struct(parentTeam->name, false);
1511 	if (team == NULL)
1512 		return B_NO_MEMORY;
1513 
1514 	strlcpy(team->args, parentTeam->args, sizeof(team->args));
1515 
1516 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
1517 
1518 	// Inherit the parent's user/group.
1519 	inherit_parent_user_and_group_locked(team, parentTeam);
1520 
1521 	hash_insert(sTeamHash, team);
1522 	insert_team_into_parent(parentTeam, team);
1523 	insert_team_into_group(parentTeam->group, team);
1524 	sUsedTeams++;
1525 
1526 	teamLocker.Unlock();
1527 
1528 	// inherit some team debug flags
1529 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
1530 		& B_TEAM_DEBUG_INHERITED_FLAGS;
1531 
1532 	forkArgs = (struct fork_arg *)malloc(sizeof(struct fork_arg));
1533 	if (forkArgs == NULL) {
1534 		status = B_NO_MEMORY;
1535 		goto err1;
1536 	}
1537 
1538 	// create a new io_context for this team
1539 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
1540 	if (!team->io_context) {
1541 		status = B_NO_MEMORY;
1542 		goto err2;
1543 	}
1544 
1545 	// duplicate the realtime sem context
1546 	if (parentTeam->realtime_sem_context) {
1547 		team->realtime_sem_context = clone_realtime_sem_context(
1548 			parentTeam->realtime_sem_context);
1549 		if (team->realtime_sem_context == NULL) {
1550 			status = B_NO_MEMORY;
1551 			goto err25;
1552 		}
1553 	}
1554 
1555 	// create an address space for this team
1556 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1557 		&team->address_space);
1558 	if (status < B_OK)
1559 		goto err3;
1560 
1561 	// copy all areas of the team
1562 	// ToDo: should be able to handle stack areas differently (i.e. don't make them copy-on-write)
1563 	// ToDo: all stacks of other threads than the current one could be left out
1564 
1565 	forkArgs->user_thread = NULL;
1566 
1567 	cookie = 0;
1568 	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
1569 		if (info.area == parentTeam->user_data_area) {
1570 			// don't clone the user area; just create a new one
1571 			status = create_team_user_data(team);
1572 			if (status != B_OK)
1573 				break;
1574 
1575 			forkArgs->user_thread = team_allocate_user_thread(team);
1576 		} else {
1577 			void *address;
1578 			area_id area = vm_copy_area(team->address_space->id, info.name,
1579 				&address, B_CLONE_ADDRESS, info.protection, info.area);
1580 			if (area < B_OK) {
1581 				status = area;
1582 				break;
1583 			}
1584 
1585 			if (info.area == parentThread->user_stack_area)
1586 				forkArgs->user_stack_area = area;
1587 		}
1588 	}
1589 
1590 	if (status < B_OK)
1591 		goto err4;
1592 
1593 	if (forkArgs->user_thread == NULL) {
1594 #if KDEBUG
1595 		panic("user data area not found, parent area is %ld",
1596 			parentTeam->user_data_area);
1597 #endif
1598 		status = B_ERROR;
1599 		goto err4;
1600 	}
1601 
1602 	forkArgs->user_stack_base = parentThread->user_stack_base;
1603 	forkArgs->user_stack_size = parentThread->user_stack_size;
1604 	forkArgs->user_local_storage = parentThread->user_local_storage;
1605 	forkArgs->sig_block_mask = parentThread->sig_block_mask;
1606 	memcpy(forkArgs->sig_action, parentThread->sig_action,
1607 		sizeof(forkArgs->sig_action));
1608 	forkArgs->signal_stack_base = parentThread->signal_stack_base;
1609 	forkArgs->signal_stack_size = parentThread->signal_stack_size;
1610 	forkArgs->signal_stack_enabled = parentThread->signal_stack_enabled;
1611 
1612 	arch_store_fork_frame(&forkArgs->arch_info);
1613 
1614 	// copy image list
1615 	image_info imageInfo;
1616 	cookie = 0;
1617 	while (get_next_image_info(parentTeam->id, &cookie, &imageInfo) == B_OK) {
1618 		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
1619 		if (image < 0)
1620 			goto err5;
1621 	}
1622 
1623 	// notify team listeners
1624 	sNotificationService.Notify(TEAM_ADDED, team);
1625 
1626 	// create a kernel thread under the context of the new team
1627 	threadID = spawn_kernel_thread_etc(fork_team_thread_start,
1628 		parentThread->name, parentThread->priority, forkArgs,
1629 		team->id, team->id);
1630 	if (threadID < 0) {
1631 		status = threadID;
1632 		goto err5;
1633 	}
1634 
1635 	// notify the debugger
1636 	user_debug_team_created(team->id);
1637 
1638 	T(TeamForked(threadID));
1639 
1640 	resume_thread(threadID);
1641 	return threadID;
1642 
1643 err5:
1644 	sNotificationService.Notify(TEAM_REMOVED, team);
1645 	remove_images(team);
1646 err4:
1647 	vm_delete_address_space(team->address_space);
1648 err3:
1649 	delete_realtime_sem_context(team->realtime_sem_context);
1650 err25:
1651 	vfs_put_io_context(team->io_context);
1652 err2:
1653 	free(forkArgs);
1654 err1:
1655 	// remove the team from its group, its parent and the team hash table, then delete it
1656 	teamLocker.Lock();
1657 
1658 	remove_team_from_group(team);
1659 	remove_team_from_parent(parentTeam, team);
1660 	hash_remove(sTeamHash, team);
1661 
1662 	teamLocker.Unlock();
1663 
1664 	delete_team_struct(team);
1665 
1666 	return status;
1667 }
1668 
1669 
1670 /*!	Returns whether the specified \a parent team has any children belonging
1671 	to the process group with the specified \a groupID.
1672 	Must be called with the team lock held.
1673 */
1674 static bool
1675 has_children_in_group(struct team *parent, pid_t groupID)
1676 {
1677 	struct team *team;
1678 
1679 	struct process_group *group = team_get_process_group_locked(
1680 		parent->group->session, groupID);
1681 	if (group == NULL)
1682 		return false;
1683 
1684 	for (team = group->teams; team; team = team->group_next) {
1685 		if (team->parent == parent)
1686 			return true;
1687 	}
1688 
1689 	return false;
1690 }
1691 
1692 
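/*!	Returns the first entry in \a children matching the waitpid()-style \a id:
	a positive \a id matches the child with that thread/team ID, -1 matches any
	entry, and an \a id less than -1 matches any entry whose process group ID
	equals -id.
	The caller must hold the team lock.
*/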
1693 static job_control_entry*
1694 get_job_control_entry(team_job_control_children* children, pid_t id)
1695 {
1696 	for (JobControlEntryList::Iterator it = children->entries.GetIterator();
1697 		 job_control_entry* entry = it.Next();) {
1698 
1699 		if (id > 0) {
1700 			if (entry->thread == id)
1701 				return entry;
1702 		} else if (id == -1) {
1703 			return entry;
1704 		} else {
1705 			pid_t processGroup
1706 				= (entry->team ? entry->team->group_id : entry->group_id);
1707 			if (processGroup == -id)
1708 				return entry;
1709 		}
1710 	}
1711 
1712 	return NULL;
1713 }
1714 
1715 
1716 static job_control_entry*
1717 get_job_control_entry(struct team* team, pid_t id, uint32 flags)
1718 {
1719 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
1720 
1721 	if (entry == NULL && (flags & WCONTINUED) != 0)
1722 		entry = get_job_control_entry(team->continued_children, id);
1723 
1724 	if (entry == NULL && (flags & WUNTRACED) != 0)
1725 		entry = get_job_control_entry(team->stopped_children, id);
1726 
1727 	return entry;
1728 }
1729 
1730 
1731 job_control_entry::job_control_entry()
1732 	:
1733 	has_group_ref(false)
1734 {
1735 }
1736 
1737 
1738 job_control_entry::~job_control_entry()
1739 {
1740 	if (has_group_ref) {
1741 		InterruptsSpinLocker locker(gTeamSpinlock);
1742 		release_process_group_ref(group_id);
1743 	}
1744 }
1745 
1746 
1747 /*!	Team and thread lock must be held.
1748 */
1749 void
1750 job_control_entry::InitDeadState()
1751 {
1752 	if (team != NULL) {
1753 		struct thread* thread = team->main_thread;
1754 		group_id = team->group_id;
1755 		this->thread = thread->id;
1756 		status = thread->exit.status;
1757 		reason = thread->exit.reason;
1758 		signal = thread->exit.signal;
1759 		team = NULL;
1760 		acquire_process_group_ref(group_id);
1761 		has_group_ref = true;
1762 	}
1763 }
1764 
1765 
1766 job_control_entry&
1767 job_control_entry::operator=(const job_control_entry& other)
1768 {
1769 	state = other.state;
1770 	thread = other.thread;
1771 	has_group_ref = false;
1772 	team = other.team;
1773 	group_id = other.group_id;
1774 	status = other.status;
1775 	reason = other.reason;
1776 	signal = other.signal;
1777 
1778 	return *this;
1779 }
1780 
1781 
1782 /*! This is the kernel backend for waitpid(). It reports more detail about
1783 	why a child died (or changed state) than waitpid() itself can convey.
1784 */
1785 static thread_id
1786 wait_for_child(pid_t child, uint32 flags, int32 *_reason,
1787 	status_t *_returnCode)
1788 {
1789 	struct thread* thread = thread_get_current_thread();
1790 	struct team* team = thread->team;
1791 	struct job_control_entry foundEntry;
1792 	struct job_control_entry* freeDeathEntry = NULL;
1793 	status_t status = B_OK;
1794 
1795 	TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));
1796 
1797 	T(WaitForChild(child, flags));
1798 
1799 	if (child == 0) {
1800 		// wait for all children in the process group of the calling team
1801 		child = -team->group_id;
1802 	}
1803 
1804 	bool ignoreFoundEntries = false;
1805 	bool ignoreFoundEntriesChecked = false;
1806 
1807 	while (true) {
1808 		InterruptsSpinLocker locker(gTeamSpinlock);
1809 
1810 		// check whether any condition holds
1811 		job_control_entry* entry = get_job_control_entry(team, child, flags);
1812 
1813 		// If we don't have an entry yet, check whether there are any children
1814 		// complying with the process group specification at all.
1815 		if (entry == NULL) {
1816 			// No success yet -- check whether there are any children we could
1817 			// wait for.
1818 			bool childrenExist = false;
1819 			if (child == -1) {
1820 				childrenExist = team->children != NULL;
1821 			} else if (child < -1) {
1822 				childrenExist = has_children_in_group(team, -child);
1823 			} else {
1824 				if (struct team* childTeam = team_get_team_struct_locked(child))
1825 					childrenExist = childTeam->parent == team;
1826 			}
1827 
1828 			if (!childrenExist) {
1829 				// there is no child we could wait for
1830 				status = ECHILD;
1831 			} else {
1832 				// the children we're waiting for are still running
1833 				status = B_WOULD_BLOCK;
1834 			}
1835 		} else {
1836 			// got something
1837 			foundEntry = *entry;
1838 			if (entry->state == JOB_CONTROL_STATE_DEAD) {
1839 				// The child is dead. Reap its death entry.
1840 				freeDeathEntry = entry;
1841 				team->dead_children->entries.Remove(entry);
1842 				team->dead_children->count--;
1843 			} else {
1844 				// The child is well. Reset its job control state.
1845 				team_set_job_control_state(entry->team,
1846 					JOB_CONTROL_STATE_NONE, 0, false);
1847 			}
1848 		}
1849 
1850 		// If we haven't got anything yet, prepare for waiting for the
1851 		// condition variable.
1852 		ConditionVariableEntry deadWaitEntry;
1853 
1854 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
1855 			team->dead_children->condition_variable.Add(&deadWaitEntry);
1856 
1857 		locker.Unlock();
1858 
1859 		// we got our entry and can return to our caller
1860 		if (status == B_OK) {
1861 			if (ignoreFoundEntries) {
1862 				// ... unless we shall ignore found entries
1863 				delete freeDeathEntry;
1864 				freeDeathEntry = NULL;
1865 				continue;
1866 			}
1867 
1868 			break;
1869 		}
1870 
1871 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
1872 			T(WaitForChildDone(status));
1873 			return status;
1874 		}
1875 
1876 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
1877 		if (status == B_INTERRUPTED) {
1878 			T(WaitForChildDone(status));
1879 			return status;
1880 		}
1881 
1882 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
1883 		// all our children are dead and fail with ECHILD. We check the
1884 		// condition at this point.
1885 		if (!ignoreFoundEntriesChecked) {
1886 			struct sigaction& handler = thread->sig_action[SIGCHLD - 1];
1887 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
1888 				|| handler.sa_handler == SIG_IGN) {
1889 				ignoreFoundEntries = true;
1890 			}
1891 
1892 			ignoreFoundEntriesChecked = true;
1893 		}
1894 	}
1895 
1896 	delete freeDeathEntry;
1897 
1898 	// When we get here, we have a valid job control entry, and it has already
1899 	// been unregistered from the team or group.
1900 	int reason = 0;
1901 	switch (foundEntry.state) {
1902 		case JOB_CONTROL_STATE_DEAD:
1903 			reason = foundEntry.reason;
1904 			break;
1905 		case JOB_CONTROL_STATE_STOPPED:
1906 			reason = THREAD_STOPPED;
1907 			break;
1908 		case JOB_CONTROL_STATE_CONTINUED:
1909 			reason = THREAD_CONTINUED;
1910 			break;
1911 		case JOB_CONTROL_STATE_NONE:
1912 			// can't happen
1913 			break;
1914 	}
1915 
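	// Hand the results back to the caller: the return code is the child's exit
	// status; the reason word packs the job control reason into its low 16
	// bits and the associated signal number into its high 16 bits.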
1916 	*_returnCode = foundEntry.status;
1917 	*_reason = (foundEntry.signal << 16) | reason;
1918 
1919 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
1920 	// status is available.
1921 	if (is_signal_blocked(SIGCHLD)) {
1922 		InterruptsSpinLocker locker(gTeamSpinlock);
1923 
1924 		if (get_job_control_entry(team, child, flags) == NULL)
1925 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));
1926 	}
1927 
1928 	// When the team is dead, the main thread continues to live in the kernel
1929 	// team for a very short time. To avoid surprising the caller, we wait
1930 	// until the thread is really gone.
1931 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
1932 		wait_for_thread(foundEntry.thread, NULL);
1933 
1934 	T(WaitForChildDone(foundEntry));
1935 
1936 	return foundEntry.thread;
1937 }
1938 
1939 
1940 /*! Fills the team_info structure with information from the specified
1941 	team.
1942 	The team lock must be held when called.
1943 */
1944 static status_t
1945 fill_team_info(struct team *team, team_info *info, size_t size)
1946 {
1947 	if (size != sizeof(team_info))
1948 		return B_BAD_VALUE;
1949 
1950 	// ToDo: Set more information in team_info
1951 	memset(info, 0, size);
1952 
1953 	info->team = team->id;
1954 	info->thread_count = team->num_threads;
1955 	info->image_count = count_images(team);
1956 	//info->area_count =
1957 	info->debugger_nub_thread = team->debug_info.nub_thread;
1958 	info->debugger_nub_port = team->debug_info.nub_port;
1959 	//info->uid =
1960 	//info->gid =
1961 
1962 	strlcpy(info->args, team->args, sizeof(info->args));
1963 	info->argc = 1;
1964 
1965 	return B_OK;
1966 }
1967 
1968 
1969 /*!	Updates the \c orphaned field of a process_group and returns its new value.
1970 	Interrupts must be disabled and team lock be held.
1971 */
1972 static bool
1973 update_orphaned_process_group(process_group* group, pid_t dyingProcess)
1974 {
1975 	// Orphaned Process Group: "A process group in which the parent of every
1976 	// member is either itself a member of the group or is not a member of the
1977 	// group's session." (Open Group Base Specs Issue 6)
1978 
1979 	// once orphaned, things won't change (exception: cf. setpgid())
1980 	if (group->orphaned)
1981 		return true;
1982 
1983 	struct team* team = group->teams;
1984 	while (team != NULL) {
1985 		struct team* parent = team->parent;
1986 		if (team->id != dyingProcess && parent != NULL
1987 			&& parent->id != dyingProcess
1988 			&& parent->group_id != group->id
1989 			&& parent->session_id == group->session->id) {
1990 			return false;
1991 		}
1992 
1993 		team = team->group_next;
1994 	}
1995 
1996 	group->orphaned = true;
1997 	return true;
1998 }
1999 
2000 
2001 /*!	Returns whether the process group contains stopped processes.
2002 	Interrupts must be disabled and team lock be held.
2003 */
2004 static bool
2005 process_group_has_stopped_processes(process_group* group)
2006 {
2007 	SpinLocker _(gThreadSpinlock);
2008 
2009 	struct team* team = group->teams;
2010 	while (team != NULL) {
2011 		if (team->main_thread->state == B_THREAD_SUSPENDED)
2012 			return true;
2013 
2014 		team = team->group_next;
2015 	}
2016 
2017 	return false;
2018 }
2019 
2020 
2021 //	#pragma mark - Private kernel API
2022 
2023 
2024 status_t
2025 team_init(kernel_args *args)
2026 {
2027 	struct process_session *session;
2028 	struct process_group *group;
2029 
2030 	// create the team hash table
2031 	sTeamHash = hash_init(16, offsetof(struct team, next),
2032 		&team_struct_compare, &team_struct_hash);
2033 
2034 	sGroupHash = hash_init(16, offsetof(struct process_group, next),
2035 		&process_group_compare, &process_group_hash);
2036 
2037 	// create initial session and process groups
2038 
2039 	session = create_process_session(1);
2040 	if (session == NULL)
2041 		panic("Could not create initial session.\n");
2042 
2043 	group = create_process_group(1);
2044 	if (group == NULL)
2045 		panic("Could not create initial process group.\n");
2046 
2047 	insert_group_into_session(session, group);
2048 
2049 	// create the kernel team
2050 	sKernelTeam = create_team_struct("kernel_team", true);
2051 	if (sKernelTeam == NULL)
2052 		panic("could not create kernel team!\n");
2053 	strcpy(sKernelTeam->args, sKernelTeam->name);
2054 	sKernelTeam->state = TEAM_STATE_NORMAL;
2055 
2056 	sKernelTeam->saved_set_uid = 0;
2057 	sKernelTeam->real_uid = 0;
2058 	sKernelTeam->effective_uid = 0;
2059 	sKernelTeam->saved_set_gid = 0;
2060 	sKernelTeam->real_gid = 0;
2061 	sKernelTeam->effective_gid = 0;
2062 	sKernelTeam->supplementary_groups = NULL;
2063 	sKernelTeam->supplementary_group_count = 0;
2064 
2065 	insert_team_into_group(group, sKernelTeam);
2066 
2067 	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
2068 	if (sKernelTeam->io_context == NULL)
2069 		panic("could not create io_context for kernel team!\n");
2070 
2071 	// stick it in the team hash
2072 	hash_insert(sTeamHash, sKernelTeam);
2073 
2074 	add_debugger_command_etc("team", &dump_team_info,
2075 		"Dump info about a particular team",
2076 		"[ <id> | <address> | <name> ]\n"
2077 		"Prints information about the specified team. If no argument is given\n"
2078 		"the current team is selected.\n"
2079 		"  <id>       - The ID of the team.\n"
2080 		"  <address>  - The address of the team structure.\n"
2081 		"  <name>     - The team's name.\n", 0);
2082 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2083 		"\n"
2084 		"Prints a list of all existing teams.\n", 0);
2085 
2086 	new(&sNotificationService) TeamNotificationService();
2087 
2088 	return B_OK;
2089 }
2090 
2091 
2092 int32
2093 team_max_teams(void)
2094 {
2095 	return sMaxTeams;
2096 }
2097 
2098 
2099 int32
2100 team_used_teams(void)
2101 {
2102 	return sUsedTeams;
2103 }
2104 
2105 
2106 /*!	Iterates through the list of teams. The team spinlock must be held.
2107  */
2108 struct team*
2109 team_iterate_through_teams(team_iterator_callback callback, void* cookie)
2110 {
2111 	struct hash_iterator iterator;
2112 	hash_open(sTeamHash, &iterator);
2113 
2114 	struct team* team;
2115 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
2116 		if (callback(team, cookie))
2117 			break;
2118 	}
2119 
2120 	hash_close(sTeamHash, &iterator, false);
2121 
2122 	return team;
2123 }
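
/*	Usage sketch (hypothetical, not part of the original source): a caller
	that counts all teams. The callback signature and the locking requirement
	follow the declaration above; returning true from the callback stops the
	iteration.

	static bool
	count_teams_callback(struct team* team, void* cookie)
	{
		(*(int32*)cookie)++;
		return false;
	}

	static int32
	count_teams()
	{
		int32 count = 0;
		InterruptsSpinLocker _(gTeamSpinlock);
		team_iterate_through_teams(&count_teams_callback, &count);
		return count;
	}
*/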
2124 
2125 
2126 /*! Returns the death entry of the child with the given ID, if the team has one.
2127 	You need to have the team lock held when calling this function.
2128 */
2129 job_control_entry*
2130 team_get_death_entry(struct team *team, thread_id child, bool* _deleteEntry)
2131 {
2132 	if (child <= 0)
2133 		return NULL;
2134 
2135 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2136 		child);
2137 	if (entry) {
2138 		// only remove the entry if the caller is the parent of the found team
2139 		if (team_get_current_team_id() == entry->thread) {
2140 			team->dead_children->entries.Remove(entry);
2141 			team->dead_children->count--;
2142 			*_deleteEntry = true;
2143 		} else {
2144 			*_deleteEntry = false;
2145 		}
2146 	}
2147 
2148 	return entry;
2149 }
2150 
2151 
2152 /*! Quick check to see if we have a valid team ID. */
2153 bool
2154 team_is_valid(team_id id)
2155 {
2156 	struct team *team;
2157 	cpu_status state;
2158 
2159 	if (id <= 0)
2160 		return false;
2161 
2162 	state = disable_interrupts();
2163 	GRAB_TEAM_LOCK();
2164 
2165 	team = team_get_team_struct_locked(id);
2166 
2167 	RELEASE_TEAM_LOCK();
2168 	restore_interrupts(state);
2169 
2170 	return team != NULL;
2171 }
2172 
2173 
2174 struct team *
2175 team_get_team_struct_locked(team_id id)
2176 {
2177 	struct team_key key;
2178 	key.id = id;
2179 
2180 	return (struct team*)hash_lookup(sTeamHash, &key);
2181 }
2182 
2183 
2184 /*! This searches the session of the team for the specified group ID.
2185 	You must hold the team lock when you call this function.
2186 */
2187 struct process_group *
2188 team_get_process_group_locked(struct process_session *session, pid_t id)
2189 {
2190 	struct process_group *group;
2191 	struct team_key key;
2192 	key.id = id;
2193 
2194 	group = (struct process_group *)hash_lookup(sGroupHash, &key);
2195 	if (group != NULL && (session == NULL || session == group->session))
2196 		return group;
2197 
2198 	return NULL;
2199 }
2200 
2201 
2202 void
2203 team_delete_process_group(struct process_group *group)
2204 {
2205 	if (group == NULL)
2206 		return;
2207 
2208 	TRACE(("team_delete_process_group(id = %ld)\n", group->id));
2209 
2210 	// remove_group_from_session() keeps this pointer around
2211 	// only if the session can be freed as well
2212 	if (group->session) {
2213 		TRACE(("team_delete_process_group(): frees session %ld\n", group->session->id));
2214 		free(group->session);
2215 	}
2216 
2217 	free(group);
2218 }
2219 
2220 
2221 void
2222 team_set_controlling_tty(int32 ttyIndex)
2223 {
2224 	struct team* team = thread_get_current_thread()->team;
2225 
2226 	InterruptsSpinLocker _(gTeamSpinlock);
2227 
2228 	team->group->session->controlling_tty = ttyIndex;
2229 	team->group->session->foreground_group = -1;
2230 }
2231 
2232 
2233 int32
2234 team_get_controlling_tty()
2235 {
2236 	struct team* team = thread_get_current_thread()->team;
2237 
2238 	InterruptsSpinLocker _(gTeamSpinlock);
2239 
2240 	return team->group->session->controlling_tty;
2241 }
2242 
2243 
2244 status_t
2245 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2246 {
2247 	struct thread* thread = thread_get_current_thread();
2248 	struct team* team = thread->team;
2249 
2250 	InterruptsSpinLocker locker(gTeamSpinlock);
2251 
2252 	process_session* session = team->group->session;
2253 
2254 	// must be the controlling tty of the calling process
2255 	if (session->controlling_tty != ttyIndex)
2256 		return ENOTTY;
2257 
2258 	// check process group -- must belong to our session
2259 	process_group* group = team_get_process_group_locked(session,
2260 		processGroupID);
2261 	if (group == NULL)
2262 		return B_BAD_VALUE;
2263 
2264 	// If we are a background group, we can only do this unharmed if we
2265 	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
2266 	if (session->foreground_group != -1
2267 		&& session->foreground_group != team->group_id
2268 		&& thread->sig_action[SIGTTOU - 1].sa_handler != SIG_IGN
2269 		&& !is_signal_blocked(SIGTTOU)) {
2270 		pid_t groupID = team->group->id;
2271 		locker.Unlock();
2272 		send_signal(-groupID, SIGTTOU);
2273 		return B_INTERRUPTED;
2274 	}
2275 
2276 	team->group->session->foreground_group = processGroupID;
2277 
2278 	return B_OK;
2279 }
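
/*	Userland usage sketch (hypothetical, not part of the original source):
	this function is presumably reached from tcsetpgrp() via the tty layer.
	A shell handing the terminal to a job would roughly do:

	setpgid(jobPid, jobPid);			// put the job into its own group
	tcsetpgrp(STDIN_FILENO, jobPid);	// make that group the foreground group
*/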
2280 
2281 
2282 /*!	Removes the specified team from the global team hash, and from its parent.
2283 	It also moves all of its children up to the parent.
2284 	You must hold the team lock when you call this function.
2285 */
2286 void
2287 team_remove_team(struct team *team)
2288 {
2289 	struct team *parent = team->parent;
2290 
2291 	// remember how long this team lasted
2292 	parent->dead_children->kernel_time += team->dead_threads_kernel_time
2293 		+ team->dead_children->kernel_time;
2294 	parent->dead_children->user_time += team->dead_threads_user_time
2295 		+ team->dead_children->user_time;
2296 
2297 	// Also grab the thread spinlock while removing the team from the hash.
2298 	// This makes the following sequence safe: grab teams lock, lookup team,
2299 	// grab threads lock, unlock teams lock,
2300 	// mutex_lock_threads_lock(<team related lock>), as used in the VFS code to
2301 	// lock another team's IO context.
2302 	GRAB_THREAD_LOCK();
2303 	hash_remove(sTeamHash, team);
2304 	RELEASE_THREAD_LOCK();
2305 	sUsedTeams--;
2306 
2307 	team->state = TEAM_STATE_DEATH;
2308 
2309 	// If we're a controlling process (i.e. a session leader with controlling
2310 	// terminal), there's a bit of signalling we have to do.
2311 	if (team->session_id == team->id
2312 		&& team->group->session->controlling_tty >= 0) {
2313 		process_session* session = team->group->session;
2314 
2315 		session->controlling_tty = -1;
2316 
2317 		// send SIGHUP to the foreground
2318 		if (session->foreground_group >= 0) {
2319 			send_signal_etc(-session->foreground_group, SIGHUP,
2320 				SIGNAL_FLAG_TEAMS_LOCKED);
2321 		}
2322 
2323 		// send SIGHUP + SIGCONT to all newly-orphaned process groups with
2324 		// stopped processes
2325 		struct team* child = team->children;
2326 		while (child != NULL) {
2327 			process_group* childGroup = child->group;
2328 			if (!childGroup->orphaned
2329 				&& update_orphaned_process_group(childGroup, team->id)
2330 				&& process_group_has_stopped_processes(childGroup)) {
2331 				send_signal_etc(-childGroup->id, SIGHUP,
2332 					SIGNAL_FLAG_TEAMS_LOCKED);
2333 				send_signal_etc(-childGroup->id, SIGCONT,
2334 					SIGNAL_FLAG_TEAMS_LOCKED);
2335 			}
2336 
2337 			child = child->siblings_next;
2338 		}
2339 	} else {
2340 		// update "orphaned" flags of all children's process groups
2341 		struct team* child = team->children;
2342 		while (child != NULL) {
2343 			process_group* childGroup = child->group;
2344 			if (!childGroup->orphaned)
2345 				update_orphaned_process_group(childGroup, team->id);
2346 
2347 			child = child->siblings_next;
2348 		}
2349 
2350 		// update "orphaned" flag of this team's process group
2351 		update_orphaned_process_group(team->group, team->id);
2352 	}
2353 
2354 	// reparent each of the team's children
2355 	reparent_children(team);
2356 
2357 	// remove us from our process group
2358 	remove_team_from_group(team);
2359 
2360 	// remove us from our parent
2361 	remove_team_from_parent(parent, team);
2362 }
2363 
2364 
2365 void
2366 team_delete_team(struct team *team)
2367 {
2368 	team_id teamID = team->id;
2369 	port_id debuggerPort = -1;
2370 	cpu_status state;
2371 
2372 	if (team->num_threads > 0) {
2373 		// there are other threads still in this team,
2374 		// cycle through and signal kill on each of the threads
2375 		// ToDo: this can be optimized. There's got to be a better solution.
2376 		struct thread *temp_thread;
2377 		char death_sem_name[B_OS_NAME_LENGTH];
2378 		sem_id deathSem;
2379 		int32 threadCount;
2380 
2381 		sprintf(death_sem_name, "team %ld death sem", teamID);
2382 		deathSem = create_sem(0, death_sem_name);
2383 		if (deathSem < 0)
2384 			panic("team_delete_team: cannot init death sem for team %ld\n", teamID);
2385 
2386 		state = disable_interrupts();
2387 		GRAB_TEAM_LOCK();
2388 
2389 		team->death_sem = deathSem;
2390 		threadCount = team->num_threads;
2391 
2392 		// If the team was being debugged, that will stop with the termination
2393 		// of the nub thread. The team structure has already been removed from
2394 		// the team hash table at this point, so no one can install a debugger
2395 		// anymore. We fetch the debugger's port to send it a message at the
2396 		// bitter end.
2397 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2398 
2399 		if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2400 			debuggerPort = team->debug_info.debugger_port;
2401 
2402 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2403 
2404 		// We can safely walk the list because of the lock. No new threads can
2405 		// be created because of the TEAM_STATE_DEATH flag on the team.
2406 		temp_thread = team->thread_list;
2407 		while (temp_thread) {
2408 			struct thread *next = temp_thread->team_next;
2409 
2410 			send_signal_etc(temp_thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2411 			temp_thread = next;
2412 		}
2413 
2414 		RELEASE_TEAM_LOCK();
2415 		restore_interrupts(state);
2416 
2417 		// wait until all threads in team are dead.
2418 		acquire_sem_etc(team->death_sem, threadCount, 0, 0);
2419 		delete_sem(team->death_sem);
2420 	}
2421 
2422 	// If someone is waiting for this team to be loaded, but it dies
2423 	// unexpectedly before being done, we need to notify the waiting
2424 	// thread now.
2425 
2426 	state = disable_interrupts();
2427 	GRAB_TEAM_LOCK();
2428 
2429 	if (team->loading_info) {
2430 		// there's indeed someone waiting
2431 		struct team_loading_info *loadingInfo = team->loading_info;
2432 		team->loading_info = NULL;
2433 
2434 		loadingInfo->result = B_ERROR;
2435 		loadingInfo->done = true;
2436 
2437 		GRAB_THREAD_LOCK();
2438 
2439 		// wake up the waiting thread
2440 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
2441 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
2442 
2443 		RELEASE_THREAD_LOCK();
2444 	}
2445 
2446 	RELEASE_TEAM_LOCK();
2447 	restore_interrupts(state);
2448 
2449 	// notify team watchers
2450 
2451 	{
2452 		// we're not reachable from anyone anymore at this point, so we
2453 		// can safely access the list without any locking
2454 		struct team_watcher *watcher;
2455 		while ((watcher = (struct team_watcher*)list_remove_head_item(
2456 				&team->watcher_list)) != NULL) {
2457 			watcher->hook(teamID, watcher->data);
2458 			free(watcher);
2459 		}
2460 	}
2461 
2462 	sNotificationService.Notify(TEAM_REMOVED, team);
2463 
2464 	// free team resources
2465 
2466 	vfs_put_io_context(team->io_context);
2467 	delete_realtime_sem_context(team->realtime_sem_context);
2468 	xsi_sem_undo(team);
2469 	delete_owned_ports(team);
2470 	sem_delete_owned_sems(team);
2471 	remove_images(team);
2472 	vm_delete_address_space(team->address_space);
2473 
2474 	delete_team_struct(team);
2475 
2476 	// notify the debugger that the team is gone
2477 	user_debug_team_deleted(teamID, debuggerPort);
2478 }
2479 
2480 
2481 struct team *
2482 team_get_kernel_team(void)
2483 {
2484 	return sKernelTeam;
2485 }
2486 
2487 
2488 team_id
2489 team_get_kernel_team_id(void)
2490 {
2491 	if (!sKernelTeam)
2492 		return 0;
2493 
2494 	return sKernelTeam->id;
2495 }
2496 
2497 
2498 team_id
2499 team_get_current_team_id(void)
2500 {
2501 	return thread_get_current_thread()->team->id;
2502 }
2503 
2504 
2505 status_t
2506 team_get_address_space(team_id id, vm_address_space **_addressSpace)
2507 {
2508 	cpu_status state;
2509 	struct team *team;
2510 	status_t status;
2511 
2512 	// ToDo: we need to do something about B_SYSTEM_TEAM vs. its real ID (1)
2513 	if (id == 1) {
2514 		// we're the kernel team, so we don't have to go through all
2515 		// the hassle (locking and hash lookup)
2516 		*_addressSpace = vm_get_kernel_address_space();
2517 		return B_OK;
2518 	}
2519 
2520 	state = disable_interrupts();
2521 	GRAB_TEAM_LOCK();
2522 
2523 	team = team_get_team_struct_locked(id);
2524 	if (team != NULL) {
2525 		atomic_add(&team->address_space->ref_count, 1);
2526 		*_addressSpace = team->address_space;
2527 		status = B_OK;
2528 	} else
2529 		status = B_BAD_VALUE;
2530 
2531 	RELEASE_TEAM_LOCK();
2532 	restore_interrupts(state);
2533 
2534 	return status;
2535 }
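
/*	Usage sketch (hypothetical, not part of the original source): on success
	the caller owns a reference to the address space and is assumed to drop it
	with vm_put_address_space() when done.

	vm_address_space* addressSpace;
	if (team_get_address_space(teamID, &addressSpace) == B_OK) {
		// ... inspect the address space ...
		vm_put_address_space(addressSpace);
	}
*/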
2536 
2537 
2538 /*!	Sets the team's job control state.
2539 	Interrupts must be disabled and the team lock be held.
2540 	\a threadsLocked indicates whether the thread lock is being held, too.
2541 */
2542 void
2543 team_set_job_control_state(struct team* team, job_control_state newState,
2544 	int signal, bool threadsLocked)
2545 {
2546 	if (team == NULL || team->job_control_entry == NULL)
2547 		return;
2548 
2549 	// don't touch anything if the state stays the same or the team is
2550 	// already dead
2551 	job_control_entry* entry = team->job_control_entry;
2552 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
2553 		return;
2554 
2555 	T(SetJobControlState(team->id, newState, signal));
2556 
2557 	// remove from the old list
2558 	switch (entry->state) {
2559 		case JOB_CONTROL_STATE_NONE:
2560 			// entry is in no list ATM
2561 			break;
2562 		case JOB_CONTROL_STATE_DEAD:
2563 			// can't get here
2564 			break;
2565 		case JOB_CONTROL_STATE_STOPPED:
2566 			team->parent->stopped_children->entries.Remove(entry);
2567 			break;
2568 		case JOB_CONTROL_STATE_CONTINUED:
2569 			team->parent->continued_children->entries.Remove(entry);
2570 			break;
2571 	}
2572 
2573 	entry->state = newState;
2574 	entry->signal = signal;
2575 
2576 	// add to new list
2577 	team_job_control_children* childList = NULL;
2578 	switch (entry->state) {
2579 		case JOB_CONTROL_STATE_NONE:
2580 			// entry doesn't get into any list
2581 			break;
2582 		case JOB_CONTROL_STATE_DEAD:
2583 			childList = team->parent->dead_children;
2584 			team->parent->dead_children->count++;
2585 			break;
2586 		case JOB_CONTROL_STATE_STOPPED:
2587 			childList = team->parent->stopped_children;
2588 			break;
2589 		case JOB_CONTROL_STATE_CONTINUED:
2590 			childList = team->parent->continued_children;
2591 			break;
2592 	}
2593 
2594 	if (childList != NULL) {
2595 		childList->entries.Add(entry);
2596 		team->parent->dead_children->condition_variable.NotifyAll(
2597 			threadsLocked);
2598 	}
2599 }
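
/*	Usage sketch (hypothetical, not part of the original source): the signal
	code is expected to report job control changes roughly like this, with
	interrupts disabled and the team lock held (threadsLocked depending on
	the caller):

	// the team was stopped by SIGTSTP
	team_set_job_control_state(team, JOB_CONTROL_STATE_STOPPED, SIGTSTP, true);

	// the team was continued by SIGCONT
	team_set_job_control_state(team, JOB_CONTROL_STATE_CONTINUED, SIGCONT, true);
*/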
2600 
2601 
2602 /*! Adds a hook to the team that is called as soon as this
2603 	team goes away.
2604 	This call might become public in the future.
2605 */
2606 status_t
2607 start_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2608 {
2609 	struct team_watcher *watcher;
2610 	struct team *team;
2611 	cpu_status state;
2612 
2613 	if (hook == NULL || teamID < B_OK)
2614 		return B_BAD_VALUE;
2615 
2616 	watcher = (struct team_watcher*)malloc(sizeof(struct team_watcher));
2617 	if (watcher == NULL)
2618 		return B_NO_MEMORY;
2619 
2620 	watcher->hook = hook;
2621 	watcher->data = data;
2622 
2623 	// find team and add watcher
2624 
2625 	state = disable_interrupts();
2626 	GRAB_TEAM_LOCK();
2627 
2628 	team = team_get_team_struct_locked(teamID);
2629 	if (team != NULL)
2630 		list_add_item(&team->watcher_list, watcher);
2631 
2632 	RELEASE_TEAM_LOCK();
2633 	restore_interrupts(state);
2634 
2635 	if (team == NULL) {
2636 		free(watcher);
2637 		return B_BAD_TEAM_ID;
2638 	}
2639 
2640 	return B_OK;
2641 }
2642 
2643 
2644 status_t
2645 stop_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2646 {
2647 	struct team_watcher *watcher = NULL;
2648 	struct team *team;
2649 	cpu_status state;
2650 
2651 	if (hook == NULL || teamID < B_OK)
2652 		return B_BAD_VALUE;
2653 
2654 	// find team and remove watcher (if present)
2655 
2656 	state = disable_interrupts();
2657 	GRAB_TEAM_LOCK();
2658 
2659 	team = team_get_team_struct_locked(teamID);
2660 	if (team != NULL) {
2661 		// search for watcher
2662 		while ((watcher = (struct team_watcher*)list_get_next_item(
2663 				&team->watcher_list, watcher)) != NULL) {
2664 			if (watcher->hook == hook && watcher->data == data) {
2665 				// got it!
2666 				list_remove_item(&team->watcher_list, watcher);
2667 				break;
2668 			}
2669 		}
2670 	}
2671 
2672 	RELEASE_TEAM_LOCK();
2673 	restore_interrupts(state);
2674 
2675 	if (watcher == NULL)
2676 		return B_ENTRY_NOT_FOUND;
2677 
2678 	free(watcher);
2679 	return B_OK;
2680 }
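
/*	Usage sketch (hypothetical, not part of the original source): a subsystem
	keeping per-team state can register a hook that is invoked when the team
	goes away. "my_team_gone_hook" and "state" are made-up names.

	static void
	my_team_gone_hook(team_id id, void* data)
	{
		// free the per-team state referenced by "data"
	}

	// during setup:
	start_watching_team(teamID, &my_team_gone_hook, state);

	// if the state is torn down while the team is still alive:
	stop_watching_team(teamID, &my_team_gone_hook, state);
*/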
2681 
2682 
2683 /*!	The team lock must be held or the team must still be single threaded.
2684 */
2685 struct user_thread*
2686 team_allocate_user_thread(struct team* team)
2687 {
2688 	if (team->user_data == 0)
2689 		return NULL;
2690 
2691 	user_thread* thread = NULL;
2692 
2693 	// take an entry from the free list, if any
2694 	if (struct free_user_thread* entry = team->free_user_threads) {
2695 		thread = entry->thread;
2696 		team->free_user_threads = entry->next;
2697 		deferred_free(entry);
2698 		return thread;
2699 	} else {
2700 		// enough space left?
2701 		size_t needed = _ALIGN(sizeof(user_thread));
2702 		if (team->user_data_size - team->used_user_data < needed)
2703 			return NULL;
2704 		// TODO: This imposes a per team thread limit! We should resize the
2705 		// area, if necessary. That's problematic at this point, though, since
2706 		// we've got the team lock.
2707 
2708 		thread = (user_thread*)(team->user_data + team->used_user_data);
2709 		team->used_user_data += needed;
2710 	}
2711 
2712 	thread->defer_signals = 0;
2713 	thread->pending_signals = 0;
2714 	thread->wait_status = B_OK;
2715 
2716 	return thread;
2717 }
2718 
2719 
2720 /*!	The team lock must not be held. \a thread must be the current thread.
2721 */
2722 void
2723 team_free_user_thread(struct thread* thread)
2724 {
2725 	user_thread* userThread = thread->user_thread;
2726 	if (userThread == NULL)
2727 		return;
2728 
2729 	// create a free list entry
2730 	free_user_thread* entry
2731 		= (free_user_thread*)malloc(sizeof(free_user_thread));
2732 	if (entry == NULL) {
2733 		// we have to leak the user thread :-/
2734 		return;
2735 	}
2736 
2737 	InterruptsSpinLocker _(gTeamSpinlock);
2738 
2739 	// detach from thread
2740 	SpinLocker threadLocker(gThreadSpinlock);
2741 	thread->user_thread = NULL;
2742 	threadLocker.Unlock();
2743 
2744 	entry->thread = userThread;
2745 	entry->next = thread->team->free_user_threads;
2746 	thread->team->free_user_threads = entry;
2747 }
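
/*	Usage sketch (hypothetical, not part of the original source): thread
	creation and teardown are expected to pair these calls roughly like this.

	// while creating a thread (team lock held, or team still single threaded)
	struct user_thread* userThread = team_allocate_user_thread(team);
	if (userThread == NULL)
		return B_NO_MEMORY;
	thread->user_thread = userThread;

	// when the thread exits (current thread, team lock not held)
	team_free_user_thread(thread);
*/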
2748 
2749 
2750 //	#pragma mark - Public kernel API
2751 
2752 
2753 thread_id
2754 load_image(int32 argCount, const char **args, const char **env)
2755 {
2756 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
2757 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
2758 }
2759 
2760 
2761 thread_id
2762 load_image_etc(int32 argCount, const char* const* args,
2763 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
2764 {
2765 	// we need to flatten the args and environment
2766 
2767 	if (args == NULL)
2768 		return B_BAD_VALUE;
2769 
2770 	// determine total needed size
2771 	int32 argSize = 0;
2772 	for (int32 i = 0; i < argCount; i++)
2773 		argSize += strlen(args[i]) + 1;
2774 
2775 	int32 envCount = 0;
2776 	int32 envSize = 0;
2777 	while (env != NULL && env[envCount] != NULL)
2778 		envSize += strlen(env[envCount++]) + 1;
2779 
2780 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
2781 	if (size > MAX_PROCESS_ARGS_SIZE)
2782 		return B_TOO_MANY_ARGS;
2783 
2784 	// allocate space
2785 	char** flatArgs = (char**)malloc(size);
2786 	if (flatArgs == NULL)
2787 		return B_NO_MEMORY;
2788 
2789 	char** slot = flatArgs;
2790 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
2791 
2792 	// copy arguments and environment
2793 	for (int32 i = 0; i < argCount; i++) {
2794 		int32 argSize = strlen(args[i]) + 1;
2795 		memcpy(stringSpace, args[i], argSize);
2796 		*slot++ = stringSpace;
2797 		stringSpace += argSize;
2798 	}
2799 
2800 	*slot++ = NULL;
2801 
2802 	for (int32 i = 0; i < envCount; i++) {
2803 		int32 envSize = strlen(env[i]) + 1;
2804 		memcpy(stringSpace, env[i], envSize);
2805 		*slot++ = stringSpace;
2806 		stringSpace += envSize;
2807 	}
2808 
2809 	*slot++ = NULL;
2810 
2811 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
2812 		priority, parentID, flags, -1, 0);
2813 
2814 	free(flatArgs);
2815 		// load_image_internal() unset our variable if it took over ownership
2816 
2817 	return thread;
2818 }
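
/*	Usage sketch (hypothetical, not part of the original source): a kernel
	caller starting a userland program. With B_WAIT_TILL_LOADED the new team's
	main thread is left suspended, so it has to be resumed explicitly.

	const char* args[] = { "/bin/ls", "-l" };
	const char* env[] = { NULL };
	thread_id thread = load_image(2, args, env);
	if (thread >= B_OK) {
		resume_thread(thread);
		status_t returnCode;
		wait_for_thread(thread, &returnCode);
	}
*/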
2819 
2820 
2821 status_t
2822 wait_for_team(team_id id, status_t *_returnCode)
2823 {
2824 	struct team *team;
2825 	thread_id thread;
2826 	cpu_status state;
2827 
2828 	// find main thread and wait for that
2829 
2830 	state = disable_interrupts();
2831 	GRAB_TEAM_LOCK();
2832 
2833 	team = team_get_team_struct_locked(id);
2834 	if (team != NULL && team->main_thread != NULL)
2835 		thread = team->main_thread->id;
2836 	else
2837 		thread = B_BAD_THREAD_ID;
2838 
2839 	RELEASE_TEAM_LOCK();
2840 	restore_interrupts(state);
2841 
2842 	if (thread < 0)
2843 		return thread;
2844 
2845 	return wait_for_thread(thread, _returnCode);
2846 }
2847 
2848 
2849 status_t
2850 kill_team(team_id id)
2851 {
2852 	status_t status = B_OK;
2853 	thread_id threadID = -1;
2854 	struct team *team;
2855 	cpu_status state;
2856 
2857 	state = disable_interrupts();
2858 	GRAB_TEAM_LOCK();
2859 
2860 	team = team_get_team_struct_locked(id);
2861 	if (team != NULL) {
2862 		if (team != sKernelTeam) {
2863 			threadID = team->id;
2864 				// the team ID is the same as the ID of its main thread
2865 		} else
2866 			status = B_NOT_ALLOWED;
2867 	} else
2868 		status = B_BAD_THREAD_ID;
2869 
2870 	RELEASE_TEAM_LOCK();
2871 	restore_interrupts(state);
2872 
2873 	if (status < B_OK)
2874 		return status;
2875 
2876 	// just kill the main thread in the team. The cleanup code there will
2877 	// take care of the team
2878 	return kill_thread(threadID);
2879 }
2880 
2881 
2882 status_t
2883 _get_team_info(team_id id, team_info *info, size_t size)
2884 {
2885 	cpu_status state;
2886 	status_t status = B_OK;
2887 	struct team *team;
2888 
2889 	state = disable_interrupts();
2890 	GRAB_TEAM_LOCK();
2891 
2892 	if (id == B_CURRENT_TEAM)
2893 		team = thread_get_current_thread()->team;
2894 	else
2895 		team = team_get_team_struct_locked(id);
2896 
2897 	if (team == NULL) {
2898 		status = B_BAD_TEAM_ID;
2899 		goto err;
2900 	}
2901 
2902 	status = fill_team_info(team, info, size);
2903 
2904 err:
2905 	RELEASE_TEAM_LOCK();
2906 	restore_interrupts(state);
2907 
2908 	return status;
2909 }
2910 
2911 
2912 status_t
2913 _get_next_team_info(int32 *cookie, team_info *info, size_t size)
2914 {
2915 	status_t status = B_BAD_TEAM_ID;
2916 	struct team *team = NULL;
2917 	int32 slot = *cookie;
2918 	team_id lastTeamID;
2919 	cpu_status state;
2920 
2921 	if (slot < 1)
2922 		slot = 1;
2923 
2924 	state = disable_interrupts();
2925 	GRAB_TEAM_LOCK();
2926 
2927 	lastTeamID = peek_next_thread_id();
2928 	if (slot >= lastTeamID)
2929 		goto err;
2930 
2931 	// get next valid team
2932 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
2933 		slot++;
2934 
2935 	if (team) {
2936 		status = fill_team_info(team, info, size);
2937 		*cookie = ++slot;
2938 	}
2939 
2940 err:
2941 	RELEASE_TEAM_LOCK();
2942 	restore_interrupts(state);
2943 
2944 	return status;
2945 }
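
/*	Usage sketch (hypothetical, not part of the original source): the cookie
	protocol matches the public get_next_team_info() API -- start at 0 and
	call until an error is returned.

	int32 cookie = 0;
	team_info info;
	while (_get_next_team_info(&cookie, &info, sizeof(info)) == B_OK)
		dprintf("team %ld: %s\n", info.team, info.args);
*/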
2946 
2947 
2948 status_t
2949 _get_team_usage_info(team_id id, int32 who, team_usage_info *info, size_t size)
2950 {
2951 	bigtime_t kernelTime = 0, userTime = 0;
2952 	status_t status = B_OK;
2953 	struct team *team;
2954 	cpu_status state;
2955 
2956 	if (size != sizeof(team_usage_info)
2957 		|| (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN))
2958 		return B_BAD_VALUE;
2959 
2960 	state = disable_interrupts();
2961 	GRAB_TEAM_LOCK();
2962 
2963 	if (id == B_CURRENT_TEAM)
2964 		team = thread_get_current_thread()->team;
2965 	else
2966 		team = team_get_team_struct_locked(id);
2967 
2968 	if (team == NULL) {
2969 		status = B_BAD_TEAM_ID;
2970 		goto out;
2971 	}
2972 
2973 	switch (who) {
2974 		case B_TEAM_USAGE_SELF:
2975 		{
2976 			struct thread *thread = team->thread_list;
2977 
2978 			for (; thread != NULL; thread = thread->team_next) {
2979 				kernelTime += thread->kernel_time;
2980 				userTime += thread->user_time;
2981 			}
2982 
2983 			kernelTime += team->dead_threads_kernel_time;
2984 			userTime += team->dead_threads_user_time;
2985 			break;
2986 		}
2987 
2988 		case B_TEAM_USAGE_CHILDREN:
2989 		{
2990 			struct team *child = team->children;
2991 			for (; child != NULL; child = child->siblings_next) {
2992 				struct thread *thread = child->thread_list;
2993 
2994 				for (; thread != NULL; thread = thread->team_next) {
2995 					kernelTime += thread->kernel_time;
2996 					userTime += thread->user_time;
2997 				}
2998 
2999 				kernelTime += child->dead_threads_kernel_time;
3000 				userTime += child->dead_threads_user_time;
3001 			}
3002 
3003 			kernelTime += team->dead_children->kernel_time;
3004 			userTime += team->dead_children->user_time;
3005 			break;
3006 		}
3007 	}
3008 
3009 out:
3010 	RELEASE_TEAM_LOCK();
3011 	restore_interrupts(state);
3012 
3013 	if (status == B_OK) {
3014 		info->kernel_time = kernelTime;
3015 		info->user_time = userTime;
3016 	}
3017 
3018 	return status;
3019 }
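
/*	Usage sketch (hypothetical, not part of the original source): querying the
	CPU time consumed by the current team and its (dead) threads.

	team_usage_info usage;
	if (_get_team_usage_info(B_CURRENT_TEAM, B_TEAM_USAGE_SELF, &usage,
			sizeof(usage)) == B_OK) {
		dprintf("kernel: %lld us, user: %lld us\n", usage.kernel_time,
			usage.user_time);
	}
*/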
3020 
3021 
3022 pid_t
3023 getpid(void)
3024 {
3025 	return thread_get_current_thread()->team->id;
3026 }
3027 
3028 
3029 pid_t
3030 getppid(void)
3031 {
3032 	struct team *team = thread_get_current_thread()->team;
3033 	cpu_status state;
3034 	pid_t parent;
3035 
3036 	state = disable_interrupts();
3037 	GRAB_TEAM_LOCK();
3038 
3039 	parent = team->parent->id;
3040 
3041 	RELEASE_TEAM_LOCK();
3042 	restore_interrupts(state);
3043 
3044 	return parent;
3045 }
3046 
3047 
3048 pid_t
3049 getpgid(pid_t process)
3050 {
3051 	struct thread *thread;
3052 	pid_t result = -1;
3053 	cpu_status state;
3054 
3055 	if (process == 0)
3056 		process = thread_get_current_thread()->team->id;
3057 
3058 	state = disable_interrupts();
3059 	GRAB_THREAD_LOCK();
3060 
3061 	thread = thread_get_thread_struct_locked(process);
3062 	if (thread != NULL)
3063 		result = thread->team->group_id;
3064 
3065 	RELEASE_THREAD_LOCK();
3066 	restore_interrupts(state);
3067 
3068 	return thread != NULL ? result : B_BAD_VALUE;
3069 }
3070 
3071 
3072 pid_t
3073 getsid(pid_t process)
3074 {
3075 	struct thread *thread;
3076 	pid_t result = -1;
3077 	cpu_status state;
3078 
3079 	if (process == 0)
3080 		process = thread_get_current_thread()->team->id;
3081 
3082 	state = disable_interrupts();
3083 	GRAB_THREAD_LOCK();
3084 
3085 	thread = thread_get_thread_struct_locked(process);
3086 	if (thread != NULL)
3087 		result = thread->team->session_id;
3088 
3089 	RELEASE_THREAD_LOCK();
3090 	restore_interrupts(state);
3091 
3092 	return thread != NULL ? result : B_BAD_VALUE;
3093 }
3094 
3095 
3096 //	#pragma mark - User syscalls
3097 
3098 
3099 status_t
3100 _user_exec(const char *userPath, const char* const* userFlatArgs,
3101 	size_t flatArgsSize, int32 argCount, int32 envCount)
3102 {
3103 	// NOTE: Since this function normally doesn't return, don't use automatic
3104 	// variables that need destruction in the function scope.
3105 	char path[B_PATH_NAME_LENGTH];
3106 
3107 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3108 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3109 		return B_BAD_ADDRESS;
3110 
3111 	// copy and relocate the flat arguments
3112 	char** flatArgs;
3113 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3114 		argCount, envCount, flatArgs);
3115 
3116 	if (error == B_OK) {
3117 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3118 			envCount);
3119 			// this one only returns in case of error
3120 	}
3121 
3122 	free(flatArgs);
3123 	return error;
3124 }
3125 
3126 
3127 thread_id
3128 _user_fork(void)
3129 {
3130 	return fork_team();
3131 }
3132 
3133 
3134 thread_id
3135 _user_wait_for_child(thread_id child, uint32 flags, int32 *_userReason, status_t *_userReturnCode)
3136 {
3137 	status_t returnCode;
3138 	int32 reason;
3139 	thread_id deadChild;
3140 
3141 	if ((_userReason != NULL && !IS_USER_ADDRESS(_userReason))
3142 		|| (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode)))
3143 		return B_BAD_ADDRESS;
3144 
3145 	deadChild = wait_for_child(child, flags, &reason, &returnCode);
3146 
3147 	if (deadChild >= B_OK) {
3148 		// copy result data on successful completion
3149 		if ((_userReason != NULL
3150 				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
3151 			|| (_userReturnCode != NULL
3152 				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
3153 					< B_OK)) {
3154 			return B_BAD_ADDRESS;
3155 		}
3156 
3157 		return deadChild;
3158 	}
3159 
3160 	return syscall_restart_handle_post(deadChild);
3161 }
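
/*	Userland usage sketch (hypothetical, not part of the original source):
	this syscall backs the POSIX wait()/waitpid() functions in libroot, e.g.:

	pid_t child = fork();
	if (child == 0)
		_exit(0);

	int status;
	pid_t reaped = waitpid(child, &status, 0);
		// ends up here via _user_wait_for_child()
*/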
3162 
3163 
3164 pid_t
3165 _user_process_info(pid_t process, int32 which)
3166 {
3167 	// we only allow returning the parent of the current process
3168 	if (which == PARENT_ID
3169 		&& process != 0 && process != thread_get_current_thread()->team->id)
3170 		return B_BAD_VALUE;
3171 
3172 	switch (which) {
3173 		case SESSION_ID:
3174 			return getsid(process);
3175 		case GROUP_ID:
3176 			return getpgid(process);
3177 		case PARENT_ID:
3178 			return getppid();
3179 	}
3180 
3181 	return B_BAD_VALUE;
3182 }
3183 
3184 
3185 pid_t
3186 _user_setpgid(pid_t processID, pid_t groupID)
3187 {
3188 	struct thread *thread = thread_get_current_thread();
3189 	struct team *currentTeam = thread->team;
3190 	struct team *team;
3191 
3192 	if (groupID < 0)
3193 		return B_BAD_VALUE;
3194 
3195 	if (processID == 0)
3196 		processID = currentTeam->id;
3197 
3198 	// if the group ID is not specified, use the target process' ID
3199 	if (groupID == 0)
3200 		groupID = processID;
3201 
3202 	if (processID == currentTeam->id) {
3203 		// we set our own group
3204 
3205 		// we must not change our process group ID if we're a session leader
3206 		if (is_session_leader(currentTeam))
3207 			return B_NOT_ALLOWED;
3208 	} else {
3209 		// another team is the target of the call -- check it out
3210 		InterruptsSpinLocker _(gTeamSpinlock);
3211 
3212 		team = team_get_team_struct_locked(processID);
3213 		if (team == NULL)
3214 			return ESRCH;
3215 
3216 		// The team must be a child of the calling team and in the same session.
3217 		// (If that's the case it isn't a session leader either.)
3218 		if (team->parent != currentTeam
3219 			|| team->session_id != currentTeam->session_id) {
3220 			return B_NOT_ALLOWED;
3221 		}
3222 
3223 		if (team->group_id == groupID)
3224 			return groupID;
3225 
3226 		// The call is also supposed to fail on a child that has already
3227 		// executed exec*() [EACCES].
3228 		if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
3229 			return EACCES;
3230 	}
3231 
3232 	struct process_group *group = NULL;
3233 	if (groupID == processID) {
3234 		// A new process group might be needed.
3235 		group = create_process_group(groupID);
3236 		if (group == NULL)
3237 			return B_NO_MEMORY;
3238 
3239 		// Assume orphaned. We consider the situation of the team's parent
3240 		// below.
3241 		group->orphaned = true;
3242 	}
3243 
3244 	status_t status = B_OK;
3245 	struct process_group *freeGroup = NULL;
3246 
3247 	InterruptsSpinLocker locker(gTeamSpinlock);
3248 
3249 	team = team_get_team_struct_locked(processID);
3250 	if (team != NULL) {
3251 		// check the conditions again -- they might have changed in the meantime
3252 		if (is_session_leader(team)
3253 			|| team->session_id != currentTeam->session_id) {
3254 			status = B_NOT_ALLOWED;
3255 		} else if (team != currentTeam
3256 				&& (team->flags & TEAM_FLAG_EXEC_DONE) != 0) {
3257 			status = EACCES;
3258 		} else if (team->group_id == groupID) {
3259 			// the team is already in the desired process group
3260 			freeGroup = group;
3261 		} else {
3262 			// Check if a process group with the requested ID already exists.
3263 			struct process_group *targetGroup
3264 				= team_get_process_group_locked(team->group->session, groupID);
3265 			if (targetGroup != NULL) {
3266 				// In case of processID == groupID we have to free the
3267 				// allocated group.
3268 				freeGroup = group;
3269 			} else if (processID == groupID) {
3270 				// We created a new process group; insert it into the
3271 				// team's session.
3272 				insert_group_into_session(team->group->session, group);
3273 				targetGroup = group;
3274 			}
3275 
3276 			if (targetGroup != NULL) {
3277 				// we got a group, let's move the team there
3278 				process_group* oldGroup = team->group;
3279 
3280 				remove_team_from_group(team);
3281 				insert_team_into_group(targetGroup, team);
3282 
3283 				// Update the "orphaned" flag of all potentially affected
3284 				// groups.
3285 
3286 				// the team's old group
3287 				if (oldGroup->teams != NULL) {
3288 					oldGroup->orphaned = false;
3289 					update_orphaned_process_group(oldGroup, -1);
3290 				}
3291 
3292 				// the team's new group
3293 				struct team* parent = team->parent;
3294 				targetGroup->orphaned &= parent == NULL
3295 					|| parent->group == targetGroup
3296 					|| team->parent->session_id != team->session_id;
3297 
3298 				// children's groups
3299 				struct team* child = team->children;
3300 				while (child != NULL) {
3301 					child->group->orphaned = false;
3302 					update_orphaned_process_group(child->group, -1);
3303 
3304 					child = child->siblings_next;
3305 				}
3306 			} else
3307 				status = B_NOT_ALLOWED;
3308 		}
3309 	} else
3310 		status = B_NOT_ALLOWED;
3311 
3312 	// Changing the process group might have changed the situation for a parent
3313 	// waiting in wait_for_child(). Hence we notify it.
3314 	if (status == B_OK)
3315 		team->parent->dead_children->condition_variable.NotifyAll(false);
3316 
3317 	locker.Unlock();
3318 
3319 	if (status != B_OK) {
3320 		// in case of error, the group hasn't been added into the hash
3321 		team_delete_process_group(group);
3322 	}
3323 
3324 	team_delete_process_group(freeGroup);
3325 
3326 	return status == B_OK ? groupID : status;
3327 }
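
/*	Userland usage sketch (hypothetical, not part of the original source): a
	shell creating a new job typically calls setpgid() from both the parent
	and the child to avoid a race; both calls end up here.

	pid_t child = fork();
	if (child == 0)
		setpgid(0, 0);			// child: become leader of a new group
	else if (child > 0)
		setpgid(child, child);	// parent: same call, tolerant of the race
*/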
3328 
3329 
3330 pid_t
3331 _user_setsid(void)
3332 {
3333 	struct team *team = thread_get_current_thread()->team;
3334 	struct process_session *session;
3335 	struct process_group *group;
3336 	cpu_status state;
3337 	bool failed = false;
3338 
3339 	// the team must not already be a process group leader
3340 	if (is_process_group_leader(team))
3341 		return B_NOT_ALLOWED;
3342 
3343 	group = create_process_group(team->id);
3344 	if (group == NULL)
3345 		return B_NO_MEMORY;
3346 
3347 	session = create_process_session(group->id);
3348 	if (session == NULL) {
3349 		team_delete_process_group(group);
3350 		return B_NO_MEMORY;
3351 	}
3352 
3353 	state = disable_interrupts();
3354 	GRAB_TEAM_LOCK();
3355 
3356 	// this may have changed since the check above
3357 	if (!is_process_group_leader(team)) {
3358 		remove_team_from_group(team);
3359 
3360 		insert_group_into_session(session, group);
3361 		insert_team_into_group(group, team);
3362 	} else
3363 		failed = true;
3364 
3365 	RELEASE_TEAM_LOCK();
3366 	restore_interrupts(state);
3367 
3368 	if (failed) {
3369 		team_delete_process_group(group);
3370 		free(session);
3371 		return B_NOT_ALLOWED;
3372 	}
3373 
3374 	return team->group_id;
3375 }
3376 
3377 
3378 status_t
3379 _user_wait_for_team(team_id id, status_t *_userReturnCode)
3380 {
3381 	status_t returnCode;
3382 	status_t status;
3383 
3384 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
3385 		return B_BAD_ADDRESS;
3386 
3387 	status = wait_for_team(id, &returnCode);
3388 	if (status >= B_OK && _userReturnCode != NULL) {
3389 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode)) < B_OK)
3390 			return B_BAD_ADDRESS;
3391 		return B_OK;
3392 	}
3393 
3394 	return syscall_restart_handle_post(status);
3395 }
3396 
3397 
3398 thread_id
3399 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
3400 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
3401 	port_id errorPort, uint32 errorToken)
3402 {
3403 	TRACE(("_user_load_image: argc = %ld\n", argCount));
3404 
3405 	if (argCount < 1)
3406 		return B_BAD_VALUE;
3407 
3408 	// copy and relocate the flat arguments
3409 	char** flatArgs;
3410 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3411 		argCount, envCount, flatArgs);
3412 	if (error != B_OK)
3413 		return error;
3414 
3415 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
3416 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
3417 		errorToken);
3418 
3419 	free(flatArgs);
3420 		// load_image_internal() unset our variable if it took over ownership
3421 
3422 	return thread;
3423 }
3424 
3425 
3426 void
3427 _user_exit_team(status_t returnValue)
3428 {
3429 	struct thread *thread = thread_get_current_thread();
3430 
3431 	thread->exit.status = returnValue;
3432 	thread->exit.reason = THREAD_RETURN_EXIT;
3433 
3434 	send_signal(thread->id, SIGKILL);
3435 }
3436 
3437 
3438 status_t
3439 _user_kill_team(team_id team)
3440 {
3441 	return kill_team(team);
3442 }
3443 
3444 
3445 status_t
3446 _user_get_team_info(team_id id, team_info *userInfo)
3447 {
3448 	status_t status;
3449 	team_info info;
3450 
3451 	if (!IS_USER_ADDRESS(userInfo))
3452 		return B_BAD_ADDRESS;
3453 
3454 	status = _get_team_info(id, &info, sizeof(team_info));
3455 	if (status == B_OK) {
3456 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3457 			return B_BAD_ADDRESS;
3458 	}
3459 
3460 	return status;
3461 }
3462 
3463 
3464 status_t
3465 _user_get_next_team_info(int32 *userCookie, team_info *userInfo)
3466 {
3467 	status_t status;
3468 	team_info info;
3469 	int32 cookie;
3470 
3471 	if (!IS_USER_ADDRESS(userCookie)
3472 		|| !IS_USER_ADDRESS(userInfo)
3473 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3474 		return B_BAD_ADDRESS;
3475 
3476 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
3477 	if (status != B_OK)
3478 		return status;
3479 
3480 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3481 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3482 		return B_BAD_ADDRESS;
3483 
3484 	return status;
3485 }
3486 
3487 
3488 team_id
3489 _user_get_current_team(void)
3490 {
3491 	return team_get_current_team_id();
3492 }
3493 
3494 
3495 status_t
3496 _user_get_team_usage_info(team_id team, int32 who, team_usage_info *userInfo, size_t size)
3497 {
3498 	team_usage_info info;
3499 	status_t status;
3500 
3501 	if (!IS_USER_ADDRESS(userInfo))
3502 		return B_BAD_ADDRESS;
3503 
3504 	status = _get_team_usage_info(team, who, &info, size);
3505 	if (status != B_OK)
3506 		return status;
3507 
3508 	if (user_memcpy(userInfo, &info, size) < B_OK)
3509 		return B_BAD_ADDRESS;
3510 
3511 	return status;
3512 }
3513 
3514