xref: /haiku/src/system/kernel/team.cpp (revision e8cd7007416a323259791ac09c013dcce2956976)
/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*!	Team functions */


#include <team.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>

#include <OS.h>

#include <AutoDeleter.h>
#include <FindDirectory.h>

#include <extended_system_info_defs.h>

#include <boot_device.h>
#include <elf.h>
#include <file_cache.h>
#include <fs/KPath.h>
#include <heap.h>
#include <int.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <Notifications.h>
#include <port.h>
#include <posix/realtime_sem.h>
#include <posix/xsi_semaphore.h>
#include <sem.h>
#include <syscall_process_info.h>
#include <syscall_restart.h>
#include <syscalls.h>
#include <tls.h>
#include <tracing.h>
#include <user_runtime.h>
#include <user_thread.h>
#include <usergroup.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <util/AutoLock.h>
#include <util/khash.h>

//#define TRACE_TEAM
#ifdef TRACE_TEAM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


struct team_key {
	team_id id;
};

struct team_arg {
	char	*path;
	char	**flat_args;
	size_t	flat_args_size;
	uint32	arg_count;
	uint32	env_count;
	mode_t	umask;
	port_id	error_port;
	uint32	error_token;
};

struct fork_arg {
	area_id				user_stack_area;
	addr_t				user_stack_base;
	size_t				user_stack_size;
	addr_t				user_local_storage;
	sigset_t			sig_block_mask;
	struct sigaction	sig_action[32];
	addr_t				signal_stack_base;
	size_t				signal_stack_size;
	bool				signal_stack_enabled;

	struct user_thread* user_thread;

	struct arch_fork_arg arch_info;
};

class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};

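// Hash table definition for sTeamHash, mapping team IDs to Team structures;
// the teams are chained via their Team::next link.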
struct TeamHashDefinition {
	typedef team_id		KeyType;
	typedef Team		ValueType;

	size_t HashKey(team_id key) const
	{
		return key;
	}

	size_t Hash(Team* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(team_id key, Team* value) const
	{
		return value->id == key;
	}

	Team*& GetLink(Team* value) const
	{
		return value->next;
	}
};

typedef BOpenHashTable<TeamHashDefinition> TeamHashTable;


static TeamHashTable sTeamHash;
static hash_table* sGroupHash = NULL;
static Team* sKernelTeam = NULL;

// some arbitrarily chosen limits - should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

static TeamNotificationService sNotificationService;

spinlock gTeamSpinlock = B_SPINLOCK_INITIALIZER;


// #pragma mark - Tracing


#if TEAM_TRACING
namespace TeamTracing {

class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %ld", fForkedThread);
	}

private:
	thread_id			fForkedThread;
};


class ExecTeam : public AbstractTraceEntry {
public:
	ExecTeam(const char* path, int32 argCount, const char* const* args,
			int32 envCount, const char* const* env)
		:
		fArgCount(argCount),
		fArgs(NULL)
	{
		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
			false);

		// determine the buffer size we need for the args
		size_t argBufferSize = 0;
		for (int32 i = 0; i < argCount; i++)
			argBufferSize += strlen(args[i]) + 1;

		// allocate a buffer
		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
		if (fArgs) {
			char* buffer = fArgs;
			for (int32 i = 0; i < argCount; i++) {
				size_t argSize = strlen(args[i]) + 1;
				memcpy(buffer, args[i], argSize);
				buffer += argSize;
			}
		}

		// ignore env for the time being
		(void)envCount;
		(void)env;

		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team exec, \"%s\", args:",
			fPath != NULL ? fPath : "<too long>");

		if (fArgs != NULL) {
			char* args = fArgs;
			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
				out.Print(" \"%s\"", args);
				args += strlen(args) + 1;
			}
		} else
			out.Print(" <too long>");
	}

private:
	char*	fPath;
	int32	fArgCount;
	char*	fArgs;
};


static const char*
job_control_state_name(job_control_state state)
{
	switch (state) {
		case JOB_CONTROL_STATE_NONE:
			return "none";
		case JOB_CONTROL_STATE_STOPPED:
			return "stopped";
		case JOB_CONTROL_STATE_CONTINUED:
			return "continued";
		case JOB_CONTROL_STATE_DEAD:
			return "dead";
		default:
			return "invalid";
	}
}


class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, int signal)
		:
		fTeam(team),
		fNewState(newState),
		fSignal(signal)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %ld, "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;
};


class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %ld, "
			"flags: 0x%lx", fChild, fFlags);
	}

private:
	pid_t	fChild;
	uint32	fFlags;
};


class WaitForChildDone : public AbstractTraceEntry {
public:
	WaitForChildDone(const job_control_entry& entry)
		:
		fState(entry.state),
		fTeam(entry.thread),
		fStatus(entry.status),
		fReason(entry.reason),
		fSignal(entry.signal)
	{
		Initialized();
	}

	WaitForChildDone(status_t error)
		:
		fTeam(error)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		if (fTeam >= 0) {
			out.Print("team wait for child done, team: %ld, "
				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
				fTeam, job_control_state_name(fState), fStatus, fReason,
				fSignal);
		} else {
			out.Print("team wait for child failed, error: "
				"0x%lx, ", fTeam);
		}
	}

private:
	job_control_state	fState;
	team_id				fTeam;
	status_t			fStatus;
	uint16				fReason;
	uint16				fSignal;
};

}	// namespace TeamTracing

#	define T(x) new(std::nothrow) TeamTracing::x;
#else
#	define T(x) ;
#endif


//	#pragma mark - TeamNotificationService


TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}


void
TeamNotificationService::Notify(uint32 eventCode, Team* team)
{
	char eventBuffer[128];
	KMessage event;
	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
	event.AddInt32("event", eventCode);
	event.AddInt32("team", team->id);
	event.AddPointer("teamStruct", team);

	DefaultNotificationService::Notify(event, eventCode);
}

//	#pragma mark - Private functions


static void
_dump_team_info(Team* team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:               %ld (%#lx)\n", team->id, team->id);
	kprintf("name:             '%s'\n", team->name);
	kprintf("args:             '%s'\n", team->args);
	kprintf("next:             %p\n", team->next);
	kprintf("parent:           %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %ld)\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:         %p\n", team->children);
	kprintf("num_threads:      %d\n", team->num_threads);
	kprintf("state:            %d\n", team->state);
	kprintf("flags:            0x%lx\n", team->flags);
	kprintf("io_context:       %p\n", team->io_context);
	if (team->address_space)
		kprintf("address_space:    %p\n", team->address_space);
	kprintf("user data:        %p (area %ld)\n", (void*)team->user_data,
		team->user_data_area);
	kprintf("free user thread: %p\n", team->free_user_threads);
	kprintf("main_thread:      %p\n", team->main_thread);
	kprintf("thread_list:      %p\n", team->thread_list);
	kprintf("group_id:         %ld\n", team->group_id);
	kprintf("session_id:       %ld\n", team->session_id);
}

static int
dump_team_info(int argc, char** argv)
{
	team_id id = -1;
	bool found = false;

	if (argc < 2) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	id = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(id)) {
		// semi-hack
		_dump_team_info((Team*)id);
		return 0;
	}

	// walk through the team list, trying to match name or id
	for (TeamHashTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		if ((team->name && strcmp(argv[1], team->name) == 0)
			|| team->id == id) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found)
		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
	return 0;
}

static int
dump_teams(int argc, char** argv)
{
	kprintf("team           id  parent      name\n");

	for (TeamHashTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->name);
	}

	return 0;
}

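// Comparison and hash callbacks for the process group hash table
// (sGroupHash), set up via hash_init() in team_init().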
static int
process_group_compare(void* _group, const void* _key)
{
	struct process_group* group = (struct process_group*)_group;
	const struct team_key* key = (const struct team_key*)_key;

	if (group->id == key->id)
		return 0;

	return 1;
}


static uint32
process_group_hash(void* _group, const void* _key, uint32 range)
{
	struct process_group* group = (struct process_group*)_group;
	const struct team_key* key = (const struct team_key*)_key;

	if (group != NULL)
		return group->id % range;

	return (uint32)key->id % range;
}

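/*!	Inserts \a team into the list of children of \a parent.
	Note: must have team lock held, like remove_team_from_parent().
*/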
static void
insert_team_into_parent(Team* parent, Team* team)
{
	ASSERT(parent != NULL);

	team->siblings_next = parent->children;
	parent->children = team;
	team->parent = parent;
}


/*!	Note: must have team lock held */
static void
remove_team_from_parent(Team* parent, Team* team)
{
	Team* child;
	Team* last = NULL;

	for (child = parent->children; child != NULL;
			child = child->siblings_next) {
		if (child == team) {
			if (last == NULL)
				parent->children = child->siblings_next;
			else
				last->siblings_next = child->siblings_next;

			team->parent = NULL;
			break;
		}
		last = child;
	}
}


/*!	Reparents each of our children.
	Note: must have team lock held.
*/
static void
reparent_children(Team* team)
{
	Team* child;

	while ((child = team->children) != NULL) {
		// remove the child from the current team and add it to the kernel team
		remove_team_from_parent(team, child);
		insert_team_into_parent(sKernelTeam, child);
	}

	// move job control entries too
	sKernelTeam->stopped_children.entries.MoveFrom(
		&team->stopped_children.entries);
	sKernelTeam->continued_children.entries.MoveFrom(
		&team->continued_children.entries);

	// Note, we don't move the dead children entries. Those will be deleted
	// when the team structure is deleted.
}


static bool
is_session_leader(Team* team)
{
	return team->session_id == team->id;
}


static bool
is_process_group_leader(Team* team)
{
	return team->group_id == team->id;
}

static void
deferred_delete_process_group(struct process_group* group)
{
	if (group == NULL)
		return;

	// remove_group_from_session() keeps this pointer around
	// only if the session can be freed as well
	if (group->session) {
		TRACE(("deferred_delete_process_group(): frees session %ld\n",
			group->session->id));
		deferred_free(group->session);
	}

	deferred_free(group);
}


/*!	Removes a group from a session, and puts the session object
	back into the session cache, if it's not used anymore.
	You must hold the team lock when calling this function.
*/
static void
remove_group_from_session(struct process_group* group)
{
	struct process_session* session = group->session;

	// the group must be in a session for this function to have any effect
	if (session == NULL)
		return;

	hash_remove(sGroupHash, group);

	// we cannot free the resource here, so we're keeping the group link
	// around - this way it'll be freed by free_process_group()
	if (--session->group_count > 0)
		group->session = NULL;
}

/*!	Team lock must be held.
*/
static void
acquire_process_group_ref(pid_t groupID)
{
	process_group* group = team_get_process_group_locked(NULL, groupID);
	if (group == NULL) {
		panic("acquire_process_group_ref(): unknown group ID: %ld", groupID);
		return;
	}

	group->refs++;
}


/*!	Team lock must be held.
*/
static void
release_process_group_ref(pid_t groupID)
{
	process_group* group = team_get_process_group_locked(NULL, groupID);
	if (group == NULL) {
		panic("release_process_group_ref(): unknown group ID: %ld", groupID);
		return;
	}

	if (group->refs <= 0) {
		panic("release_process_group_ref(%ld): ref count already 0", groupID);
		return;
	}

	if (--group->refs > 0)
		return;

	// group is no longer used

	remove_group_from_session(group);
	deferred_delete_process_group(group);
}


/*!	You must hold the team lock when calling this function. */
static void
insert_group_into_session(struct process_session* session,
	struct process_group* group)
{
	if (group == NULL)
		return;

	group->session = session;
	hash_insert(sGroupHash, group);
	session->group_count++;
}

/*!	You must hold the team lock when calling this function. */
static void
insert_team_into_group(struct process_group* group, Team* team)
{
	team->group = group;
	team->group_id = group->id;
	team->session_id = group->session->id;

	team->group_next = group->teams;
	group->teams = team;
	acquire_process_group_ref(group->id);
}


/*!	Removes the team from the group.

	\param team the team that'll be removed from its group
*/
static void
remove_team_from_group(Team* team)
{
	struct process_group* group = team->group;
	Team* current;
	Team* last = NULL;

	// the team must be in a group for this function to have any effect
	if (group == NULL)
		return;

	for (current = group->teams; current != NULL;
			current = current->group_next) {
		if (current == team) {
			if (last == NULL)
				group->teams = current->group_next;
			else
				last->group_next = current->group_next;

			team->group = NULL;
			break;
		}
		last = current;
	}

	team->group = NULL;
	team->group_next = NULL;

	release_process_group_ref(group->id);
}

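/*!	Allocates and initializes a new process_group structure with the given
	ID. The reference count starts at 0; the caller is responsible for
	inserting the group into a session.
*/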
static struct process_group*
create_process_group(pid_t id)
{
	struct process_group* group
		= (struct process_group*)malloc(sizeof(struct process_group));
	if (group == NULL)
		return NULL;

	group->id = id;
	group->refs = 0;
	group->session = NULL;
	group->teams = NULL;
	group->orphaned = true;
	return group;
}


static struct process_session*
create_process_session(pid_t id)
{
	struct process_session* session
		= (struct process_session*)malloc(sizeof(struct process_session));
	if (session == NULL)
		return NULL;

	session->id = id;
	session->group_count = 0;
	session->controlling_tty = -1;
	session->foreground_group = -1;

	return session;
}

static void
set_team_name(Team* team, const char* name)
{
	if (const char* lastSlash = strrchr(name, '/'))
		name = lastSlash + 1;

	strlcpy(team->name, name, B_OS_NAME_LENGTH);
}


static Team*
create_team_struct(const char* name, bool kernel)
{
	Team* team = new(std::nothrow) Team;
	if (team == NULL)
		return NULL;
	ObjectDeleter<Team> teamDeleter(team);

	team->next = team->siblings_next = team->children = team->parent = NULL;
	team->id = allocate_thread_id();
	set_team_name(team, name);
	team->args[0] = '\0';
	team->num_threads = 0;
	team->io_context = NULL;
	team->address_space = NULL;
	team->realtime_sem_context = NULL;
	team->xsi_sem_context = NULL;
	team->thread_list = NULL;
	team->main_thread = NULL;
	team->loading_info = NULL;
	team->state = TEAM_STATE_BIRTH;
	team->flags = 0;
	team->death_entry = NULL;
	team->user_data_area = -1;
	team->user_data = 0;
	team->used_user_data = 0;
	team->user_data_size = 0;
	team->free_user_threads = NULL;

	team->supplementary_groups = NULL;
	team->supplementary_group_count = 0;

	team->dead_threads_kernel_time = 0;
	team->dead_threads_user_time = 0;

	// dead threads
	list_init(&team->dead_threads);
	team->dead_threads_count = 0;

	// dead children
	team->dead_children.count = 0;
	team->dead_children.kernel_time = 0;
	team->dead_children.user_time = 0;

	// job control entry
	team->job_control_entry = new(nothrow) job_control_entry;
	if (team->job_control_entry == NULL)
		return NULL;
	ObjectDeleter<job_control_entry> jobControlEntryDeleter(
		team->job_control_entry);
	team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
	team->job_control_entry->thread = team->id;
	team->job_control_entry->team = team;

	list_init(&team->sem_list);
	list_init(&team->port_list);
	list_init(&team->image_list);
	list_init(&team->watcher_list);

	clear_team_debug_info(&team->debug_info, true);

	if (arch_team_init_team_struct(team, kernel) < 0)
		return NULL;

	// publish dead/stopped/continued children condition vars
	team->dead_children.condition_variable.Init(&team->dead_children,
		"team children");

	// keep all allocated structures
	jobControlEntryDeleter.Detach();
	teamDeleter.Detach();

	return team;
}


static void
delete_team_struct(Team* team)
{
	// get rid of all associated data
	team->PrepareForDeletion();

	while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
			&team->dead_threads)) {
		free(threadDeathEntry);
	}

	while (job_control_entry* entry = team->dead_children.entries.RemoveHead())
		delete entry;

	while (free_user_thread* entry = team->free_user_threads) {
		team->free_user_threads = entry->next;
		free(entry);
	}

	malloc_referenced_release(team->supplementary_groups);

	delete team->job_control_entry;
		// usually already NULL and transferred to the parent
	delete team;
}

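/*!	Creates the team's "user area": a fully locked area near
	KERNEL_USER_DATA_BASE from which the per-thread user_thread structures
	are handed out (cf. team_allocate_user_thread()).
*/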
static status_t
create_team_user_data(Team* team)
{
	void* address;
	size_t size = 4 * B_PAGE_SIZE;
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
	virtualRestrictions.address_specification = B_BASE_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area", size,
		B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, &virtualRestrictions,
		&physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = size;
	team->free_user_threads = NULL;

	return B_OK;
}


static void
delete_team_user_data(Team* team)
{
	if (team->user_data_area >= 0) {
		vm_delete_area(team->id, team->user_data_area, true);
		team->user_data = 0;
		team->used_user_data = 0;
		team->user_data_size = 0;
		team->user_data_area = -1;
		while (free_user_thread* entry = team->free_user_threads) {
			team->free_user_threads = entry->next;
			free(entry);
		}
	}
}

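/*!	Copies the flat arguments buffer from userland into kernel memory,
	verifies that the argument and environment pointer arrays are properly
	NULL-terminated and that all strings lie fully within the buffer, and
	relocates the string pointers to point into the kernel copy. On success
	\a _flatArgs is set to the kernel buffer, which the caller has to free()
	eventually.
*/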
static status_t
copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, char**& _flatArgs)
{
	if (argCount < 0 || envCount < 0)
		return B_BAD_VALUE;

	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
		return B_TOO_MANY_ARGS;
	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
		return B_BAD_VALUE;

	if (!IS_USER_ADDRESS(userFlatArgs))
		return B_BAD_ADDRESS;

	// allocate kernel memory
	char** flatArgs = (char**)malloc(flatArgsSize);
	if (flatArgs == NULL)
		return B_NO_MEMORY;

	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
		free(flatArgs);
		return B_BAD_ADDRESS;
	}

	// check and relocate the array
	status_t error = B_OK;
	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
	const char* stringEnd = (char*)flatArgs + flatArgsSize;
	for (int32 i = 0; i < argCount + envCount + 2; i++) {
		if (i == argCount || i == argCount + envCount + 1) {
			// check array null termination
			if (flatArgs[i] != NULL) {
				error = B_BAD_VALUE;
				break;
			}
		} else {
			// check string
			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
			size_t maxLen = stringEnd - arg;
			if (arg < stringBase || arg >= stringEnd
					|| strnlen(arg, maxLen) == maxLen) {
				error = B_BAD_VALUE;
				break;
			}

			flatArgs[i] = arg;
		}
	}

	if (error == B_OK)
		_flatArgs = flatArgs;
	else
		free(flatArgs);

	return error;
}

static void
free_team_arg(struct team_arg* teamArg)
{
	if (teamArg != NULL) {
		free(teamArg->flat_args);
		free(teamArg->path);
		free(teamArg);
	}
}


static status_t
create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
	port_id port, uint32 token)
{
	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
	if (teamArg == NULL)
		return B_NO_MEMORY;

	teamArg->path = strdup(path);
	if (teamArg->path == NULL) {
		free(teamArg);
		return B_NO_MEMORY;
	}

	// copy the args over

	teamArg->flat_args = flatArgs;
	teamArg->flat_args_size = flatArgsSize;
	teamArg->arg_count = argCount;
	teamArg->env_count = envCount;
	teamArg->umask = umask;
	teamArg->error_port = port;
	teamArg->error_token = token;

	*_teamArg = teamArg;
	return B_OK;
}

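/*!	Entry function of the main thread of a newly created userland team (also
	invoked by exec_team()): creates the main thread's user stack and TLS
	area, copies the flat arguments onto the stack, loads the runtime_loader
	image, and finally enters userspace.
*/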
static int32
team_create_thread_start(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char userStackName[128];
	uint32 sizeLeft;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount, i;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %ld\n", thread->id));

	// get a user thread for the main thread
	thread->user_thread = team_allocate_user_thread(team);

	// create an initial primary stack area

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size								| usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
	// TLS_SIZE							| TLS data
	// sizeof(user_space_program_args)	| argument structure for the runtime
	//									| loader
	// flat arguments size				| flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	sizeLeft = PAGE_ALIGN(USER_MAIN_THREAD_STACK_SIZE
		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE + TLS_SIZE
		+ sizeof(struct user_space_program_args) + teamArgs->flat_args_size);
	thread->user_stack_base
		= USER_STACK_REGION + USER_STACK_REGION_SIZE - sizeLeft;
	thread->user_stack_size = USER_MAIN_THREAD_STACK_SIZE
		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
		// the exact location at the end of the user stack area

	sprintf(userStackName, "%s_main_stack", team->name);
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address = (void*)thread->user_stack_base;
	virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	thread->user_stack_area = create_area_etc(team->id, userStackName, sizeLeft,
		B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0,
		&virtualRestrictions, &physicalRestrictions, NULL);
	if (thread->user_stack_area < 0) {
		dprintf("team_create_thread_start: could not create default user stack "
			"region: %s\n", strerror(thread->user_stack_area));

		free_team_arg(teamArgs);
		return thread->user_stack_area;
	}

	// now that the TLS area is allocated, initialize TLS
	arch_thread_init_tls(thread);

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// add args to info member
	team->args[0] = 0;
	strlcpy(team->args, path, sizeof(team->args));
	for (i = 1; i < argCount; i++) {
		strlcat(team->args, " ", sizeof(team->args));
		strlcat(team->args, teamArgs->flat_args[i], sizeof(team->args));
	}

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	// NOTE: Normally arch_thread_enter_userspace() never returns, i.e.
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = find_directory(B_BEOS_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	team->state = TEAM_STATE_NORMAL;

	// jump to the entry point in user space
	return arch_thread_enter_userspace(thread, entry, programArgs, NULL);
		// only returns in case of error
}

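/*!	Creates a new team executing the program given by the flat arguments and
	makes it a child of the team specified by \a parentID. Returns the ID of
	the new team's main thread; with B_WAIT_TILL_LOADED set in \a flags, the
	call additionally blocks until the runtime loader has finished or failed.
*/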
static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	Team* team;
	const char* threadName;
	thread_id thread;
	status_t status;
	cpu_status state;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	io_context* parentIOContext = NULL;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %ld\n",
		path, flatArgs, argCount));

	team = create_team_struct(path, false);
	if (team == NULL)
		return B_NO_MEMORY;

	if (flags & B_WAIT_TILL_LOADED) {
		loadingInfo.thread = thread_get_current_thread();
		loadingInfo.result = B_ERROR;
		loadingInfo.done = false;
		team->loading_info = &loadingInfo;
	}

	InterruptsSpinLocker teamLocker(gTeamSpinlock);

	// get the parent team
	Team* parent;

	if (parentID == B_CURRENT_TEAM)
		parent = thread_get_current_thread()->team;
	else
		parent = team_get_team_struct_locked(parentID);

	if (parent == NULL) {
		teamLocker.Unlock();
		status = B_BAD_TEAM_ID;
		goto err0;
	}

	// inherit the parent's user/group
	inherit_parent_user_and_group_locked(team, parent);

	sTeamHash.InsertUnchecked(team);
	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);
	sUsedTeams++;

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	teamLocker.Unlock();

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err3;

	// cut the path from the main thread name
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs
	thread = spawn_kernel_thread_etc(team_create_thread_start, threadName,
		B_NORMAL_PRIORITY, teamArgs, team->id, team->id);
	if (thread < 0) {
		status = thread;
		goto err5;
	}

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		Thread* mainThread;

		state = disable_interrupts();
		GRAB_THREAD_LOCK();

		mainThread = thread_get_thread_struct_locked(thread);
		if (mainThread) {
			// resume the team's main thread
			if (mainThread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(mainThread);

			// Now suspend ourselves until loading is finished.
			// We will be woken either by the thread, when it finished or
			// aborted loading, or when the team is going to die (e.g. is
			// killed). In either case the one setting `loadingInfo.done' is
			// responsible for removing the info from the team structure.
			while (!loadingInfo.done) {
				thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
				scheduler_reschedule();
			}
		} else {
			// Impressive! Someone managed to kill the thread in this short
			// time.
		}

		RELEASE_THREAD_LOCK();
		restore_interrupts(state);

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(team->id);

	return thread;

err5:
	sNotificationService.Notify(TEAM_REMOVED, team);
	delete_team_user_data(team);
err4:
	team->address_space->Put();
err3:
	vfs_put_io_context(team->io_context);
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	// Remove the team structure from the team hash table and delete the team
	// structure
	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);
	sTeamHash.RemoveUnchecked(team);

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

err0:
	delete_team_struct(team);

	return status;
}

/*!	Almost shuts down the current team and loads a new image into it.
	If successful, this function does not return and will take over ownership
	of the arguments provided.
	This function may only be called from user space.
*/
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	status_t status = B_OK;
	cpu_status state;
	Thread* thread;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
		path, argCount, envCount, team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// ToDo: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	if (team->main_thread != thread_get_current_thread())
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	state = disable_interrupts();
	GRAB_TEAM_LOCK();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	for (thread = team->thread_list; thread; thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID) {
			status = B_NOT_ALLOWED;
			break;
		}
	}

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	if (status != B_OK)
		return status;

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		// TODO: We should better keep the old user area in the first place.
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	set_team_name(team, path);

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	status = team_create_thread_start(teamArgs);
		// this one usually doesn't return...

	// sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that)
	exit_thread(status);

	// we return a status here since the signal that is sent by the
	// call above is not immediately handled
	return B_ERROR;
}

/*!	This is the first function to be called from the newly created
	main child thread.
	It will fill in everything that's left to do from fork_arg, and
	return from the parent's fork() syscall to the child.
*/
static int32
fork_team_thread_start(void* _args)
{
	Thread* thread = thread_get_current_thread();
	struct fork_arg* forkArgs = (struct fork_arg*)_args;

	struct arch_fork_arg archArgs = forkArgs->arch_info;
		// we need a local copy of the arch dependent part

	thread->user_stack_area = forkArgs->user_stack_area;
	thread->user_stack_base = forkArgs->user_stack_base;
	thread->user_stack_size = forkArgs->user_stack_size;
	thread->user_local_storage = forkArgs->user_local_storage;
	thread->sig_block_mask = forkArgs->sig_block_mask;
	thread->user_thread = forkArgs->user_thread;
	memcpy(thread->sig_action, forkArgs->sig_action,
		sizeof(forkArgs->sig_action));
	thread->signal_stack_base = forkArgs->signal_stack_base;
	thread->signal_stack_size = forkArgs->signal_stack_size;
	thread->signal_stack_enabled = forkArgs->signal_stack_enabled;

	arch_thread_init_tls(thread);

	free(forkArgs);

	// set frame of the parent thread to this one, too

	arch_restore_fork_frame(&archArgs);
		// This one won't return here

	return 0;
}

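/*!	Kernel backend of fork(): creates a copy of the current team, cloning its
	address space (areas are duplicated via vm_copy_area()), I/O context,
	realtime semaphore context, and image list, and spawns the child's main
	thread, which continues execution in fork_team_thread_start().
*/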
static thread_id
fork_team(void)
{
	Thread* parentThread = thread_get_current_thread();
	Team* parentTeam = parentThread->team;
	Team* team;
	struct fork_arg* forkArgs;
	struct area_info info;
	thread_id threadID;
	status_t status;
	int32 cookie;

	TRACE(("fork_team(): team %ld\n", parentTeam->id));

	if (parentTeam == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// create a new team
	// TODO: this is very similar to load_image_internal() - maybe we can do
	// something about it :)

	team = create_team_struct(parentTeam->name, false);
	if (team == NULL)
		return B_NO_MEMORY;

	strlcpy(team->args, parentTeam->args, sizeof(team->args));

	InterruptsSpinLocker teamLocker(gTeamSpinlock);

	// Inherit the parent's user/group.
	inherit_parent_user_and_group_locked(team, parentTeam);

	sTeamHash.InsertUnchecked(team);
	insert_team_into_parent(parentTeam, team);
	insert_team_into_group(parentTeam->group, team);
	sUsedTeams++;

	teamLocker.Unlock();

	// inherit some team debug flags
	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
		& B_TEAM_DEBUG_INHERITED_FLAGS;

	forkArgs = (struct fork_arg*)malloc(sizeof(struct fork_arg));
	if (forkArgs == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// duplicate the realtime sem context
	if (parentTeam->realtime_sem_context) {
		team->realtime_sem_context = clone_realtime_sem_context(
			parentTeam->realtime_sem_context);
		if (team->realtime_sem_context == NULL) {
			status = B_NO_MEMORY;
			goto err25;
		}
	}

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status < B_OK)
		goto err3;

	// copy all areas of the team
	// TODO: should be able to handle stack areas differently (ie. don't have
	// them copy-on-write)
	// TODO: all stacks of other threads than the current one could be left out

	forkArgs->user_thread = NULL;

	cookie = 0;
	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
		if (info.area == parentTeam->user_data_area) {
			// don't clone the user area; just create a new one
			status = create_team_user_data(team);
			if (status != B_OK)
				break;

			forkArgs->user_thread = team_allocate_user_thread(team);
		} else {
			void* address;
			area_id area = vm_copy_area(team->address_space->ID(), info.name,
				&address, B_CLONE_ADDRESS, info.protection, info.area);
			if (area < B_OK) {
				status = area;
				break;
			}

			if (info.area == parentThread->user_stack_area)
				forkArgs->user_stack_area = area;
		}
	}

	if (status < B_OK)
		goto err4;

	if (forkArgs->user_thread == NULL) {
#if KDEBUG
		panic("user data area not found, parent area is %ld",
			parentTeam->user_data_area);
#endif
		status = B_ERROR;
		goto err4;
	}

	forkArgs->user_stack_base = parentThread->user_stack_base;
	forkArgs->user_stack_size = parentThread->user_stack_size;
	forkArgs->user_local_storage = parentThread->user_local_storage;
	forkArgs->sig_block_mask = parentThread->sig_block_mask;
	memcpy(forkArgs->sig_action, parentThread->sig_action,
		sizeof(forkArgs->sig_action));
	forkArgs->signal_stack_base = parentThread->signal_stack_base;
	forkArgs->signal_stack_size = parentThread->signal_stack_size;
	forkArgs->signal_stack_enabled = parentThread->signal_stack_enabled;

	arch_store_fork_frame(&forkArgs->arch_info);

	// copy image list
	image_info imageInfo;
	cookie = 0;
	while (get_next_image_info(parentTeam->id, &cookie, &imageInfo) == B_OK) {
		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
		if (image < 0)
			goto err5;
	}

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// create a kernel thread under the context of the new team
	threadID = spawn_kernel_thread_etc(fork_team_thread_start,
		parentThread->name, parentThread->priority, forkArgs,
		team->id, team->id);
	if (threadID < 0) {
		status = threadID;
		goto err5;
	}

	// notify the debugger
	user_debug_team_created(team->id);

	T(TeamForked(threadID));

	resume_thread(threadID);
	return threadID;

err5:
	sNotificationService.Notify(TEAM_REMOVED, team);
	remove_images(team);
err4:
	team->address_space->RemoveAndPut();
err3:
	delete_realtime_sem_context(team->realtime_sem_context);
err25:
	vfs_put_io_context(team->io_context);
err2:
	free(forkArgs);
err1:
	// remove the team structure from the team hash table and delete the team
	// structure
	teamLocker.Lock();

	remove_team_from_group(team);
	remove_team_from_parent(parentTeam, team);
	sTeamHash.RemoveUnchecked(team);

	teamLocker.Unlock();

	delete_team_struct(team);

	return status;
}

/*!	Returns whether the specified \a team has any children belonging to the
	specified \a group.
	Must be called with the team lock held.
*/
static bool
has_children_in_group(Team* parent, pid_t groupID)
{
	Team* team;

	struct process_group* group = team_get_process_group_locked(
		parent->group->session, groupID);
	if (group == NULL)
		return false;

	for (team = group->teams; team; team = team->group_next) {
		if (team->parent == parent)
			return true;
	}

	return false;
}

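/*!	Returns the first entry in \a children matching \a id, using waitpid()
	semantics: a positive \a id matches the child with that ID, -1 matches
	any child, and an \a id < -1 matches any child in the process group -id.
*/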
static job_control_entry*
get_job_control_entry(team_job_control_children& children, pid_t id)
{
	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
		 job_control_entry* entry = it.Next();) {

		if (id > 0) {
			if (entry->thread == id)
				return entry;
		} else if (id == -1) {
			return entry;
		} else {
			pid_t processGroup
				= (entry->team ? entry->team->group_id : entry->group_id);
			if (processGroup == -id)
				return entry;
		}
	}

	return NULL;
}


static job_control_entry*
get_job_control_entry(Team* team, pid_t id, uint32 flags)
{
	job_control_entry* entry = get_job_control_entry(team->dead_children, id);

	if (entry == NULL && (flags & WCONTINUED) != 0)
		entry = get_job_control_entry(team->continued_children, id);

	if (entry == NULL && (flags & WUNTRACED) != 0)
		entry = get_job_control_entry(team->stopped_children, id);

	return entry;
}


job_control_entry::job_control_entry()
	:
	has_group_ref(false)
{
}


job_control_entry::~job_control_entry()
{
	if (has_group_ref) {
		InterruptsSpinLocker locker(gTeamSpinlock);
		release_process_group_ref(group_id);
	}
}


/*!	Team and thread lock must be held.
*/
void
job_control_entry::InitDeadState()
{
	if (team != NULL) {
		Thread* thread = team->main_thread;
		group_id = team->group_id;
		this->thread = thread->id;
		status = thread->exit.status;
		reason = thread->exit.reason;
		signal = thread->exit.signal;
		team = NULL;
		acquire_process_group_ref(group_id);
		has_group_ref = true;
	}
}


job_control_entry&
job_control_entry::operator=(const job_control_entry& other)
{
	state = other.state;
	thread = other.thread;
	has_group_ref = false;
	team = other.team;
	group_id = other.group_id;
	status = other.status;
	reason = other.reason;
	signal = other.signal;

	return *this;
}

/*!	This is the kernel backend for waitpid(). It is a bit more powerful than
	waitpid() when it comes to the reason why a thread has died.
*/
static thread_id
wait_for_child(pid_t child, uint32 flags, int32* _reason,
	status_t* _returnCode)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	struct job_control_entry foundEntry;
	struct job_control_entry* freeDeathEntry = NULL;
	status_t status = B_OK;

	TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));

	T(WaitForChild(child, flags));

	if (child == 0) {
		// wait for all children in the process group of the calling team
		child = -team->group_id;
	}

	bool ignoreFoundEntries = false;
	bool ignoreFoundEntriesChecked = false;

	while (true) {
		InterruptsSpinLocker locker(gTeamSpinlock);

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		// If we don't have an entry yet, check whether there are any children
		// complying to the process group specification at all.
		if (entry == NULL) {
			// No success yet -- check whether there are any children we could
			// wait for.
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else {
				if (Team* childTeam = team_get_team_struct_locked(child))
					childrenExist = childTeam->parent == team;
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;
			if (entry->state == JOB_CONTROL_STATE_DEAD) {
				// The child is dead. Reap its death entry.
				freeDeathEntry = entry;
				team->dead_children.entries.Remove(entry);
				team->dead_children.count--;
			} else {
				// The child is well. Reset its job control state.
				team_set_job_control_state(entry->team,
					JOB_CONTROL_STATE_NONE, 0, false);
			}
		}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children.condition_variable.Add(&deadWaitEntry);

		locker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			struct sigaction& handler = thread->sig_action[SIGCHLD - 1];
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// when we got here, we have a valid death entry, and
	// already got unregistered from the team or group
	int reason = 0;
	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			reason = foundEntry.reason;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			reason = THREAD_STOPPED;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			reason = THREAD_CONTINUED;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	*_returnCode = foundEntry.status;
	*_reason = (foundEntry.signal << 16) | reason;

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
	// status is available.
	if (is_signal_blocked(SIGCHLD)) {
		InterruptsSpinLocker locker(gTeamSpinlock);

		if (get_job_control_entry(team, child, flags) == NULL)
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));
	}

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}

1946 /*! Fills the team_info structure with information from the specified
1947 	team.
1948 	The team lock must be held when called.
1949 */
1950 static status_t
1951 fill_team_info(Team* team, team_info* info, size_t size)
1952 {
1953 	if (size != sizeof(team_info))
1954 		return B_BAD_VALUE;
1955 
1956 	// ToDo: Set more information in team_info
1957 	memset(info, 0, size);
1958 
1959 	info->team = team->id;
1960 	info->thread_count = team->num_threads;
1961 	info->image_count = count_images(team);
1962 	//info->area_count =
1963 	info->debugger_nub_thread = team->debug_info.nub_thread;
1964 	info->debugger_nub_port = team->debug_info.nub_port;
1965 	//info->uid =
1966 	//info->gid =
1967 
1968 	strlcpy(info->args, team->args, sizeof(info->args));
1969 	info->argc = 1;
1970 
1971 	return B_OK;
1972 }
1973 
1974 
1975 /*!	Updates the \c orphaned field of a process_group and returns its new value.
1976 	Interrupts must be disabled and team lock be held.
1977 */
1978 static bool
1979 update_orphaned_process_group(process_group* group, pid_t dyingProcess)
1980 {
1981 	// Orphaned Process Group: "A process group in which the parent of every
1982 	// member is either itself a member of the group or is not a member of the
1983 	// group's session." (Open Group Base Specs Issue 6)
1984 
1985 	// once orphaned, things won't change (exception: cf. setpgid())
1986 	if (group->orphaned)
1987 		return true;
1988 
1989 	Team* team = group->teams;
1990 	while (team != NULL) {
1991 		Team* parent = team->parent;
1992 		if (team->id != dyingProcess && parent != NULL
1993 			&& parent->id != dyingProcess
1994 			&& parent->group_id != group->id
1995 			&& parent->session_id == group->session->id) {
1996 			return false;
1997 		}
1998 
1999 		team = team->group_next;
2000 	}
2001 
2002 	group->orphaned = true;
2003 	return true;
2004 }
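
// Illustration (commentary, not code): a shell that is a session leader runs
// a pipeline in its own process group. While the shell lives, that group is
// not orphaned -- the members' parent (the shell) is in the same session but
// a different group. Once the shell exits, the group becomes orphaned; if it
// then contains stopped processes, it is sent SIGHUP and SIGCONT (see
// team_remove_team()).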
2005 
2006 
2007 /*!	Returns whether the process group contains stopped processes.
2008 	Interrupts must be disabled and team lock be held.
2009 */
2010 static bool
2011 process_group_has_stopped_processes(process_group* group)
2012 {
2013 	SpinLocker _(gThreadSpinlock);
2014 
2015 	Team* team = group->teams;
2016 	while (team != NULL) {
2017 		if (team->main_thread->state == B_THREAD_SUSPENDED)
2018 			return true;
2019 
2020 		team = team->group_next;
2021 	}
2022 
2023 	return false;
2024 }
2025 
2026 
2027 //	#pragma mark - Private kernel API
2028 
2029 
2030 status_t
2031 team_init(kernel_args* args)
2032 {
2033 	struct process_session* session;
2034 	struct process_group* group;
2035 
2036 	// create the team hash table
2037 	new(&sTeamHash) TeamHashTable;
2038 	if (sTeamHash.Init(32) != B_OK)
2039 		panic("Failed to init team hash table!");
2040 
2041 	sGroupHash = hash_init(16, offsetof(struct process_group, next),
2042 		&process_group_compare, &process_group_hash);
2043 
2044 	// create initial session and process groups
2045 
2046 	session = create_process_session(1);
2047 	if (session == NULL)
2048 		panic("Could not create initial session.\n");
2049 
2050 	group = create_process_group(1);
2051 	if (group == NULL)
2052 		panic("Could not create initial process group.\n");
2053 
2054 	insert_group_into_session(session, group);
2055 
2056 	// create the kernel team
2057 	sKernelTeam = create_team_struct("kernel_team", true);
2058 	if (sKernelTeam == NULL)
2059 		panic("could not create kernel team!\n");
2060 	strcpy(sKernelTeam->args, sKernelTeam->name);
2061 	sKernelTeam->state = TEAM_STATE_NORMAL;
2062 
2063 	sKernelTeam->saved_set_uid = 0;
2064 	sKernelTeam->real_uid = 0;
2065 	sKernelTeam->effective_uid = 0;
2066 	sKernelTeam->saved_set_gid = 0;
2067 	sKernelTeam->real_gid = 0;
2068 	sKernelTeam->effective_gid = 0;
2069 	sKernelTeam->supplementary_groups = NULL;
2070 	sKernelTeam->supplementary_group_count = 0;
2071 
2072 	insert_team_into_group(group, sKernelTeam);
2073 
2074 	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
2075 	if (sKernelTeam->io_context == NULL)
2076 		panic("could not create io_context for kernel team!\n");
2077 
2078 	// stick it in the team hash
2079 	sTeamHash.InsertUnchecked(sKernelTeam);
2080 
2081 	add_debugger_command_etc("team", &dump_team_info,
2082 		"Dump info about a particular team",
2083 		"[ <id> | <address> | <name> ]\n"
2084 		"Prints information about the specified team. If no argument is given\n"
2085 		"the current team is selected.\n"
2086 		"  <id>       - The ID of the team.\n"
2087 		"  <address>  - The address of the team structure.\n"
2088 		"  <name>     - The team's name.\n", 0);
2089 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2090 		"\n"
2091 		"Prints a list of all existing teams.\n", 0);
2092 
2093 	new(&sNotificationService) TeamNotificationService();
2094 
2095 	return B_OK;
2096 }
2097 
2098 
2099 int32
2100 team_max_teams(void)
2101 {
2102 	return sMaxTeams;
2103 }
2104 
2105 
2106 int32
2107 team_used_teams(void)
2108 {
2109 	return sUsedTeams;
2110 }
2111 
2112 
2113 /*!	Iterates through the list of teams. The team spinlock must be held.
2114 */
2115 Team*
2116 team_iterate_through_teams(team_iterator_callback callback, void* cookie)
2117 {
2118 	for (TeamHashTable::Iterator it = sTeamHash.GetIterator();
2119 		Team* team = it.Next();) {
2120 		if (callback(team, cookie))
2121 			return team;
2122 	}
2123 
2124 	return NULL;
2125 }
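
// A minimal usage sketch for the iterator above (hypothetical caller and
// callback; per the documentation, the team spinlock must be held around
// the call):
//
//	static bool
//	team_is_in_group(Team* team, void* cookie)
//	{
//		// returning true stops the iteration and yields this team
//		return team->group_id == *(pid_t*)cookie;
//	}
//
//	pid_t groupID = ...;
//	Team* team = team_iterate_through_teams(&team_is_in_group, &groupID);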
2126 
2127 
2128 /*!	Returns the death entry for the given child, if the team has one.
2129 	You need to have the team lock held when calling this function.
2130 */
2131 job_control_entry*
2132 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2133 {
2134 	if (child <= 0)
2135 		return NULL;
2136 
2137 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2138 		child);
2139 	if (entry) {
2140 		// only remove the entry if the caller is the parent of the found team
2141 		if (team_get_current_team_id() == entry->thread) {
2142 			team->dead_children.entries.Remove(entry);
2143 			team->dead_children.count--;
2144 			*_deleteEntry = true;
2145 		} else {
2146 			*_deleteEntry = false;
2147 		}
2148 	}
2149 
2150 	return entry;
2151 }
2152 
2153 
2154 /*! Quick check to see if we have a valid team ID. */
2155 bool
2156 team_is_valid(team_id id)
2157 {
2158 	Team* team;
2159 	cpu_status state;
2160 
2161 	if (id <= 0)
2162 		return false;
2163 
2164 	state = disable_interrupts();
2165 	GRAB_TEAM_LOCK();
2166 
2167 	team = team_get_team_struct_locked(id);
2168 
2169 	RELEASE_TEAM_LOCK();
2170 	restore_interrupts(state);
2171 
2172 	return team != NULL;
2173 }
2174 
2175 
2176 Team*
2177 team_get_team_struct_locked(team_id id)
2178 {
2179 	return sTeamHash.Lookup(id);
2180 }
2181 
2182 
2183 /*!	Searches the given session for a process group with the specified ID.
2184 	You must hold the team lock when you call this function.
2185 */
2186 struct process_group*
2187 team_get_process_group_locked(struct process_session* session, pid_t id)
2188 {
2189 	struct process_group* group;
2190 	struct team_key key;
2191 	key.id = id;
2192 
2193 	group = (struct process_group*)hash_lookup(sGroupHash, &key);
2194 	if (group != NULL && (session == NULL || session == group->session))
2195 		return group;
2196 
2197 	return NULL;
2198 }
2199 
2200 
2201 void
2202 team_delete_process_group(struct process_group* group)
2203 {
2204 	if (group == NULL)
2205 		return;
2206 
2207 	TRACE(("team_delete_process_group(id = %ld)\n", group->id));
2208 
2209 	// remove_group_from_session() keeps this pointer around
2210 	// only if the session can be freed as well
2211 	if (group->session) {
2212 		TRACE(("team_delete_process_group(): frees session %ld\n",
2213 			group->session->id));
2214 		free(group->session);
2215 	}
2216 
2217 	free(group);
2218 }
2219 
2220 
2221 void
2222 team_set_controlling_tty(int32 ttyIndex)
2223 {
2224 	Team* team = thread_get_current_thread()->team;
2225 
2226 	InterruptsSpinLocker _(gTeamSpinlock);
2227 
2228 	team->group->session->controlling_tty = ttyIndex;
2229 	team->group->session->foreground_group = -1;
2230 }
2231 
2232 
2233 int32
2234 team_get_controlling_tty()
2235 {
2236 	Team* team = thread_get_current_thread()->team;
2237 
2238 	InterruptsSpinLocker _(gTeamSpinlock);
2239 
2240 	return team->group->session->controlling_tty;
2241 }
2242 
2243 
2244 status_t
2245 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2246 {
2247 	Thread* thread = thread_get_current_thread();
2248 	Team* team = thread->team;
2249 
2250 	InterruptsSpinLocker locker(gTeamSpinlock);
2251 
2252 	process_session* session = team->group->session;
2253 
2254 	// must be the controlling tty of the calling process
2255 	if (session->controlling_tty != ttyIndex)
2256 		return ENOTTY;
2257 
2258 	// check process group -- must belong to our session
2259 	process_group* group = team_get_process_group_locked(session,
2260 		processGroupID);
2261 	if (group == NULL)
2262 		return B_BAD_VALUE;
2263 
2264 	// If we are in a background group, we can only do this unharmed if we
2265 	// ignore or block SIGTTOU. Otherwise the group is sent a SIGTTOU.
2266 	if (session->foreground_group != -1
2267 		&& session->foreground_group != team->group_id
2268 		&& thread->sig_action[SIGTTOU - 1].sa_handler != SIG_IGN
2269 		&& !is_signal_blocked(SIGTTOU)) {
2270 		pid_t groupID = team->group->id;
2271 		locker.Unlock();
2272 		send_signal(-groupID, SIGTTOU);
2273 		return B_INTERRUPTED;
2274 	}
2275 
2276 	team->group->session->foreground_group = processGroupID;
2277 
2278 	return B_OK;
2279 }
2280 
2281 
2282 /*!	Removes the specified team from the global team hash, and from its parent.
2283 	It also moves all of its children up to the parent.
2284 	You must hold the team lock when you call this function.
2285 */
2286 void
2287 team_remove_team(Team* team)
2288 {
2289 	Team* parent = team->parent;
2290 
2291 	// remember how long this team lasted
2292 	parent->dead_children.kernel_time += team->dead_threads_kernel_time
2293 		+ team->dead_children.kernel_time;
2294 	parent->dead_children.user_time += team->dead_threads_user_time
2295 		+ team->dead_children.user_time;
2296 
2297 	// Also grab the thread spinlock while removing the team from the hash.
2298 	// This makes the following sequence safe: grab teams lock, look up team,
2299 	// grab threads lock, unlock teams lock,
2300 	// mutex_lock_threads_lock(<team related lock>), as used in the VFS code to
2301 	// lock another team's IO context.
2302 	GRAB_THREAD_LOCK();
2303 	sTeamHash.RemoveUnchecked(team);
2304 	RELEASE_THREAD_LOCK();
2305 	sUsedTeams--;
2306 
2307 	team->state = TEAM_STATE_DEATH;
2308 
2309 	// If we're a controlling process (i.e. a session leader with controlling
2310 	// terminal), there's a bit of signalling we have to do.
2311 	if (team->session_id == team->id
2312 		&& team->group->session->controlling_tty >= 0) {
2313 		process_session* session = team->group->session;
2314 
2315 		session->controlling_tty = -1;
2316 
2317 		// send SIGHUP to the foreground
2318 		if (session->foreground_group >= 0) {
2319 			send_signal_etc(-session->foreground_group, SIGHUP,
2320 				SIGNAL_FLAG_TEAMS_LOCKED);
2321 		}
2322 
2323 		// send SIGHUP + SIGCONT to all newly-orphaned process groups with
2324 		// stopped processes
2325 		Team* child = team->children;
2326 		while (child != NULL) {
2327 			process_group* childGroup = child->group;
2328 			if (!childGroup->orphaned
2329 				&& update_orphaned_process_group(childGroup, team->id)
2330 				&& process_group_has_stopped_processes(childGroup)) {
2331 				send_signal_etc(-childGroup->id, SIGHUP,
2332 					SIGNAL_FLAG_TEAMS_LOCKED);
2333 				send_signal_etc(-childGroup->id, SIGCONT,
2334 					SIGNAL_FLAG_TEAMS_LOCKED);
2335 			}
2336 
2337 			child = child->siblings_next;
2338 		}
2339 	} else {
2340 		// update "orphaned" flags of all children's process groups
2341 		Team* child = team->children;
2342 		while (child != NULL) {
2343 			process_group* childGroup = child->group;
2344 			if (!childGroup->orphaned)
2345 				update_orphaned_process_group(childGroup, team->id);
2346 
2347 			child = child->siblings_next;
2348 		}
2349 
2350 		// update "orphaned" flag of this team's process group
2351 		update_orphaned_process_group(team->group, team->id);
2352 	}
2353 
2354 	// reparent each of the team's children
2355 	reparent_children(team);
2356 
2357 	// remove us from our process group
2358 	remove_team_from_group(team);
2359 
2360 	// remove us from our parent
2361 	remove_team_from_parent(parent, team);
2362 }
2363 
2364 
2365 /*!	Kills all threads but the main thread of the team.
2366 	To be called on exit of the team's main thread. The teams spinlock must be
2367 	held. The function may temporarily drop the spinlock, but will reacquire it
2368 	before it returns.
2369 	\param team The team in question.
2370 	\param state The CPU state as returned by disable_interrupts(). Will be
2371 		adjusted, if the function needs to unlock and relock.
2372 	\return The port of the debugger for the team, -1 if none. To be passed to
2373 		team_delete_team().
2374 */
2375 port_id
2376 team_shutdown_team(Team* team, cpu_status& state)
2377 {
2378 	ASSERT(thread_get_current_thread() == team->main_thread);
2379 
2380 	// Make sure debugging changes won't happen anymore.
2381 	port_id debuggerPort = -1;
2382 	while (true) {
2383 		// If a debugger change is in progress for the team, we'll have to
2384 		// wait until it is done.
2385 		ConditionVariableEntry waitForDebuggerEntry;
2386 		bool waitForDebugger = false;
2387 
2388 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2389 
2390 		if (team->debug_info.debugger_changed_condition != NULL) {
2391 			team->debug_info.debugger_changed_condition->Add(
2392 				&waitForDebuggerEntry);
2393 			waitForDebugger = true;
2394 		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2395 			// The team is being debugged. That will stop with the termination
2396 			// of the nub thread. Since we won't let go of the team lock until
2397 			// we have set team::death_entry or removed the team from the
2398 			// team hash, no one can install a debugger anymore. We fetch
2399 			// the debugger's port to send it a message at the bitter end.
2400 			debuggerPort = team->debug_info.debugger_port;
2401 		}
2402 
2403 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2404 
2405 		if (!waitForDebugger)
2406 			break;
2407 
2408 		// wait for the debugger change to be finished
2409 		RELEASE_TEAM_LOCK();
2410 		restore_interrupts(state);
2411 
2412 		waitForDebuggerEntry.Wait();
2413 
2414 		state = disable_interrupts();
2415 		GRAB_TEAM_LOCK();
2416 	}
2417 
2418 	// kill all threads but the main thread
2419 	team_death_entry deathEntry;
2420 	deathEntry.condition.Init(team, "team death");
2421 
2422 	while (true) {
2423 		team->death_entry = &deathEntry;
2424 		deathEntry.remaining_threads = 0;
2425 
2426 		Thread* thread = team->thread_list;
2427 		while (thread != NULL) {
2428 			if (thread != team->main_thread) {
2429 				send_signal_etc(thread->id, SIGKILLTHR,
2430 					B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED);
2431 				deathEntry.remaining_threads++;
2432 			}
2433 
2434 			thread = thread->team_next;
2435 		}
2436 
2437 		if (deathEntry.remaining_threads == 0)
2438 			break;
2439 
2440 		// there are threads to wait for
2441 		ConditionVariableEntry entry;
2442 		deathEntry.condition.Add(&entry);
2443 
2444 		RELEASE_TEAM_LOCK();
2445 		restore_interrupts(state);
2446 
2447 		entry.Wait();
2448 
2449 		state = disable_interrupts();
2450 		GRAB_TEAM_LOCK();
2451 	}
2452 
2453 	team->death_entry = NULL;
2454 		// That makes the team "undead" again, but we have the teams spinlock
2455 		// and our caller won't drop it until after removing the team from the
2456 		// teams hash table.
2457 
2458 	return debuggerPort;
2459 }
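
// A sketch of the intended calling sequence (hypothetical; the actual call
// site lives in the exit path of the team's main thread):
//
//	cpu_status state = disable_interrupts();
//	GRAB_TEAM_LOCK();
//
//	port_id debuggerPort = team_shutdown_team(team, state);
//		// may temporarily drop and reacquire the lock, adjusting "state"
//	team_remove_team(team);
//
//	RELEASE_TEAM_LOCK();
//	restore_interrupts(state);
//
//	team_delete_team(team, debuggerPort);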
2460 
2461 
2462 void
2463 team_delete_team(Team* team, port_id debuggerPort)
2464 {
2465 	team_id teamID = team->id;
2466 
2467 	ASSERT(team->num_threads == 0);
2468 
2469 	// If someone is waiting for this team to be loaded, but it dies
2470 	// unexpectedly before being done, we need to notify the waiting
2471 	// thread now.
2472 
2473 	cpu_status state = disable_interrupts();
2474 	GRAB_TEAM_LOCK();
2475 
2476 	if (team->loading_info) {
2477 		// there's indeed someone waiting
2478 		struct team_loading_info* loadingInfo = team->loading_info;
2479 		team->loading_info = NULL;
2480 
2481 		loadingInfo->result = B_ERROR;
2482 		loadingInfo->done = true;
2483 
2484 		GRAB_THREAD_LOCK();
2485 
2486 		// wake up the waiting thread
2487 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
2488 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
2489 
2490 		RELEASE_THREAD_LOCK();
2491 	}
2492 
2493 	RELEASE_TEAM_LOCK();
2494 	restore_interrupts(state);
2495 
2496 	// notify team watchers
2497 
2498 	{
2499 		// we're not reachable from anyone anymore at this point, so we
2500 		// can safely access the list without any locking
2501 		struct team_watcher* watcher;
2502 		while ((watcher = (struct team_watcher*)list_remove_head_item(
2503 				&team->watcher_list)) != NULL) {
2504 			watcher->hook(teamID, watcher->data);
2505 			free(watcher);
2506 		}
2507 	}
2508 
2509 	sNotificationService.Notify(TEAM_REMOVED, team);
2510 
2511 	// free team resources
2512 
2513 	vfs_put_io_context(team->io_context);
2514 	delete_realtime_sem_context(team->realtime_sem_context);
2515 	xsi_sem_undo(team);
2516 	delete_owned_ports(team);
2517 	sem_delete_owned_sems(team);
2518 	remove_images(team);
2519 	team->address_space->RemoveAndPut();
2520 
2521 	delete_team_struct(team);
2522 
2523 	// notify the debugger that the team is gone
2524 	user_debug_team_deleted(teamID, debuggerPort);
2525 }
2526 
2527 
2528 Team*
2529 team_get_kernel_team(void)
2530 {
2531 	return sKernelTeam;
2532 }
2533 
2534 
2535 team_id
2536 team_get_kernel_team_id(void)
2537 {
2538 	if (!sKernelTeam)
2539 		return 0;
2540 
2541 	return sKernelTeam->id;
2542 }
2543 
2544 
2545 team_id
2546 team_get_current_team_id(void)
2547 {
2548 	return thread_get_current_thread()->team->id;
2549 }
2550 
2551 
2552 status_t
2553 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
2554 {
2555 	cpu_status state;
2556 	Team* team;
2557 	status_t status;
2558 
2559 	// ToDo: we need to do something about B_SYSTEM_TEAM vs. its real ID (1)
2560 	if (id == 1) {
2561 		// we're the kernel team, so we don't have to go through all
2562 		// the hassle (locking and hash lookup)
2563 		*_addressSpace = VMAddressSpace::GetKernel();
2564 		return B_OK;
2565 	}
2566 
2567 	state = disable_interrupts();
2568 	GRAB_TEAM_LOCK();
2569 
2570 	team = team_get_team_struct_locked(id);
2571 	if (team != NULL) {
2572 		team->address_space->Get();
2573 		*_addressSpace = team->address_space;
2574 		status = B_OK;
2575 	} else
2576 		status = B_BAD_VALUE;
2577 
2578 	RELEASE_TEAM_LOCK();
2579 	restore_interrupts(state);
2580 
2581 	return status;
2582 }
2583 
2584 
2585 /*!	Sets the team's job control state.
2586 	Interrupts must be disabled and the team lock be held.
2587 	\a threadsLocked indicates whether the thread lock is being held, too.
2588 */
2589 void
2590 team_set_job_control_state(Team* team, job_control_state newState,
2591 	int signal, bool threadsLocked)
2592 {
2593 	if (team == NULL || team->job_control_entry == NULL)
2594 		return;
2595 
2596 	// don't touch anything if the state stays the same or the team is already
2597 	// dead
2598 	job_control_entry* entry = team->job_control_entry;
2599 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
2600 		return;
2601 
2602 	T(SetJobControlState(team->id, newState, signal));
2603 
2604 	// remove from the old list
2605 	switch (entry->state) {
2606 		case JOB_CONTROL_STATE_NONE:
2607 			// entry is in no list ATM
2608 			break;
2609 		case JOB_CONTROL_STATE_DEAD:
2610 			// can't get here
2611 			break;
2612 		case JOB_CONTROL_STATE_STOPPED:
2613 			team->parent->stopped_children.entries.Remove(entry);
2614 			break;
2615 		case JOB_CONTROL_STATE_CONTINUED:
2616 			team->parent->continued_children.entries.Remove(entry);
2617 			break;
2618 	}
2619 
2620 	entry->state = newState;
2621 	entry->signal = signal;
2622 
2623 	// add to new list
2624 	team_job_control_children* childList = NULL;
2625 	switch (entry->state) {
2626 		case JOB_CONTROL_STATE_NONE:
2627 			// entry doesn't get into any list
2628 			break;
2629 		case JOB_CONTROL_STATE_DEAD:
2630 			childList = &team->parent->dead_children;
2631 			team->parent->dead_children.count++;
2632 			break;
2633 		case JOB_CONTROL_STATE_STOPPED:
2634 			childList = &team->parent->stopped_children;
2635 			break;
2636 		case JOB_CONTROL_STATE_CONTINUED:
2637 			childList = &team->parent->continued_children;
2638 			break;
2639 	}
2640 
2641 	if (childList != NULL) {
2642 		childList->entries.Add(entry);
2643 		team->parent->dead_children.condition_variable.NotifyAll(
2644 			threadsLocked);
2645 	}
2646 }
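
// State transitions handled above, for reference (an entry in state NONE is
// in no list; DEAD is final and cannot be left):
//
//	NONE      -> STOPPED | CONTINUED | DEAD
//	STOPPED   -> NONE | CONTINUED | DEAD
//	CONTINUED -> NONE | STOPPED | DEAD
//	DEAD      -> (no further transitions)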
2647 
2648 
2649 /*! Adds a hook to the team that is called as soon as this
2650 	team goes away.
2651 	This call might become public in the future.
2652 */
2653 status_t
2654 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
2655 {
2656 	struct team_watcher* watcher;
2657 	Team* team;
2658 	cpu_status state;
2659 
2660 	if (hook == NULL || teamID < B_OK)
2661 		return B_BAD_VALUE;
2662 
2663 	watcher = (struct team_watcher*)malloc(sizeof(struct team_watcher));
2664 	if (watcher == NULL)
2665 		return B_NO_MEMORY;
2666 
2667 	watcher->hook = hook;
2668 	watcher->data = data;
2669 
2670 	// find team and add watcher
2671 
2672 	state = disable_interrupts();
2673 	GRAB_TEAM_LOCK();
2674 
2675 	team = team_get_team_struct_locked(teamID);
2676 	if (team != NULL)
2677 		list_add_item(&team->watcher_list, watcher);
2678 
2679 	RELEASE_TEAM_LOCK();
2680 	restore_interrupts(state);
2681 
2682 	if (team == NULL) {
2683 		free(watcher);
2684 		return B_BAD_TEAM_ID;
2685 	}
2686 
2687 	return B_OK;
2688 }
2689 
2690 
2691 status_t
2692 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
2693 {
2694 	struct team_watcher* watcher = NULL;
2695 	Team* team;
2696 	cpu_status state;
2697 
2698 	if (hook == NULL || teamID < B_OK)
2699 		return B_BAD_VALUE;
2700 
2701 	// find team and remove watcher (if present)
2702 
2703 	state = disable_interrupts();
2704 	GRAB_TEAM_LOCK();
2705 
2706 	team = team_get_team_struct_locked(teamID);
2707 	if (team != NULL) {
2708 		// search for watcher
2709 		while ((watcher = (struct team_watcher*)list_get_next_item(
2710 				&team->watcher_list, watcher)) != NULL) {
2711 			if (watcher->hook == hook && watcher->data == data) {
2712 				// got it!
2713 				list_remove_item(&team->watcher_list, watcher);
2714 				break;
2715 			}
2716 		}
2717 	}
2718 
2719 	RELEASE_TEAM_LOCK();
2720 	restore_interrupts(state);
2721 
2722 	if (watcher == NULL)
2723 		return B_ENTRY_NOT_FOUND;
2724 
2725 	free(watcher);
2726 	return B_OK;
2727 }
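
// Usage sketch for the watcher interface (hypothetical hook; the data
// pointer is passed through to the hook unchanged):
//
//	static void
//	my_team_gone(team_id team, void* data)
//	{
//		dprintf("team %ld is gone\n", team);
//	}
//
//	start_watching_team(teamID, &my_team_gone, NULL);
//	...
//	// balance the registration once the watcher is no longer needed;
//	// hook, data, and team ID together identify it
//	stop_watching_team(teamID, &my_team_gone, NULL);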
2728 
2729 
2730 /*!	The team lock must be held or the team must still be single threaded.
2731 */
2732 struct user_thread*
2733 team_allocate_user_thread(Team* team)
2734 {
2735 	if (team->user_data == 0)
2736 		return NULL;
2737 
2738 	user_thread* thread = NULL;
2739 
2740 	// take an entry from the free list, if any
2741 	if (struct free_user_thread* entry = team->free_user_threads) {
2742 		thread = entry->thread;
2743 		team->free_user_threads = entry->next;
2744 		deferred_free(entry);
2745 		return thread;
2746 	} else {
2747 		// enough space left?
2748 		size_t needed = _ALIGN(sizeof(user_thread));
2749 		if (team->user_data_size - team->used_user_data < needed)
2750 			return NULL;
2751 		// TODO: This imposes a per-team thread limit! We should resize the
2752 		// area, if necessary. That's problematic at this point, though, since
2753 		// we've got the team lock.
2754 
2755 		thread = (user_thread*)(team->user_data + team->used_user_data);
2756 		team->used_user_data += needed;
2757 	}
2758 
2759 	thread->defer_signals = 0;
2760 	thread->pending_signals = 0;
2761 	thread->wait_status = B_OK;
2762 
2763 	return thread;
2764 }
2765 
2766 
2767 /*!	The team lock must not be held. \a thread must be the current thread.
2768 */
2769 void
2770 team_free_user_thread(Thread* thread)
2771 {
2772 	user_thread* userThread = thread->user_thread;
2773 	if (userThread == NULL)
2774 		return;
2775 
2776 	// create a free list entry
2777 	free_user_thread* entry
2778 		= (free_user_thread*)malloc(sizeof(free_user_thread));
2779 	if (entry == NULL) {
2780 		// we have to leak the user thread :-/
2781 		return;
2782 	}
2783 
2784 	InterruptsSpinLocker _(gTeamSpinlock);
2785 
2786 	// detach from thread
2787 	SpinLocker threadLocker(gThreadSpinlock);
2788 	thread->user_thread = NULL;
2789 	threadLocker.Unlock();
2790 
2791 	entry->thread = userThread;
2792 	entry->next = thread->team->free_user_threads;
2793 	thread->team->free_user_threads = entry;
2794 }
2795 
2796 
2797 //	#pragma mark - Associated data interface
2798 
2799 
2800 AssociatedData::AssociatedData()
2801 	:
2802 	fOwner(NULL)
2803 {
2804 }
2805 
2806 
2807 AssociatedData::~AssociatedData()
2808 {
2809 }
2810 
2811 
2812 void
2813 AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
2814 {
2815 }
2816 
2817 
2818 AssociatedDataOwner::AssociatedDataOwner()
2819 {
2820 	mutex_init(&fLock, "associated data owner");
2821 }
2822 
2823 
2824 AssociatedDataOwner::~AssociatedDataOwner()
2825 {
2826 	mutex_destroy(&fLock);
2827 }
2828 
2829 
2830 bool
2831 AssociatedDataOwner::AddData(AssociatedData* data)
2832 {
2833 	MutexLocker locker(fLock);
2834 
2835 	if (data->Owner() != NULL)
2836 		return false;
2837 
2838 	data->AcquireReference();
2839 	fList.Add(data);
2840 	data->SetOwner(this);
2841 
2842 	return true;
2843 }
2844 
2845 
2846 bool
2847 AssociatedDataOwner::RemoveData(AssociatedData* data)
2848 {
2849 	MutexLocker locker(fLock);
2850 
2851 	if (data->Owner() != this)
2852 		return false;
2853 
2854 	data->SetOwner(NULL);
2855 	fList.Remove(data);
2856 
2857 	locker.Unlock();
2858 
2859 	data->ReleaseReference();
2860 
2861 	return true;
2862 }
2863 
2864 
2865 void
2866 AssociatedDataOwner::PrepareForDeletion()
2867 {
2868 	MutexLocker locker(fLock);
2869 
2870 	// move all data to a temporary list and unset the owner
2871 	DataList list;
2872 	list.MoveFrom(&fList);
2873 
2874 	for (DataList::Iterator it = list.GetIterator();
2875 		AssociatedData* data = it.Next();) {
2876 		data->SetOwner(NULL);
2877 	}
2878 
2879 	locker.Unlock();
2880 
2881 	// call the notification hooks and release our references
2882 	while (AssociatedData* data = list.RemoveHead()) {
2883 		data->OwnerDeleted(this);
2884 		data->ReleaseReference();
2885 	}
2886 }
2887 
2888 
2889 /*!	Associates data with the current team.
2890 	When the team is deleted, the data object is notified.
2891 	The team acquires a reference to the object.
2892 
2893 	\param data The data object.
2894 	\return \c true on success, \c false otherwise. Fails only when the supplied
2895 		data object is already associated with another owner.
2896 */
2897 bool
2898 team_associate_data(AssociatedData* data)
2899 {
2900 	return thread_get_current_thread()->team->AddData(data);
2901 }
2902 
2903 
2904 /*!	Dissociates data from the current team.
2905 	Balances an earlier call to team_associate_data().
2906 
2907 	\param data The data object.
2908 	\return \c true on success, \c false otherwise. Fails only when the data
2909 		object is not associated with the current team.
2910 */
2911 bool
2912 team_dissociate_data(AssociatedData* data)
2913 {
2914 	return thread_get_current_thread()->team->RemoveData(data);
2915 }
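
// A minimal sketch of using the associated data interface (hypothetical
// subclass; the creator keeps its own reference besides the one the team
// acquires):
//
//	struct MyTeamData : AssociatedData {
//		virtual void OwnerDeleted(AssociatedDataOwner* owner)
//		{
//			// the owning team is going away -- clean up whatever this
//			// object tracks on its behalf
//		}
//	};
//
//	MyTeamData* data = new(std::nothrow) MyTeamData;
//	if (data != NULL && team_associate_data(data)) {
//		...
//		team_dissociate_data(data);
//	}
//	if (data != NULL)
//		data->ReleaseReference();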
2916 
2917 
2918 //	#pragma mark - Public kernel API
2919 
2920 
2921 thread_id
2922 load_image(int32 argCount, const char** args, const char** env)
2923 {
2924 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
2925 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
2926 }
2927 
2928 
2929 thread_id
2930 load_image_etc(int32 argCount, const char* const* args,
2931 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
2932 {
2933 	// we need to flatten the args and environment
2934 
2935 	if (args == NULL)
2936 		return B_BAD_VALUE;
2937 
2938 	// determine total needed size
2939 	int32 argSize = 0;
2940 	for (int32 i = 0; i < argCount; i++)
2941 		argSize += strlen(args[i]) + 1;
2942 
2943 	int32 envCount = 0;
2944 	int32 envSize = 0;
2945 	while (env != NULL && env[envCount] != NULL)
2946 		envSize += strlen(env[envCount++]) + 1;
2947 
2948 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
2949 	if (size > MAX_PROCESS_ARGS_SIZE)
2950 		return B_TOO_MANY_ARGS;
2951 
2952 	// allocate space
2953 	char** flatArgs = (char**)malloc(size);
2954 	if (flatArgs == NULL)
2955 		return B_NO_MEMORY;
2956 
2957 	char** slot = flatArgs;
2958 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
2959 
2960 	// copy arguments and environment
2961 	for (int32 i = 0; i < argCount; i++) {
2962 		int32 argSize = strlen(args[i]) + 1;
2963 		memcpy(stringSpace, args[i], argSize);
2964 		*slot++ = stringSpace;
2965 		stringSpace += argSize;
2966 	}
2967 
2968 	*slot++ = NULL;
2969 
2970 	for (int32 i = 0; i < envCount; i++) {
2971 		int32 envSize = strlen(env[i]) + 1;
2972 		memcpy(stringSpace, env[i], envSize);
2973 		*slot++ = stringSpace;
2974 		stringSpace += envSize;
2975 	}
2976 
2977 	*slot++ = NULL;
2978 
2979 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
2980 		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
2981 
2982 	free(flatArgs);
2983 		// load_image_internal() unsets our variable if it takes over ownership
2984 
2985 	return thread;
2986 }
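
// Usage sketch for the kernel-side loading interface (hypothetical
// arguments; with B_WAIT_TILL_LOADED the returned main thread is left
// suspended until resumed):
//
//	const char* args[] = { "/bin/sh", "-c", "echo hello" };
//	thread_id thread = load_image(3, args, NULL);
//	if (thread >= 0) {
//		resume_thread(thread);
//		status_t returnCode;
//		wait_for_team(thread, &returnCode);
//			// the team ID equals the ID of its main thread
//	}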
2987 
2988 
2989 status_t
2990 wait_for_team(team_id id, status_t* _returnCode)
2991 {
2992 	Team* team;
2993 	thread_id thread;
2994 	cpu_status state;
2995 
2996 	// find main thread and wait for that
2997 
2998 	state = disable_interrupts();
2999 	GRAB_TEAM_LOCK();
3000 
3001 	team = team_get_team_struct_locked(id);
3002 	if (team != NULL && team->main_thread != NULL)
3003 		thread = team->main_thread->id;
3004 	else
3005 		thread = B_BAD_THREAD_ID;
3006 
3007 	RELEASE_TEAM_LOCK();
3008 	restore_interrupts(state);
3009 
3010 	if (thread < 0)
3011 		return thread;
3012 
3013 	return wait_for_thread(thread, _returnCode);
3014 }
3015 
3016 
3017 status_t
3018 kill_team(team_id id)
3019 {
3020 	status_t status = B_OK;
3021 	thread_id threadID = -1;
3022 	Team* team;
3023 	cpu_status state;
3024 
3025 	state = disable_interrupts();
3026 	GRAB_TEAM_LOCK();
3027 
3028 	team = team_get_team_struct_locked(id);
3029 	if (team != NULL) {
3030 		if (team != sKernelTeam) {
3031 			threadID = team->id;
3032 				// the team ID is the same as the ID of its main thread
3033 		} else
3034 			status = B_NOT_ALLOWED;
3035 	} else
3036 		status = B_BAD_THREAD_ID;
3037 
3038 	RELEASE_TEAM_LOCK();
3039 	restore_interrupts(state);
3040 
3041 	if (status < B_OK)
3042 		return status;
3043 
3044 	// just kill the main thread in the team. The cleanup code there will
3045 	// take care of the team
3046 	return kill_thread(threadID);
3047 }
3048 
3049 
3050 status_t
3051 _get_team_info(team_id id, team_info* info, size_t size)
3052 {
3053 	cpu_status state;
3054 	status_t status = B_OK;
3055 	Team* team;
3056 
3057 	state = disable_interrupts();
3058 	GRAB_TEAM_LOCK();
3059 
3060 	if (id == B_CURRENT_TEAM)
3061 		team = thread_get_current_thread()->team;
3062 	else
3063 		team = team_get_team_struct_locked(id);
3064 
3065 	if (team == NULL) {
3066 		status = B_BAD_TEAM_ID;
3067 		goto err;
3068 	}
3069 
3070 	status = fill_team_info(team, info, size);
3071 
3072 err:
3073 	RELEASE_TEAM_LOCK();
3074 	restore_interrupts(state);
3075 
3076 	return status;
3077 }
3078 
3079 
3080 status_t
3081 _get_next_team_info(int32* cookie, team_info* info, size_t size)
3082 {
3083 	status_t status = B_BAD_TEAM_ID;
3084 	Team* team = NULL;
3085 	int32 slot = *cookie;
3086 	team_id lastTeamID;
3087 	cpu_status state;
3088 
3089 	if (slot < 1)
3090 		slot = 1;
3091 
3092 	state = disable_interrupts();
3093 	GRAB_TEAM_LOCK();
3094 
3095 	lastTeamID = peek_next_thread_id();
3096 	if (slot >= lastTeamID)
3097 		goto err;
3098 
3099 	// get next valid team
3100 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3101 		slot++;
3102 
3103 	if (team) {
3104 		status = fill_team_info(team, info, size);
3105 		*cookie = ++slot;
3106 	}
3107 
3108 err:
3109 	RELEASE_TEAM_LOCK();
3110 	restore_interrupts(state);
3111 
3112 	return status;
3113 }
3114 
3115 
3116 status_t
3117 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3118 {
3119 	bigtime_t kernelTime = 0, userTime = 0;
3120 	status_t status = B_OK;
3121 	Team* team;
3122 	cpu_status state;
3123 
3124 	if (size != sizeof(team_usage_info)
3125 		|| (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN))
3126 		return B_BAD_VALUE;
3127 
3128 	state = disable_interrupts();
3129 	GRAB_TEAM_LOCK();
3130 
3131 	if (id == B_CURRENT_TEAM)
3132 		team = thread_get_current_thread()->team;
3133 	else
3134 		team = team_get_team_struct_locked(id);
3135 
3136 	if (team == NULL) {
3137 		status = B_BAD_TEAM_ID;
3138 		goto out;
3139 	}
3140 
3141 	switch (who) {
3142 		case B_TEAM_USAGE_SELF:
3143 		{
3144 			Thread* thread = team->thread_list;
3145 
3146 			for (; thread != NULL; thread = thread->team_next) {
3147 				kernelTime += thread->kernel_time;
3148 				userTime += thread->user_time;
3149 			}
3150 
3151 			kernelTime += team->dead_threads_kernel_time;
3152 			userTime += team->dead_threads_user_time;
3153 			break;
3154 		}
3155 
3156 		case B_TEAM_USAGE_CHILDREN:
3157 		{
3158 			Team* child = team->children;
3159 			for (; child != NULL; child = child->siblings_next) {
3160 				Thread* thread = child->thread_list;
3161 
3162 				for (; thread != NULL; thread = thread->team_next) {
3163 					kernelTime += thread->kernel_time;
3164 					userTime += thread->user_time;
3165 				}
3166 
3167 				kernelTime += child->dead_threads_kernel_time;
3168 				userTime += child->dead_threads_user_time;
3169 			}
3170 
3171 			kernelTime += team->dead_children.kernel_time;
3172 			userTime += team->dead_children.user_time;
3173 			break;
3174 		}
3175 	}
3176 
3177 out:
3178 	RELEASE_TEAM_LOCK();
3179 	restore_interrupts(state);
3180 
3181 	if (status == B_OK) {
3182 		info->kernel_time = kernelTime;
3183 		info->user_time = userTime;
3184 	}
3185 
3186 	return status;
3187 }
3188 
3189 
3190 pid_t
3191 getpid(void)
3192 {
3193 	return thread_get_current_thread()->team->id;
3194 }
3195 
3196 
3197 pid_t
3198 getppid(void)
3199 {
3200 	Team* team = thread_get_current_thread()->team;
3201 	cpu_status state;
3202 	pid_t parent;
3203 
3204 	state = disable_interrupts();
3205 	GRAB_TEAM_LOCK();
3206 
3207 	parent = team->parent->id;
3208 
3209 	RELEASE_TEAM_LOCK();
3210 	restore_interrupts(state);
3211 
3212 	return parent;
3213 }
3214 
3215 
3216 pid_t
3217 getpgid(pid_t process)
3218 {
3219 	Thread* thread;
3220 	pid_t result = -1;
3221 	cpu_status state;
3222 
3223 	if (process == 0)
3224 		process = thread_get_current_thread()->team->id;
3225 
3226 	state = disable_interrupts();
3227 	GRAB_THREAD_LOCK();
3228 
3229 	thread = thread_get_thread_struct_locked(process);
3230 	if (thread != NULL)
3231 		result = thread->team->group_id;
3232 
3233 	RELEASE_THREAD_LOCK();
3234 	restore_interrupts(state);
3235 
3236 	return thread != NULL ? result : B_BAD_VALUE;
3237 }
3238 
3239 
3240 pid_t
3241 getsid(pid_t process)
3242 {
3243 	Thread* thread;
3244 	pid_t result = -1;
3245 	cpu_status state;
3246 
3247 	if (process == 0)
3248 		process = thread_get_current_thread()->team->id;
3249 
3250 	state = disable_interrupts();
3251 	GRAB_THREAD_LOCK();
3252 
3253 	thread = thread_get_thread_struct_locked(process);
3254 	if (thread != NULL)
3255 		result = thread->team->session_id;
3256 
3257 	RELEASE_THREAD_LOCK();
3258 	restore_interrupts(state);
3259 
3260 	return thread != NULL ? result : B_BAD_VALUE;
3261 }
3262 
3263 
3264 //	#pragma mark - User syscalls
3265 
3266 
3267 status_t
3268 _user_exec(const char* userPath, const char* const* userFlatArgs,
3269 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
3270 {
3271 	// NOTE: Since this function normally doesn't return, don't use automatic
3272 	// variables that need destruction in the function scope.
3273 	char path[B_PATH_NAME_LENGTH];
3274 
3275 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3276 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3277 		return B_BAD_ADDRESS;
3278 
3279 	// copy and relocate the flat arguments
3280 	char** flatArgs;
3281 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3282 		argCount, envCount, flatArgs);
3283 
3284 	if (error == B_OK) {
3285 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3286 			envCount, umask);
3287 			// this one only returns in case of error
3288 	}
3289 
3290 	free(flatArgs);
3291 	return error;
3292 }
3293 
3294 
3295 thread_id
3296 _user_fork(void)
3297 {
3298 	return fork_team();
3299 }
3300 
3301 
3302 thread_id
3303 _user_wait_for_child(thread_id child, uint32 flags, int32* _userReason,
3304 	status_t* _userReturnCode)
3305 {
3306 	status_t returnCode;
3307 	int32 reason;
3308 	thread_id deadChild;
3309 
3310 	if ((_userReason != NULL && !IS_USER_ADDRESS(_userReason))
3311 		|| (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode)))
3312 		return B_BAD_ADDRESS;
3313 
3314 	deadChild = wait_for_child(child, flags, &reason, &returnCode);
3315 
3316 	if (deadChild >= B_OK) {
3317 		// copy result data on successful completion
3318 		if ((_userReason != NULL
3319 				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
3320 			|| (_userReturnCode != NULL
3321 				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
3322 					< B_OK)) {
3323 			return B_BAD_ADDRESS;
3324 		}
3325 
3326 		return deadChild;
3327 	}
3328 
3329 	return syscall_restart_handle_post(deadChild);
3330 }
3331 
3332 
3333 pid_t
3334 _user_process_info(pid_t process, int32 which)
3335 {
3336 	// we only allow returning the parent of the current process
3337 	if (which == PARENT_ID
3338 		&& process != 0 && process != thread_get_current_thread()->team->id)
3339 		return B_BAD_VALUE;
3340 
3341 	switch (which) {
3342 		case SESSION_ID:
3343 			return getsid(process);
3344 		case GROUP_ID:
3345 			return getpgid(process);
3346 		case PARENT_ID:
3347 			return getppid();
3348 	}
3349 
3350 	return B_BAD_VALUE;
3351 }
3352 
3353 
3354 pid_t
3355 _user_setpgid(pid_t processID, pid_t groupID)
3356 {
3357 	Thread* thread = thread_get_current_thread();
3358 	Team* currentTeam = thread->team;
3359 	Team* team;
3360 
3361 	if (groupID < 0)
3362 		return B_BAD_VALUE;
3363 
3364 	if (processID == 0)
3365 		processID = currentTeam->id;
3366 
3367 	// if the group ID is not specified, use the target process' ID
3368 	if (groupID == 0)
3369 		groupID = processID;
3370 
3371 	if (processID == currentTeam->id) {
3372 		// we set our own group
3373 
3374 		// we must not change our process group ID if we're a session leader
3375 		if (is_session_leader(currentTeam))
3376 			return B_NOT_ALLOWED;
3377 	} else {
3378 		// another team is the target of the call -- check it out
3379 		InterruptsSpinLocker _(gTeamSpinlock);
3380 
3381 		team = team_get_team_struct_locked(processID);
3382 		if (team == NULL)
3383 			return ESRCH;
3384 
3385 		// The team must be a child of the calling team and in the same session.
3386 		// (If that's the case it isn't a session leader either.)
3387 		if (team->parent != currentTeam
3388 			|| team->session_id != currentTeam->session_id) {
3389 			return B_NOT_ALLOWED;
3390 		}
3391 
3392 		if (team->group_id == groupID)
3393 			return groupID;
3394 
3395 		// The call is also supposed to fail on a child that has already
3396 		// executed exec*() [EACCES].
3397 		if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
3398 			return EACCES;
3399 	}
3400 
3401 	struct process_group* group = NULL;
3402 	if (groupID == processID) {
3403 		// A new process group might be needed.
3404 		group = create_process_group(groupID);
3405 		if (group == NULL)
3406 			return B_NO_MEMORY;
3407 
3408 		// Assume orphaned. We consider the situation of the team's parent
3409 		// below.
3410 		group->orphaned = true;
3411 	}
3412 
3413 	status_t status = B_OK;
3414 	struct process_group* freeGroup = NULL;
3415 
3416 	InterruptsSpinLocker locker(gTeamSpinlock);
3417 
3418 	team = team_get_team_struct_locked(processID);
3419 	if (team != NULL) {
3420 		// check the conditions again -- they might have changed in the meantime
3421 		if (is_session_leader(team)
3422 			|| team->session_id != currentTeam->session_id) {
3423 			status = B_NOT_ALLOWED;
3424 		} else if (team != currentTeam
3425 				&& (team->flags & TEAM_FLAG_EXEC_DONE) != 0) {
3426 			status = EACCES;
3427 		} else if (team->group_id == groupID) {
3428 			// the team is already in the desired process group
3429 			freeGroup = group;
3430 		} else {
3431 			// Check if a process group with the requested ID already exists.
3432 			struct process_group* targetGroup
3433 				= team_get_process_group_locked(team->group->session, groupID);
3434 			if (targetGroup != NULL) {
3435 				// In case of processID == groupID we have to free the
3436 				// allocated group.
3437 				freeGroup = group;
3438 			} else if (processID == groupID) {
3439 				// We created a new process group, let us insert it into the
3440 				// team's session.
3441 				insert_group_into_session(team->group->session, group);
3442 				targetGroup = group;
3443 			}
3444 
3445 			if (targetGroup != NULL) {
3446 				// we got a group, let's move the team there
3447 				process_group* oldGroup = team->group;
3448 
3449 				remove_team_from_group(team);
3450 				insert_team_into_group(targetGroup, team);
3451 
3452 				// Update the "orphaned" flag of all potentially affected
3453 				// groups.
3454 
3455 				// the team's old group
3456 				if (oldGroup->teams != NULL) {
3457 					oldGroup->orphaned = false;
3458 					update_orphaned_process_group(oldGroup, -1);
3459 				}
3460 
3461 				// the team's new group
3462 				Team* parent = team->parent;
3463 				targetGroup->orphaned &= parent == NULL
3464 					|| parent->group == targetGroup
3465 					|| team->parent->session_id != team->session_id;
3466 
3467 				// children's groups
3468 				Team* child = team->children;
3469 				while (child != NULL) {
3470 					child->group->orphaned = false;
3471 					update_orphaned_process_group(child->group, -1);
3472 
3473 					child = child->siblings_next;
3474 				}
3475 			} else
3476 				status = B_NOT_ALLOWED;
3477 		}
3478 	} else
3479 		status = B_NOT_ALLOWED;
3480 
3481 	// Changing the process group might have changed the situation for a parent
3482 	// waiting in wait_for_child(). Hence we notify it.
3483 	if (status == B_OK)
3484 		team->parent->dead_children.condition_variable.NotifyAll(false);
3485 
3486 	locker.Unlock();
3487 
3488 	if (status != B_OK) {
3489 		// in case of error, the group hasn't been added into the hash
3490 		team_delete_process_group(group);
3491 	}
3492 
3493 	team_delete_process_group(freeGroup);
3494 
3495 	return status == B_OK ? groupID : status;
3496 }
3497 
3498 
3499 pid_t
3500 _user_setsid(void)
3501 {
3502 	Team* team = thread_get_current_thread()->team;
3503 	struct process_session* session;
3504 	struct process_group* group;
3505 	cpu_status state;
3506 	bool failed = false;
3507 
3508 	// the team must not already be a process group leader
3509 	if (is_process_group_leader(team))
3510 		return B_NOT_ALLOWED;
3511 
3512 	group = create_process_group(team->id);
3513 	if (group == NULL)
3514 		return B_NO_MEMORY;
3515 
3516 	session = create_process_session(group->id);
3517 	if (session == NULL) {
3518 		team_delete_process_group(group);
3519 		return B_NO_MEMORY;
3520 	}
3521 
3522 	state = disable_interrupts();
3523 	GRAB_TEAM_LOCK();
3524 
3525 	// this may have changed since the check above
3526 	if (!is_process_group_leader(team)) {
3527 		remove_team_from_group(team);
3528 
3529 		insert_group_into_session(session, group);
3530 		insert_team_into_group(group, team);
3531 	} else
3532 		failed = true;
3533 
3534 	RELEASE_TEAM_LOCK();
3535 	restore_interrupts(state);
3536 
3537 	if (failed) {
3538 		team_delete_process_group(group);
3539 		free(session);
3540 		return B_NOT_ALLOWED;
3541 	}
3542 
3543 	return team->group_id;
3544 }
3545 
3546 
3547 status_t
3548 _user_wait_for_team(team_id id, status_t* _userReturnCode)
3549 {
3550 	status_t returnCode;
3551 	status_t status;
3552 
3553 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
3554 		return B_BAD_ADDRESS;
3555 
3556 	status = wait_for_team(id, &returnCode);
3557 	if (status >= B_OK && _userReturnCode != NULL) {
3558 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
3559 				!= B_OK)
3560 			return B_BAD_ADDRESS;
3561 		return B_OK;
3562 	}
3563 
3564 	return syscall_restart_handle_post(status);
3565 }
3566 
3567 
3568 thread_id
3569 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
3570 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
3571 	port_id errorPort, uint32 errorToken)
3572 {
3573 	TRACE(("_user_load_image: argc = %ld\n", argCount));
3574 
3575 	if (argCount < 1)
3576 		return B_BAD_VALUE;
3577 
3578 	// copy and relocate the flat arguments
3579 	char** flatArgs;
3580 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3581 		argCount, envCount, flatArgs);
3582 	if (error != B_OK)
3583 		return error;
3584 
3585 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
3586 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
3587 		errorToken);
3588 
3589 	free(flatArgs);
3590 		// load_image_internal() unsets our variable if it takes over ownership
3591 
3592 	return thread;
3593 }
3594 
3595 
3596 void
3597 _user_exit_team(status_t returnValue)
3598 {
3599 	Thread* thread = thread_get_current_thread();
3600 	Team* team = thread->team;
3601 	Thread* mainThread = team->main_thread;
3602 
3603 	mainThread->exit.status = returnValue;
3604 	mainThread->exit.reason = THREAD_RETURN_EXIT;
3605 
3606 	// Also set the exit code in the current thread, for consistency
3607 	if (thread != mainThread) {
3608 		thread->exit.status = returnValue;
3609 		thread->exit.reason = THREAD_RETURN_EXIT;
3610 	}
3611 
3612 	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT)
3613 			!= 0) {
3614 		// This team is currently being debugged, and it was requested that
3615 		// teams should not be exited.
3616 		user_debug_stop_thread();
3617 	}
3618 
3619 	send_signal(thread->id, SIGKILL);
3620 }
3621 
3622 
3623 status_t
3624 _user_kill_team(team_id team)
3625 {
3626 	return kill_team(team);
3627 }
3628 
3629 
3630 status_t
3631 _user_get_team_info(team_id id, team_info* userInfo)
3632 {
3633 	status_t status;
3634 	team_info info;
3635 
3636 	if (!IS_USER_ADDRESS(userInfo))
3637 		return B_BAD_ADDRESS;
3638 
3639 	status = _get_team_info(id, &info, sizeof(team_info));
3640 	if (status == B_OK) {
3641 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3642 			return B_BAD_ADDRESS;
3643 	}
3644 
3645 	return status;
3646 }
3647 
3648 
3649 status_t
3650 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
3651 {
3652 	status_t status;
3653 	team_info info;
3654 	int32 cookie;
3655 
3656 	if (!IS_USER_ADDRESS(userCookie)
3657 		|| !IS_USER_ADDRESS(userInfo)
3658 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3659 		return B_BAD_ADDRESS;
3660 
3661 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
3662 	if (status != B_OK)
3663 		return status;
3664 
3665 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3666 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3667 		return B_BAD_ADDRESS;
3668 
3669 	return status;
3670 }
3671 
3672 
3673 team_id
3674 _user_get_current_team(void)
3675 {
3676 	return team_get_current_team_id();
3677 }
3678 
3679 
3680 status_t
3681 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
3682 	size_t size)
3683 {
3684 	team_usage_info info;
3685 	status_t status;
3686 
3687 	if (!IS_USER_ADDRESS(userInfo))
3688 		return B_BAD_ADDRESS;
3689 
3690 	status = _get_team_usage_info(team, who, &info, size);
3691 	if (status != B_OK)
3692 		return status;
3693 
3694 	if (user_memcpy(userInfo, &info, size) < B_OK)
3695 		return B_BAD_ADDRESS;
3696 
3697 	return status;
3698 }
3699 
3700 
3701 status_t
3702 _user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
3703 	size_t size, size_t* _sizeNeeded)
3704 {
3705 	// check parameters
3706 	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
3707 		|| (buffer == NULL && size > 0)
3708 		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
3709 		return B_BAD_ADDRESS;
3710 	}
3711 
3712 	KMessage info;
3713 
3714 	if ((flags & B_TEAM_INFO_BASIC) != 0) {
3715 		// allocate memory for a copy of the team struct
3716 		Team* teamClone = new(std::nothrow) Team;
3717 		if (teamClone == NULL)
3718 			return B_NO_MEMORY;
3719 		ObjectDeleter<Team> teamCloneDeleter(teamClone);
3720 
3721 		io_context* ioContext;
3722 		{
3723 			// get the team structure
3724 			InterruptsSpinLocker _(gTeamSpinlock);
3725 			Team* team = teamID == B_CURRENT_TEAM
3726 				? thread_get_current_thread()->team
3727 				: team_get_team_struct_locked(teamID);
3728 			if (team == NULL)
3729 				return B_BAD_TEAM_ID;
3730 
3731 			// copy it
3732 			memcpy(teamClone, team, sizeof(*team));
3733 
3734 			// also fetch a reference to the I/O context
3735 			ioContext = team->io_context;
3736 			vfs_get_io_context(ioContext);
3737 		}
3738 		CObjectDeleter<io_context> ioContextPutter(ioContext,
3739 			&vfs_put_io_context);
3740 
3741 		// add the basic data to the info message
3742 		if (info.AddInt32("id", teamClone->id) != B_OK
3743 			|| info.AddString("name", teamClone->name) != B_OK
3744 			|| info.AddInt32("process group", teamClone->group_id) != B_OK
3745 			|| info.AddInt32("session", teamClone->session_id) != B_OK
3746 			|| info.AddInt32("uid", teamClone->real_uid) != B_OK
3747 			|| info.AddInt32("gid", teamClone->real_gid) != B_OK
3748 			|| info.AddInt32("euid", teamClone->effective_uid) != B_OK
3749 			|| info.AddInt32("egid", teamClone->effective_gid) != B_OK) {
3750 			return B_NO_MEMORY;
3751 		}
3752 
3753 		// get the current working directory from the I/O context
3754 		dev_t cwdDevice;
3755 		ino_t cwdDirectory;
3756 		{
3757 			MutexLocker ioContextLocker(ioContext->io_mutex);
3758 			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
3759 		}
3760 
3761 		if (info.AddInt32("cwd device", cwdDevice) != B_OK
3762 			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
3763 			return B_NO_MEMORY;
3764 		}
3765 	}
3766 
3767 	// TODO: Support the other flags!
3768 
3769 	// copy the needed size and, if it fits, the message back to userland
3770 	size_t sizeNeeded = info.ContentSize();
3771 	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
3772 		return B_BAD_ADDRESS;
3773 
3774 	if (sizeNeeded > size)
3775 		return B_BUFFER_OVERFLOW;
3776 
3777 	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
3778 		return B_BAD_ADDRESS;
3779 
3780 	return B_OK;
3781 }
3782