xref: /haiku/src/system/kernel/team.cpp (revision 37e5a036605931f55d82e971f8ab99c48023a5c4)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 /*!	Team functions */
13 
14 
15 #include <team.h>
16 
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22 
23 #include <OS.h>
24 
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27 
28 #include <extended_system_info_defs.h>
29 
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <sem.h>
47 #include <syscall_process_info.h>
48 #include <syscall_restart.h>
49 #include <syscalls.h>
50 #include <tls.h>
51 #include <tracing.h>
52 #include <user_runtime.h>
53 #include <user_thread.h>
54 #include <usergroup.h>
55 #include <vfs.h>
56 #include <vm/vm.h>
57 #include <vm/VMAddressSpace.h>
58 #include <util/AutoLock.h>
59 
60 #include "TeamThreadTables.h"
61 
62 
63 //#define TRACE_TEAM
64 #ifdef TRACE_TEAM
65 #	define TRACE(x) dprintf x
66 #else
67 #	define TRACE(x) ;
68 #endif
69 
70 
71 struct team_key {
72 	team_id id;
73 };
74 
75 struct team_arg {
76 	char	*path;
77 	char	**flat_args;
78 	size_t	flat_args_size;
79 	uint32	arg_count;
80 	uint32	env_count;
81 	mode_t	umask;
82 	uint32	flags;
83 	port_id	error_port;
84 	uint32	error_token;
85 };
86 
87 #define TEAM_ARGS_FLAG_NO_ASLR	0x01
88 
89 
90 namespace {
91 
92 
93 class TeamNotificationService : public DefaultNotificationService {
94 public:
95 							TeamNotificationService();
96 
97 			void			Notify(uint32 eventCode, Team* team);
98 };
99 
100 
101 // #pragma mark - TeamTable
102 
103 
104 typedef BKernel::TeamThreadTable<Team> TeamTable;
105 
106 
107 // #pragma mark - ProcessGroupHashDefinition
108 
109 
110 struct ProcessGroupHashDefinition {
111 	typedef pid_t			KeyType;
112 	typedef	ProcessGroup	ValueType;
113 
114 	size_t HashKey(pid_t key) const
115 	{
116 		return key;
117 	}
118 
119 	size_t Hash(ProcessGroup* value) const
120 	{
121 		return HashKey(value->id);
122 	}
123 
124 	bool Compare(pid_t key, ProcessGroup* value) const
125 	{
126 		return value->id == key;
127 	}
128 
129 	ProcessGroup*& GetLink(ProcessGroup* value) const
130 	{
131 		return value->next;
132 	}
133 };
134 
135 typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
136 
137 
138 }	// unnamed namespace
139 
140 
141 // #pragma mark -
142 
143 
144 // the team_id -> Team hash table and the lock protecting it
145 static TeamTable sTeamHash;
146 static spinlock sTeamHashLock = B_SPINLOCK_INITIALIZER;
147 
148 // the pid_t -> ProcessGroup hash table and the lock protecting it
149 static ProcessGroupHashTable sGroupHash;
150 static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;
151 
152 static Team* sKernelTeam = NULL;
153 
154 // A list of process groups of children of dying session leaders that need to
155 // be signalled if they have become orphaned and contain stopped processes.
156 static ProcessGroupList sOrphanedCheckProcessGroups;
157 static mutex sOrphanedCheckLock
158 	= MUTEX_INITIALIZER("orphaned process group check");
159 
160 // some arbitrarily chosen limits -- should probably depend on the available
161 // memory (the limit is not yet enforced)
162 static int32 sMaxTeams = 2048;
163 static int32 sUsedTeams = 1;
164 
165 static TeamNotificationService sNotificationService;
166 
167 static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
168 static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
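
// With the usual 4 KiB page size these amount to:
//
//	kTeamUserDataReservedSize = 128 * 4096 = 512 KiB of reserved address space
//	kTeamUserDataInitialSize  =   4 * 4096 =  16 KiB committed up front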
169 
170 
171 // #pragma mark - TeamListIterator
172 
173 
174 TeamListIterator::TeamListIterator()
175 {
176 	// queue the entry
177 	InterruptsSpinLocker locker(sTeamHashLock);
178 	sTeamHash.InsertIteratorEntry(&fEntry);
179 }
180 
181 
182 TeamListIterator::~TeamListIterator()
183 {
184 	// remove the entry
185 	InterruptsSpinLocker locker(sTeamHashLock);
186 	sTeamHash.RemoveIteratorEntry(&fEntry);
187 }
188 
189 
190 Team*
191 TeamListIterator::Next()
192 {
193 	// get the next team -- if there is one, get reference for it
194 	InterruptsSpinLocker locker(sTeamHashLock);
195 	Team* team = sTeamHash.NextElement(&fEntry);
196 	if (team != NULL)
197 		team->AcquireReference();
198 
199 	return team;
200 }
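
// A minimal usage sketch (hypothetical caller): the iterator entry stays
// valid across concurrent team creation and deletion, and every returned
// team carries a reference that the caller must release.
//
//	TeamListIterator iterator;
//	while (Team* team = iterator.Next()) {
//		// ... inspect the team ...
//		team->ReleaseReference();
//	}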
201 
202 
203 // #pragma mark - Tracing
204 
205 
206 #if TEAM_TRACING
207 namespace TeamTracing {
208 
209 class TeamForked : public AbstractTraceEntry {
210 public:
211 	TeamForked(thread_id forkedThread)
212 		:
213 		fForkedThread(forkedThread)
214 	{
215 		Initialized();
216 	}
217 
218 	virtual void AddDump(TraceOutput& out)
219 	{
220 		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
221 	}
222 
223 private:
224 	thread_id			fForkedThread;
225 };
226 
227 
228 class ExecTeam : public AbstractTraceEntry {
229 public:
230 	ExecTeam(const char* path, int32 argCount, const char* const* args,
231 			int32 envCount, const char* const* env)
232 		:
233 		fArgCount(argCount),
234 		fArgs(NULL)
235 	{
236 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
237 			false);
238 
239 		// determine the buffer size we need for the args
240 		size_t argBufferSize = 0;
241 		for (int32 i = 0; i < argCount; i++)
242 			argBufferSize += strlen(args[i]) + 1;
243 
244 		// allocate a buffer
245 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
246 		if (fArgs) {
247 			char* buffer = fArgs;
248 			for (int32 i = 0; i < argCount; i++) {
249 				size_t argSize = strlen(args[i]) + 1;
250 				memcpy(buffer, args[i], argSize);
251 				buffer += argSize;
252 			}
253 		}
254 
255 		// ignore env for the time being
256 		(void)envCount;
257 		(void)env;
258 
259 		Initialized();
260 	}
261 
262 	virtual void AddDump(TraceOutput& out)
263 	{
264 		out.Print("team exec, \"%p\", args:", fPath);
265 
266 		if (fArgs != NULL) {
267 			char* args = fArgs;
268 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
269 				out.Print(" \"%s\"", args);
270 				args += strlen(args) + 1;
271 			}
272 		} else
273 			out.Print(" <too long>");
274 	}
275 
276 private:
277 	char*	fPath;
278 	int32	fArgCount;
279 	char*	fArgs;
280 };
281 
282 
283 static const char*
284 job_control_state_name(job_control_state state)
285 {
286 	switch (state) {
287 		case JOB_CONTROL_STATE_NONE:
288 			return "none";
289 		case JOB_CONTROL_STATE_STOPPED:
290 			return "stopped";
291 		case JOB_CONTROL_STATE_CONTINUED:
292 			return "continued";
293 		case JOB_CONTROL_STATE_DEAD:
294 			return "dead";
295 		default:
296 			return "invalid";
297 	}
298 }
299 
300 
301 class SetJobControlState : public AbstractTraceEntry {
302 public:
303 	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
304 		:
305 		fTeam(team),
306 		fNewState(newState),
307 		fSignal(signal != NULL ? signal->Number() : 0)
308 	{
309 		Initialized();
310 	}
311 
312 	virtual void AddDump(TraceOutput& out)
313 	{
314 		out.Print("team set job control state, team %" B_PRId32 ", "
315 			"new state: %s, signal: %d",
316 			fTeam, job_control_state_name(fNewState), fSignal);
317 	}
318 
319 private:
320 	team_id				fTeam;
321 	job_control_state	fNewState;
322 	int					fSignal;
323 };
324 
325 
326 class WaitForChild : public AbstractTraceEntry {
327 public:
328 	WaitForChild(pid_t child, uint32 flags)
329 		:
330 		fChild(child),
331 		fFlags(flags)
332 	{
333 		Initialized();
334 	}
335 
336 	virtual void AddDump(TraceOutput& out)
337 	{
338 		out.Print("team wait for child, child: %" B_PRId32 ", "
339 			"flags: %#" B_PRIx32, fChild, fFlags);
340 	}
341 
342 private:
343 	pid_t	fChild;
344 	uint32	fFlags;
345 };
346 
347 
348 class WaitForChildDone : public AbstractTraceEntry {
349 public:
350 	WaitForChildDone(const job_control_entry& entry)
351 		:
352 		fState(entry.state),
353 		fTeam(entry.thread),
354 		fStatus(entry.status),
355 		fReason(entry.reason),
356 		fSignal(entry.signal)
357 	{
358 		Initialized();
359 	}
360 
361 	WaitForChildDone(status_t error)
362 		:
363 		fTeam(error)
364 	{
365 		Initialized();
366 	}
367 
368 	virtual void AddDump(TraceOutput& out)
369 	{
370 		if (fTeam >= 0) {
371 			out.Print("team wait for child done, team: %" B_PRId32 ", "
372 				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
373 				fTeam, job_control_state_name(fState), fStatus, fReason,
374 				fSignal);
375 		} else {
376 			out.Print("team wait for child failed, error: "
377 				"%#" B_PRIx32 ", ", fTeam);
378 		}
379 	}
380 
381 private:
382 	job_control_state	fState;
383 	team_id				fTeam;
384 	status_t			fStatus;
385 	uint16				fReason;
386 	uint16				fSignal;
387 };
388 
389 }	// namespace TeamTracing
390 
391 #	define T(x) new(std::nothrow) TeamTracing::x;
392 #else
393 #	define T(x) ;
394 #endif
395 
396 
397 //	#pragma mark - TeamNotificationService
398 
399 
400 TeamNotificationService::TeamNotificationService()
401 	: DefaultNotificationService("teams")
402 {
403 }
404 
405 
406 void
407 TeamNotificationService::Notify(uint32 eventCode, Team* team)
408 {
409 	char eventBuffer[128];
410 	KMessage event;
411 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
412 	event.AddInt32("event", eventCode);
413 	event.AddInt32("team", team->id);
414 	event.AddPointer("teamStruct", team);
415 
416 	DefaultNotificationService::Notify(event, eventCode);
417 }
418 
419 
420 //	#pragma mark - Team
421 
422 
423 Team::Team(team_id id, bool kernel)
424 {
425 	// set the ID provided by the caller
426 	this->id = id;
427 	visible = true;
428 	serial_number = -1;
429 
430 	// init mutex
431 	if (kernel) {
432 		mutex_init(&fLock, "Team:kernel");
433 	} else {
434 		char lockName[16];
435 		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
436 		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
437 	}
438 
439 	hash_next = siblings_next = children = parent = NULL;
440 	fName[0] = '\0';
441 	fArgs[0] = '\0';
442 	num_threads = 0;
443 	io_context = NULL;
444 	address_space = NULL;
445 	realtime_sem_context = NULL;
446 	xsi_sem_context = NULL;
447 	thread_list = NULL;
448 	main_thread = NULL;
449 	loading_info = NULL;
450 	state = TEAM_STATE_BIRTH;
451 	flags = 0;
452 	death_entry = NULL;
453 	user_data_area = -1;
454 	user_data = 0;
455 	used_user_data = 0;
456 	user_data_size = 0;
457 	free_user_threads = NULL;
458 
459 	commpage_address = NULL;
460 
461 	supplementary_groups = NULL;
462 	supplementary_group_count = 0;
463 
464 	dead_threads_kernel_time = 0;
465 	dead_threads_user_time = 0;
466 	cpu_clock_offset = 0;
467 
468 	// dead threads
469 	list_init(&dead_threads);
470 	dead_threads_count = 0;
471 
472 	// dead children
473 	dead_children.count = 0;
474 	dead_children.kernel_time = 0;
475 	dead_children.user_time = 0;
476 
477 	// job control entry
478 	job_control_entry = new(nothrow) ::job_control_entry;
479 	if (job_control_entry != NULL) {
480 		job_control_entry->state = JOB_CONTROL_STATE_NONE;
481 		job_control_entry->thread = id;
482 		job_control_entry->team = this;
483 	}
484 
485 	// exit status -- setting initialized to false suffices
486 	exit.initialized = false;
487 
488 	list_init(&sem_list);
489 	list_init_etc(&port_list, port_team_link_offset());
490 	list_init(&image_list);
491 	list_init(&watcher_list);
492 
493 	clear_team_debug_info(&debug_info, true);
494 
495 	// init dead/stopped/continued children condition vars
496 	dead_children.condition_variable.Init(&dead_children, "team children");
497 
498 	B_INITIALIZE_SPINLOCK(&time_lock);
499 	B_INITIALIZE_SPINLOCK(&signal_lock);
500 
501 	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
502 		kernel ? -1 : MAX_QUEUED_SIGNALS);
503 	memset(fSignalActions, 0, sizeof(fSignalActions));
504 
505 	fUserDefinedTimerCount = 0;
506 }
507 
508 
509 Team::~Team()
510 {
511 	// get rid of all associated data
512 	PrepareForDeletion();
513 
514 	if (io_context != NULL)
515 		vfs_put_io_context(io_context);
516 	delete_owned_ports(this);
517 	sem_delete_owned_sems(this);
518 
519 	DeleteUserTimers(false);
520 
521 	fPendingSignals.Clear();
522 
523 	if (fQueuedSignalsCounter != NULL)
524 		fQueuedSignalsCounter->ReleaseReference();
525 
526 	while (thread_death_entry* threadDeathEntry
527 			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
528 		free(threadDeathEntry);
529 	}
530 
531 	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
532 		delete entry;
533 
534 	while (free_user_thread* entry = free_user_threads) {
535 		free_user_threads = entry->next;
536 		free(entry);
537 	}
538 
539 	malloc_referenced_release(supplementary_groups);
540 
541 	delete job_control_entry;
542 		// usually already NULL and transferred to the parent
543 
544 	mutex_destroy(&fLock);
545 }
546 
547 
548 /*static*/ Team*
549 Team::Create(team_id id, const char* name, bool kernel)
550 {
551 	// create the team object
552 	Team* team = new(std::nothrow) Team(id, kernel);
553 	if (team == NULL)
554 		return NULL;
555 	ObjectDeleter<Team> teamDeleter(team);
556 
557 	if (name != NULL)
558 		team->SetName(name);
559 
560 	// check initialization
561 	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
562 		return NULL;
563 
564 	// finish initialization (arch specifics)
565 	if (arch_team_init_team_struct(team, kernel) != B_OK)
566 		return NULL;
567 
568 	if (!kernel) {
569 		status_t error = user_timer_create_team_timers(team);
570 		if (error != B_OK)
571 			return NULL;
572 	}
573 
574 	// everything went fine
575 	return teamDeleter.Detach();
576 }
577 
578 
579 /*!	\brief Returns the team with the given ID.
580 	Returns a reference to the team.
581 	Team and thread spinlock must not be held.
582 */
583 /*static*/ Team*
584 Team::Get(team_id id)
585 {
586 	if (id == B_CURRENT_TEAM) {
587 		Team* team = thread_get_current_thread()->team;
588 		team->AcquireReference();
589 		return team;
590 	}
591 
592 	InterruptsSpinLocker locker(sTeamHashLock);
593 	Team* team = sTeamHash.Lookup(id);
594 	if (team != NULL)
595 		team->AcquireReference();
596 	return team;
597 }
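
// A minimal usage sketch (hypothetical caller): a BReference can adopt the
// reference returned by Get(), so it is released automatically.
//
//	Team* team = Team::Get(id);
//	if (team == NULL)
//		return B_BAD_TEAM_ID;
//	BReference<Team> teamReference(team, true);
//	// ... use the team ...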
598 
599 
600 /*!	\brief Returns the team with the given ID in a locked state.
601 	Returns a reference to the team.
602 	Team and thread spinlock must not be held.
603 */
604 /*static*/ Team*
605 Team::GetAndLock(team_id id)
606 {
607 	// get the team
608 	Team* team = Get(id);
609 	if (team == NULL)
610 		return NULL;
611 
612 	// lock it
613 	team->Lock();
614 
615 	// only return the team when it isn't already dying
616 	if (team->state >= TEAM_STATE_SHUTDOWN) {
617 		team->Unlock();
618 		team->ReleaseReference();
619 		return NULL;
620 	}
621 
622 	return team;
623 }
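
// A minimal usage sketch (hypothetical caller): GetAndLock() returns the team
// both referenced and locked, so both have to be given back.
//
//	Team* team = Team::GetAndLock(id);
//	if (team == NULL)
//		return B_BAD_TEAM_ID;
//	// ... manipulate the team ...
//	team->Unlock();
//	team->ReleaseReference();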
624 
625 
626 /*!	Locks the team and its parent team (if any).
627 	The caller must hold a reference to the team or otherwise make sure that
628 	it won't be deleted.
629 	If the team doesn't have a parent, only the team itself is locked. If the
630 	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
631 	only the team itself is locked.
632 
633 	\param dontLockParentIfKernel If \c true, the team's parent team is only
634 		locked if it is not the kernel team.
635 */
636 void
637 Team::LockTeamAndParent(bool dontLockParentIfKernel)
638 {
639 	// The locking order is parent -> child. Since the parent can change as long
640 	// as we don't lock the team, we need to do a trial and error loop.
641 	Lock();
642 
643 	while (true) {
644 		// If the team doesn't have a parent, we're done. Otherwise try to lock
645 		// the parent. This will succeed in most cases, simplifying things.
646 		Team* parent = this->parent;
647 		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
648 			|| parent->TryLock()) {
649 			return;
650 		}
651 
652 		// get a temporary reference to the parent, unlock this team, lock the
653 		// parent, and re-lock this team
654 		BReference<Team> parentReference(parent);
655 
656 		Unlock();
657 		parent->Lock();
658 		Lock();
659 
660 		// If the parent hasn't changed in the meantime, we're done.
661 		if (this->parent == parent)
662 			return;
663 
664 		// The parent has changed -- unlock and retry.
665 		parent->Unlock();
666 	}
667 }
668 
669 
670 /*!	Unlocks the team and its parent team (if any).
671 */
672 void
673 Team::UnlockTeamAndParent()
674 {
675 	if (parent != NULL)
676 		parent->Unlock();
677 
678 	Unlock();
679 }
680 
681 
682 /*!	Locks the team, its parent team (if any), and the team's process group.
683 	The caller must hold a reference to the team or otherwise make sure that
684 	it won't be deleted.
685 	If the team doesn't have a parent, only the team itself is locked.
686 */
687 void
688 Team::LockTeamParentAndProcessGroup()
689 {
690 	LockTeamAndProcessGroup();
691 
692 	// We hold the group's and the team's lock, but not the parent team's lock.
693 	// If we have a parent, try to lock it.
694 	if (this->parent == NULL || this->parent->TryLock())
695 		return;
696 
697 	// No success -- unlock the team and let LockTeamAndParent() do the rest of
698 	// the job.
699 	Unlock();
700 	LockTeamAndParent(false);
701 }
702 
703 
704 /*!	Unlocks the team, its parent team (if any), and the team's process group.
705 */
706 void
707 Team::UnlockTeamParentAndProcessGroup()
708 {
709 	group->Unlock();
710 
711 	if (parent != NULL)
712 		parent->Unlock();
713 
714 	Unlock();
715 }
716 
717 
718 void
719 Team::LockTeamAndProcessGroup()
720 {
721 	// The locking order is process group -> child. Since the process group can
722 	// change as long as we don't lock the team, we need to do a trial and error
723 	// loop.
724 	Lock();
725 
726 	while (true) {
727 		// Try to lock the group. This will succeed in most cases, simplifying
728 		// things.
729 		ProcessGroup* group = this->group;
730 		if (group->TryLock())
731 			return;
732 
733 		// get a temporary reference to the group, unlock this team, lock the
734 		// group, and re-lock this team
735 		BReference<ProcessGroup> groupReference(group);
736 
737 		Unlock();
738 		group->Lock();
739 		Lock();
740 
741 		// If the group hasn't changed in the meantime, we're done.
742 		if (this->group == group)
743 			return;
744 
745 		// The group has changed -- unlock and retry.
746 		group->Unlock();
747 	}
748 }
749 
750 
751 void
752 Team::UnlockTeamAndProcessGroup()
753 {
754 	group->Unlock();
755 	Unlock();
756 }
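
// The lock methods above are meant to be used in pairs, e.g. (hypothetical
// caller holding a team reference):
//
//	team->LockTeamAndProcessGroup();
//	// ... team and team->group are now stable ...
//	team->UnlockTeamAndProcessGroup();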
757 
758 
759 void
760 Team::SetName(const char* name)
761 {
762 	if (const char* lastSlash = strrchr(name, '/'))
763 		name = lastSlash + 1;
764 
765 	strlcpy(fName, name, B_OS_NAME_LENGTH);
766 }
767 
768 
769 void
770 Team::SetArgs(const char* args)
771 {
772 	strlcpy(fArgs, args, sizeof(fArgs));
773 }
774 
775 
776 void
777 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
778 {
779 	fArgs[0] = '\0';
780 	strlcpy(fArgs, path, sizeof(fArgs));
781 	for (int i = 0; i < otherArgCount; i++) {
782 		strlcat(fArgs, " ", sizeof(fArgs));
783 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
784 	}
785 }
786 
787 
788 void
789 Team::ResetSignalsOnExec()
790 {
791 	// We are supposed to keep pending signals. Signal actions shall be reset
792 	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
793 	// (for SIGCHLD it's implementation-defined). Others shall be reset to
794 	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
795 	// flags, but since there aren't any handlers, they make little sense, so
796 	// we clear them.
797 
798 	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
799 		struct sigaction& action = SignalActionFor(i);
800 		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
801 			action.sa_handler = SIG_DFL;
802 
803 		action.sa_mask = 0;
804 		action.sa_flags = 0;
805 		action.sa_userdata = NULL;
806 	}
807 }
808 
809 
810 void
811 Team::InheritSignalActions(Team* parent)
812 {
813 	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
814 }
815 
816 
817 /*!	Adds the given user timer to the team and, if user-defined, assigns it an
818 	ID.
819 
820 	The caller must hold the team's lock.
821 
822 	\param timer The timer to be added. If it doesn't have an ID yet, it is
823 		considered user-defined and will be assigned an ID.
824 	\return \c B_OK, if the timer was added successfully, another error code
825 		otherwise.
826 */
827 status_t
828 Team::AddUserTimer(UserTimer* timer)
829 {
830 	// don't allow addition of timers when already shutting the team down
831 	if (state >= TEAM_STATE_SHUTDOWN)
832 		return B_BAD_TEAM_ID;
833 
834 	// If the timer is user-defined, check timer limit and increment
835 	// user-defined count.
836 	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
837 		return EAGAIN;
838 
839 	fUserTimers.AddTimer(timer);
840 
841 	return B_OK;
842 }
843 
844 
845 /*!	Removes the given user timer from the team.
846 
847 	The caller must hold the team's lock.
848 
849 	\param timer The timer to be removed.
850 */
852 void
853 Team::RemoveUserTimer(UserTimer* timer)
854 {
855 	fUserTimers.RemoveTimer(timer);
856 
857 	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
858 		UserDefinedTimersRemoved(1);
859 }
860 
861 
862 /*!	Deletes all (or all user-defined) user timers of the team.
863 
864 	Timers belonging to the team's threads are not affected.
865 	The caller must hold the team's lock.
866 
867 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
868 		otherwise all timers are deleted.
869 */
870 void
871 Team::DeleteUserTimers(bool userDefinedOnly)
872 {
873 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
874 	UserDefinedTimersRemoved(count);
875 }
876 
877 
878 /*!	If not at the limit yet, increments the team's user-defined timer count.
879 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
880 */
881 bool
882 Team::CheckAddUserDefinedTimer()
883 {
884 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
885 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
886 		atomic_add(&fUserDefinedTimerCount, -1);
887 		return false;
888 	}
889 
890 	return true;
891 }
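
// Note: The optimistic add-then-revert above works without a lock. Concurrent
// callers may briefly push the counter past MAX_USER_TIMERS_PER_TEAM, but
// every caller that hit the limit takes its own increment back, so the count
// never stays above the limit.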
892 
893 
894 /*!	Subtracts the given count from the team's user-defined timer count.
895 	\param count The count to subtract.
896 */
897 void
898 Team::UserDefinedTimersRemoved(int32 count)
899 {
900 	atomic_add(&fUserDefinedTimerCount, -count);
901 }
902 
903 
904 void
905 Team::DeactivateCPUTimeUserTimers()
906 {
907 	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
908 		timer->Deactivate();
909 
910 	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
911 		timer->Deactivate();
912 }
913 
914 
915 /*!	Returns the team's current total CPU time (kernel + user + offset).
916 
917 	The caller must hold \c time_lock.
918 
919 	\param ignoreCurrentRun If \c true and the current thread is one of the
920 		team's threads, don't add the time since the last time \c last_time was
921 		updated. Should be used in "thread unscheduled" scheduler callbacks,
922 		since although the thread is still running at that time, its time has
923 		already been stopped.
924 	\return The team's current total CPU time.
925 */
926 bigtime_t
927 Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
928 {
929 	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
930 		+ dead_threads_user_time;
931 
932 	Thread* currentThread = thread_get_current_thread();
933 	bigtime_t now = system_time();
934 
935 	for (Thread* thread = thread_list; thread != NULL;
936 			thread = thread->team_next) {
937 		bool alreadyLocked = thread == lockedThread;
938 		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
939 		time += thread->kernel_time + thread->user_time;
940 
941 		if (thread->last_time != 0) {
942 			if (!ignoreCurrentRun || thread != currentThread)
943 				time += now - thread->last_time;
944 		}
945 
946 		if (alreadyLocked)
947 			threadTimeLocker.Detach();
948 	}
949 
950 	return time;
951 }
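
// A minimal usage sketch (hypothetical caller) reading the total CPU time
// outside of a scheduler callback; no thread's time_lock is held, hence
// lockedThread is NULL:
//
//	InterruptsSpinLocker timeLocker(team->time_lock);
//	bigtime_t cpuTime = team->CPUTime(false, NULL);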
952 
953 
954 /*!	Returns the team's current user CPU time.
955 
956 	The caller must hold \c time_lock.
957 
958 	\return The team's current user CPU time.
959 */
960 bigtime_t
961 Team::UserCPUTime() const
962 {
963 	bigtime_t time = dead_threads_user_time;
964 
965 	bigtime_t now = system_time();
966 
967 	for (Thread* thread = thread_list; thread != NULL;
968 			thread = thread->team_next) {
969 		SpinLocker threadTimeLocker(thread->time_lock);
970 		time += thread->user_time;
971 
972 		if (thread->last_time != 0 && !thread->in_kernel)
973 			time += now - thread->last_time;
974 	}
975 
976 	return time;
977 }
978 
979 
980 //	#pragma mark - ProcessGroup
981 
982 
983 ProcessGroup::ProcessGroup(pid_t id)
984 	:
985 	id(id),
986 	teams(NULL),
987 	fSession(NULL),
988 	fInOrphanedCheckList(false)
989 {
990 	char lockName[32];
991 	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
992 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
993 }
994 
995 
996 ProcessGroup::~ProcessGroup()
997 {
998 	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));
999 
1000 	// If the group is in the orphaned check list, remove it.
1001 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1002 
1003 	if (fInOrphanedCheckList)
1004 		sOrphanedCheckProcessGroups.Remove(this);
1005 
1006 	orphanedCheckLocker.Unlock();
1007 
1008 	// remove group from the hash table and from the session
1009 	if (fSession != NULL) {
1010 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1011 		sGroupHash.RemoveUnchecked(this);
1012 		groupHashLocker.Unlock();
1013 
1014 		fSession->ReleaseReference();
1015 	}
1016 
1017 	mutex_destroy(&fLock);
1018 }
1019 
1020 
1021 /*static*/ ProcessGroup*
1022 ProcessGroup::Get(pid_t id)
1023 {
1024 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1025 	ProcessGroup* group = sGroupHash.Lookup(id);
1026 	if (group != NULL)
1027 		group->AcquireReference();
1028 	return group;
1029 }
1030 
1031 
1032 /*!	Adds the group to the given session and makes it publicly accessible.
1033 	The caller must not hold the process group hash lock.
1034 */
1035 void
1036 ProcessGroup::Publish(ProcessSession* session)
1037 {
1038 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1039 	PublishLocked(session);
1040 }
1041 
1042 
1043 /*!	Adds the group to the given session and makes it publicly accessible.
1044 	The caller must hold the process group hash lock.
1045 */
1046 void
1047 ProcessGroup::PublishLocked(ProcessSession* session)
1048 {
1049 	ASSERT(sGroupHash.Lookup(this->id) == NULL);
1050 
1051 	fSession = session;
1052 	fSession->AcquireReference();
1053 
1054 	sGroupHash.InsertUnchecked(this);
1055 }
1056 
1057 
1058 /*!	Checks whether the process group is orphaned.
1059 	The caller must hold the group's lock.
1060 	\return \c true, if the group is orphaned, \c false otherwise.
1061 */
1062 bool
1063 ProcessGroup::IsOrphaned() const
1064 {
1065 	// Orphaned Process Group: "A process group in which the parent of every
1066 	// member is either itself a member of the group or is not a member of the
1067 	// group's session." (Open Group Base Specs Issue 7)
1068 	bool orphaned = true;
1069 
1070 	Team* team = teams;
1071 	while (orphaned && team != NULL) {
1072 		team->LockTeamAndParent(false);
1073 
1074 		Team* parent = team->parent;
1075 		if (parent != NULL && parent->group_id != id
1076 			&& parent->session_id == fSession->id) {
1077 			orphaned = false;
1078 		}
1079 
1080 		team->UnlockTeamAndParent();
1081 
1082 		team = team->group_next;
1083 	}
1084 
1085 	return orphaned;
1086 }
1087 
1088 
1089 void
1090 ProcessGroup::ScheduleOrphanedCheck()
1091 {
1092 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1093 
1094 	if (!fInOrphanedCheckList) {
1095 		sOrphanedCheckProcessGroups.Add(this);
1096 		fInOrphanedCheckList = true;
1097 	}
1098 }
1099 
1100 
1101 void
1102 ProcessGroup::UnsetOrphanedCheck()
1103 {
1104 	fInOrphanedCheckList = false;
1105 }
1106 
1107 
1108 //	#pragma mark - ProcessSession
1109 
1110 
1111 ProcessSession::ProcessSession(pid_t id)
1112 	:
1113 	id(id),
1114 	controlling_tty(-1),
1115 	foreground_group(-1)
1116 {
1117 	char lockName[32];
1118 	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
1119 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
1120 }
1121 
1122 
1123 ProcessSession::~ProcessSession()
1124 {
1125 	mutex_destroy(&fLock);
1126 }
1127 
1128 
1129 //	#pragma mark - KDL functions
1130 
1131 
1132 static void
1133 _dump_team_info(Team* team)
1134 {
1135 	kprintf("TEAM: %p\n", team);
1136 	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
1137 		team->id);
1138 	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
1139 	kprintf("name:             '%s'\n", team->Name());
1140 	kprintf("args:             '%s'\n", team->Args());
1141 	kprintf("hash_next:        %p\n", team->hash_next);
1142 	kprintf("parent:           %p", team->parent);
1143 	if (team->parent != NULL) {
1144 		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
1145 	} else
1146 		kprintf("\n");
1147 
1148 	kprintf("children:         %p\n", team->children);
1149 	kprintf("num_threads:      %d\n", team->num_threads);
1150 	kprintf("state:            %d\n", team->state);
1151 	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
1152 	kprintf("io_context:       %p\n", team->io_context);
1153 	if (team->address_space)
1154 		kprintf("address_space:    %p\n", team->address_space);
1155 	kprintf("user data:        %p (area %" B_PRId32 ")\n",
1156 		(void*)team->user_data, team->user_data_area);
1157 	kprintf("free user thread: %p\n", team->free_user_threads);
1158 	kprintf("main_thread:      %p\n", team->main_thread);
1159 	kprintf("thread_list:      %p\n", team->thread_list);
1160 	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
1161 	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
1162 }
1163 
1164 
1165 static int
1166 dump_team_info(int argc, char** argv)
1167 {
1168 	ulong arg;
1169 	bool found = false;
1170 
1171 	if (argc < 2) {
1172 		Thread* thread = thread_get_current_thread();
1173 		if (thread != NULL && thread->team != NULL)
1174 			_dump_team_info(thread->team);
1175 		else
1176 			kprintf("No current team!\n");
1177 		return 0;
1178 	}
1179 
1180 	arg = strtoul(argv[1], NULL, 0);
1181 	if (IS_KERNEL_ADDRESS(arg)) {
1182 		// semi-hack
1183 		_dump_team_info((Team*)arg);
1184 		return 0;
1185 	}
1186 
1187 	// walk through the team list, trying to match name or id
1188 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1189 		Team* team = it.Next();) {
1190 		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
1191 			|| team->id == (team_id)arg) {
1192 			_dump_team_info(team);
1193 			found = true;
1194 			break;
1195 		}
1196 	}
1197 
1198 	if (!found)
1199 		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
1200 	return 0;
1201 }
1202 
1203 
1204 static int
1205 dump_teams(int argc, char** argv)
1206 {
1207 	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
1208 		B_PRINTF_POINTER_WIDTH, "parent");
1209 
1210 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1211 		Team* team = it.Next();) {
1212 		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
1213 	}
1214 
1215 	return 0;
1216 }
1217 
1218 
1219 //	#pragma mark - Private functions
1220 
1221 
1222 /*!	Inserts team \a team into the child list of team \a parent.
1223 
1224 	The caller must hold the lock of both \a parent and \a team.
1225 
1226 	\param parent The parent team.
1227 	\param team The team to be inserted into \a parent's child list.
1228 */
1229 static void
1230 insert_team_into_parent(Team* parent, Team* team)
1231 {
1232 	ASSERT(parent != NULL);
1233 
1234 	team->siblings_next = parent->children;
1235 	parent->children = team;
1236 	team->parent = parent;
1237 }
1238 
1239 
1240 /*!	Removes team \a team from the child list of team \a parent.
1241 
1242 	The caller must hold the lock of both \a parent and \a team.
1243 
1244 	\param parent The parent team.
1245 	\param team The team to be removed from \a parent's child list.
1246 */
1247 static void
1248 remove_team_from_parent(Team* parent, Team* team)
1249 {
1250 	Team* child;
1251 	Team* last = NULL;
1252 
1253 	for (child = parent->children; child != NULL;
1254 			child = child->siblings_next) {
1255 		if (child == team) {
1256 			if (last == NULL)
1257 				parent->children = child->siblings_next;
1258 			else
1259 				last->siblings_next = child->siblings_next;
1260 
1261 			team->parent = NULL;
1262 			break;
1263 		}
1264 		last = child;
1265 	}
1266 }
1267 
1268 
1269 /*!	Returns whether the given team is a session leader.
1270 	The caller must hold the team's lock or its process group's lock.
1271 */
1272 static bool
1273 is_session_leader(Team* team)
1274 {
1275 	return team->session_id == team->id;
1276 }
1277 
1278 
1279 /*!	Returns whether the given team is a process group leader.
1280 	The caller must hold the team's lock or its process group's lock.
1281 */
1282 static bool
1283 is_process_group_leader(Team* team)
1284 {
1285 	return team->group_id == team->id;
1286 }
1287 
1288 
1289 /*!	Inserts the given team into the given process group.
1290 	The caller must hold the process group's lock, the team's lock, and the
1291 	team's parent's lock.
1292 */
1293 static void
1294 insert_team_into_group(ProcessGroup* group, Team* team)
1295 {
1296 	team->group = group;
1297 	team->group_id = group->id;
1298 	team->session_id = group->Session()->id;
1299 
1300 	team->group_next = group->teams;
1301 	group->teams = team;
1302 	group->AcquireReference();
1303 }
1304 
1305 
1306 /*!	Removes the given team from its process group.
1307 
1308 	The caller must hold the process group's lock, the team's lock, and the
1309 	team's parent's lock. Interrupts must be enabled.
1310 
1311 	\param team The team that'll be removed from its process group.
1312 */
1313 static void
1314 remove_team_from_group(Team* team)
1315 {
1316 	ProcessGroup* group = team->group;
1317 	Team* current;
1318 	Team* last = NULL;
1319 
1320 	// the team must be in a process group to let this function have any effect
1321 	if (group == NULL)
1322 		return;
1323 
1324 	for (current = group->teams; current != NULL;
1325 			current = current->group_next) {
1326 		if (current == team) {
1327 			if (last == NULL)
1328 				group->teams = current->group_next;
1329 			else
1330 				last->group_next = current->group_next;
1331 
1332 			team->group = NULL;
1333 			break;
1334 		}
1335 		last = current;
1336 	}
1337 
1338 	team->group = NULL;
1339 	team->group_next = NULL;
1340 
1341 	group->ReleaseReference();
1342 }
1343 
1344 
1345 static status_t
1346 create_team_user_data(Team* team, void* exactAddress = NULL)
1347 {
1348 	void* address;
1349 	uint32 addressSpec;
1350 
1351 	if (exactAddress != NULL) {
1352 		address = exactAddress;
1353 		addressSpec = B_EXACT_ADDRESS;
1354 	} else {
1355 		address = (void*)KERNEL_USER_DATA_BASE;
1356 		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
1357 	}
1358 
1359 	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
1360 		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);
1361 
1362 	virtual_address_restrictions virtualRestrictions = {};
1363 	if (result == B_OK || exactAddress != NULL) {
1364 		if (exactAddress != NULL)
1365 			virtualRestrictions.address = exactAddress;
1366 		else
1367 			virtualRestrictions.address = address;
1368 		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
1369 	} else {
1370 		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
1371 		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
1372 	}
1373 
1374 	physical_address_restrictions physicalRestrictions = {};
1375 	team->user_data_area = create_area_etc(team->id, "user area",
1376 		kTeamUserDataInitialSize, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0,
1377 		&virtualRestrictions, &physicalRestrictions, &address);
1378 	if (team->user_data_area < 0)
1379 		return team->user_data_area;
1380 
1381 	team->user_data = (addr_t)address;
1382 	team->used_user_data = 0;
1383 	team->user_data_size = kTeamUserDataInitialSize;
1384 	team->free_user_threads = NULL;
1385 
1386 	return B_OK;
1387 }
1388 
1389 
1390 static void
1391 delete_team_user_data(Team* team)
1392 {
1393 	if (team->user_data_area >= 0) {
1394 		vm_delete_area(team->id, team->user_data_area, true);
1395 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1396 			kTeamUserDataReservedSize);
1397 
1398 		team->user_data = 0;
1399 		team->used_user_data = 0;
1400 		team->user_data_size = 0;
1401 		team->user_data_area = -1;
1402 		while (free_user_thread* entry = team->free_user_threads) {
1403 			team->free_user_threads = entry->next;
1404 			free(entry);
1405 		}
1406 	}
1407 }
1408 
1409 
1410 static status_t
1411 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1412 	int32 argCount, int32 envCount, char**& _flatArgs)
1413 {
1414 	if (argCount < 0 || envCount < 0)
1415 		return B_BAD_VALUE;
1416 
1417 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1418 		return B_TOO_MANY_ARGS;
1419 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1420 		return B_BAD_VALUE;
1421 
1422 	if (!IS_USER_ADDRESS(userFlatArgs))
1423 		return B_BAD_ADDRESS;
1424 
1425 	// allocate kernel memory
1426 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1427 	if (flatArgs == NULL)
1428 		return B_NO_MEMORY;
1429 
1430 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1431 		free(flatArgs);
1432 		return B_BAD_ADDRESS;
1433 	}
1434 
1435 	// check and relocate the array
1436 	status_t error = B_OK;
1437 	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
1438 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1439 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1440 		if (i == argCount || i == argCount + envCount + 1) {
1441 			// check array null termination
1442 			if (flatArgs[i] != NULL) {
1443 				error = B_BAD_VALUE;
1444 				break;
1445 			}
1446 		} else {
1447 			// check string
1448 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1449 			size_t maxLen = stringEnd - arg;
1450 			if (arg < stringBase || arg >= stringEnd
1451 					|| strnlen(arg, maxLen) == maxLen) {
1452 				error = B_BAD_VALUE;
1453 				break;
1454 			}
1455 
1456 			flatArgs[i] = arg;
1457 		}
1458 	}
1459 
1460 	if (error == B_OK)
1461 		_flatArgs = flatArgs;
1462 	else
1463 		free(flatArgs);
1464 
1465 	return error;
1466 }
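
// For illustration, a buffer that passes the checks above looks like this for
// argCount = 2, envCount = 1 (pointer array first, strings following at
// stringBase):
//
//	flatArgs[0] --> "arg0"
//	flatArgs[1] --> "arg1"
//	flatArgs[2] = NULL                  (terminates the argument vector)
//	flatArgs[3] --> "HOME=/boot/home"
//	flatArgs[4] = NULL                  (terminates the environment vector)
//	"arg0\0arg1\0HOME=/boot/home\0"     (string area)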
1467 
1468 
1469 static void
1470 free_team_arg(struct team_arg* teamArg)
1471 {
1472 	if (teamArg != NULL) {
1473 		free(teamArg->flat_args);
1474 		free(teamArg->path);
1475 		free(teamArg);
1476 	}
1477 }
1478 
1479 
1480 static status_t
1481 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1482 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1483 	port_id port, uint32 token)
1484 {
1485 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1486 	if (teamArg == NULL)
1487 		return B_NO_MEMORY;
1488 
1489 	teamArg->path = strdup(path);
1490 	if (teamArg->path == NULL) {
1491 		free(teamArg);
1492 		return B_NO_MEMORY;
1493 	}
1494 
1495 	// copy the args over
1496 	teamArg->flat_args = flatArgs;
1497 	teamArg->flat_args_size = flatArgsSize;
1498 	teamArg->arg_count = argCount;
1499 	teamArg->env_count = envCount;
1500 	teamArg->flags = 0;
1501 	teamArg->umask = umask;
1502 	teamArg->error_port = port;
1503 	teamArg->error_token = token;
1504 
1505 	// determine the flags from the environment
1506 	const char* const* env = flatArgs + argCount + 1;
1507 	for (int32 i = 0; i < envCount; i++) {
1508 		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
1509 			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
1510 			break;
1511 		}
1512 	}
1513 
1514 	*_teamArg = teamArg;
1515 	return B_OK;
1516 }
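
// The only flag currently derived from the environment is the ASLR opt-out,
// e.g. when a program is started from the shell as
//
//	DISABLE_ASLR=1 ./program
//
// create_team_arg() sets TEAM_ARGS_FLAG_NO_ASLR, which later disables address
// space randomization for the new team.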
1517 
1518 
1519 static status_t
1520 team_create_thread_start_internal(void* args)
1521 {
1522 	status_t err;
1523 	Thread* thread;
1524 	Team* team;
1525 	struct team_arg* teamArgs = (struct team_arg*)args;
1526 	const char* path;
1527 	addr_t entry;
1528 	char** userArgs;
1529 	char** userEnv;
1530 	struct user_space_program_args* programArgs;
1531 	uint32 argCount, envCount;
1532 
1533 	thread = thread_get_current_thread();
1534 	team = thread->team;
1535 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
1536 
1537 	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
1538 		thread->id));
1539 
1540 	// Main stack area layout is currently as follows (starting from 0):
1541 	//
1542 	// size								| usage
1543 	// ---------------------------------+--------------------------------
1544 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
1545 	// TLS_SIZE							| TLS data
1546 	// sizeof(user_space_program_args)	| argument structure for the runtime
1547 	//									| loader
1548 	// flat arguments size				| flat process arguments and environment
1549 
1550 	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
1551 	// the heap
1552 	// TODO: we could reserve the whole USER_STACK_REGION upfront...
1553 
1554 	argCount = teamArgs->arg_count;
1555 	envCount = teamArgs->env_count;
1556 
1557 	programArgs = (struct user_space_program_args*)(thread->user_stack_base
1558 		+ thread->user_stack_size + TLS_SIZE);
1559 
1560 	userArgs = (char**)(programArgs + 1);
1561 	userEnv = userArgs + argCount + 1;
1562 	path = teamArgs->path;
1563 
1564 	if (user_strlcpy(programArgs->program_path, path,
1565 				sizeof(programArgs->program_path)) < B_OK
1566 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1567 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
1568 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1569 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
1570 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1571 				sizeof(port_id)) < B_OK
1572 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1573 				sizeof(uint32)) < B_OK
1574 		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
1575 		|| user_memcpy(userArgs, teamArgs->flat_args,
1576 				teamArgs->flat_args_size) < B_OK) {
1577 		// the team deletion process will clean this mess
1578 		free_team_arg(teamArgs);
1579 		return B_BAD_ADDRESS;
1580 	}
1581 
1582 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1583 
1584 	// set team args and update state
1585 	team->Lock();
1586 	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
1587 	team->state = TEAM_STATE_NORMAL;
1588 	team->Unlock();
1589 
1590 	free_team_arg(teamArgs);
1591 		// the arguments are already on the user stack, we no longer need
1592 		// them in this form
1593 
1594 	// Clone commpage area
1595 	area_id commPageArea = clone_commpage_area(team->id,
1596 		&team->commpage_address);
1597 	if (commPageArea < B_OK) {
1598 		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
1599 			strerror(commPageArea)));
1600 		return commPageArea;
1601 	}
1602 
1603 	// Register commpage image
1604 	image_id commPageImage = get_commpage_image();
1605 	image_info imageInfo;
1606 	err = get_image_info(commPageImage, &imageInfo);
1607 	if (err != B_OK) {
1608 		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
1609 			strerror(err)));
1610 		return err;
1611 	}
1612 	imageInfo.text = team->commpage_address;
1613 	image_id image = register_image(team, &imageInfo, sizeof(image_info));
1614 	if (image < 0) {
1615 		TRACE(("team_create_thread_start: register_image() failed: %s\n",
1616 			strerror(image)));
1617 		return image;
1618 	}
1619 
1620 	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
1621 	// automatic variables with function scope will never be destroyed.
1622 	{
1623 		// find runtime_loader path
1624 		KPath runtimeLoaderPath;
1625 		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
1626 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1627 		if (err < B_OK) {
1628 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1629 				strerror(err)));
1630 			return err;
1631 		}
1632 		runtimeLoaderPath.UnlockBuffer();
1633 		err = runtimeLoaderPath.Append("runtime_loader");
1634 
1635 		if (err == B_OK) {
1636 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
1637 				&entry);
1638 		}
1639 	}
1640 
1641 	if (err < B_OK) {
1642 		// Luckily, we don't have to clean up the mess we created - that's
1643 		// done for us by the normal team deletion process
1644 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1645 			"%s\n", strerror(err)));
1646 		return err;
1647 	}
1648 
1649 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1650 
1651 	// enter userspace -- returns only in case of error
1652 	return thread_enter_userspace_new_team(thread, (addr_t)entry,
1653 		programArgs, team->commpage_address);
1654 }
1655 
1656 
1657 static status_t
1658 team_create_thread_start(void* args)
1659 {
1660 	team_create_thread_start_internal(args);
1661 	team_init_exit_info_on_error(thread_get_current_thread()->team);
1662 	thread_exit();
1663 		// does not return
1664 	return B_OK;
1665 }
1666 
1667 
1668 static thread_id
1669 load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1670 	int32 envCount, int32 priority, team_id parentID, uint32 flags,
1671 	port_id errorPort, uint32 errorToken)
1672 {
1673 	char** flatArgs = _flatArgs;
1674 	thread_id thread;
1675 	status_t status;
1676 	struct team_arg* teamArgs;
1677 	struct team_loading_info loadingInfo;
1678 	io_context* parentIOContext = NULL;
1679 	team_id teamID;
1680 
1681 	if (flatArgs == NULL || argCount == 0)
1682 		return B_BAD_VALUE;
1683 
1684 	const char* path = flatArgs[0];
1685 
1686 	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
1687 		"\n", path, flatArgs, argCount));
1688 
1689 	// cut the path from the main thread name
1690 	const char* threadName = strrchr(path, '/');
1691 	if (threadName != NULL)
1692 		threadName++;
1693 	else
1694 		threadName = path;
1695 
1696 	// create the main thread object
1697 	Thread* mainThread;
1698 	status = Thread::Create(threadName, mainThread);
1699 	if (status != B_OK)
1700 		return status;
1701 	BReference<Thread> mainThreadReference(mainThread, true);
1702 
1703 	// create team object
1704 	Team* team = Team::Create(mainThread->id, path, false);
1705 	if (team == NULL)
1706 		return B_NO_MEMORY;
1707 	BReference<Team> teamReference(team, true);
1708 
1709 	if (flags & B_WAIT_TILL_LOADED) {
1710 		loadingInfo.thread = thread_get_current_thread();
1711 		loadingInfo.result = B_ERROR;
1712 		loadingInfo.done = false;
1713 		team->loading_info = &loadingInfo;
1714 	}
1715 
1716 	// get the parent team
1717 	Team* parent = Team::Get(parentID);
1718 	if (parent == NULL)
1719 		return B_BAD_TEAM_ID;
1720 	BReference<Team> parentReference(parent, true);
1721 
1722 	parent->LockTeamAndProcessGroup();
1723 	team->Lock();
1724 
1725 	// inherit the parent's user/group
1726 	inherit_parent_user_and_group(team, parent);
1727 
1728 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
1729 
1730 	sTeamHash.Insert(team);
1731 	bool teamLimitReached = sUsedTeams >= sMaxTeams;
1732 	if (!teamLimitReached)
1733 		sUsedTeams++;
1734 
1735 	teamsLocker.Unlock();
1736 
1737 	insert_team_into_parent(parent, team);
1738 	insert_team_into_group(parent->group, team);
1739 
1740 	// get a reference to the parent's I/O context -- we need it to create ours
1741 	parentIOContext = parent->io_context;
1742 	vfs_get_io_context(parentIOContext);
1743 
1744 	team->Unlock();
1745 	parent->UnlockTeamAndProcessGroup();
1746 
1747 	// notify team listeners
1748 	sNotificationService.Notify(TEAM_ADDED, team);
1749 
1750 	// check the executable's set-user/group-id permission
1751 	update_set_id_user_and_group(team, path);
1752 
1753 	if (teamLimitReached) {
1754 		status = B_NO_MORE_TEAMS;
1755 		goto err1;
1756 	}
1757 
1758 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1759 		envCount, (mode_t)-1, errorPort, errorToken);
1760 	if (status != B_OK)
1761 		goto err1;
1762 
1763 	_flatArgs = NULL;
1764 		// args are owned by the team_arg structure now
1765 
1766 	// create a new io_context for this team
1767 	team->io_context = vfs_new_io_context(parentIOContext, true);
1768 	if (!team->io_context) {
1769 		status = B_NO_MEMORY;
1770 		goto err2;
1771 	}
1772 
1773 	// We don't need the parent's I/O context any longer.
1774 	vfs_put_io_context(parentIOContext);
1775 	parentIOContext = NULL;
1776 
1777 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1778 	vfs_exec_io_context(team->io_context);
1779 
1780 	// create an address space for this team
1781 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
1782 		&team->address_space);
1783 	if (status != B_OK)
1784 		goto err2;
1785 
1786 	team->address_space->SetRandomizingEnabled(
1787 		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);
1788 
1789 	// create the user data area
1790 	status = create_team_user_data(team);
1791 	if (status != B_OK)
1792 		goto err4;
1793 
1794 	// In case we start the main thread, we shouldn't access the team object
1795 	// afterwards, so cache the team's ID.
1796 	teamID = team->id;
1797 
1798 	// Create a kernel thread, but under the context of the new team
1799 	// The new thread will take over ownership of teamArgs.
1800 	{
1801 		ThreadCreationAttributes threadAttributes(team_create_thread_start,
1802 			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
1803 		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
1804 			+ teamArgs->flat_args_size;
1805 		thread = thread_create_thread(threadAttributes, false);
1806 		if (thread < 0) {
1807 			status = thread;
1808 			goto err5;
1809 		}
1810 	}
1811 
1812 	// The team has been created successfully, so we keep the reference. Or
1813 	// more precisely: It's owned by the team's main thread, now.
1814 	teamReference.Detach();
1815 
1816 	// wait for the loader of the new team to finish its work
1817 	if ((flags & B_WAIT_TILL_LOADED) != 0) {
1818 		if (mainThread != NULL) {
1819 			// resume the team's main thread
1820 			thread_continue(mainThread);
1821 		}
1822 
1823 		// Now suspend ourselves until loading is finished. We will be woken
1824 		// either by the thread, when it finished or aborted loading, or when
1825 		// the team is going to die (e.g. is killed). In either case the one
1826 		// setting `loadingInfo.done' is responsible for removing the info from
1827 		// the team structure.
1828 		while (!loadingInfo.done)
1829 			thread_suspend();
1830 
1831 		if (loadingInfo.result < B_OK)
1832 			return loadingInfo.result;
1833 	}
1834 
1835 	// notify the debugger
1836 	user_debug_team_created(teamID);
1837 
1838 	return thread;
1839 
1840 err5:
1841 	delete_team_user_data(team);
1842 err4:
1843 	team->address_space->Put();
1844 err2:
1845 	free_team_arg(teamArgs);
1846 err1:
1847 	if (parentIOContext != NULL)
1848 		vfs_put_io_context(parentIOContext);
1849 
1850 	// Remove the team structure from the process group, the parent team, and
1851 	// the team hash table and delete the team structure.
1852 	parent->LockTeamAndProcessGroup();
1853 	team->Lock();
1854 
1855 	remove_team_from_group(team);
1856 	remove_team_from_parent(team->parent, team);
1857 
1858 	team->Unlock();
1859 	parent->UnlockTeamAndProcessGroup();
1860 
1861 	teamsLocker.Lock();
1862 	sTeamHash.Remove(team);
1863 	if (!teamLimitReached)
1864 		sUsedTeams--;
1865 	teamsLocker.Unlock();
1866 
1867 	sNotificationService.Notify(TEAM_REMOVED, team);
1868 
1869 	return status;
1870 }
1871 
1872 
1873 /*!	Almost shuts down the current team and loads a new image into it.
1874 	If successful, this function does not return and will take over ownership of
1875 	the arguments provided.
1876 	This function may only be called in a userland team (caused by one of the
1877 	exec*() syscalls).
1878 */
1879 static status_t
1880 exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
1881 	int32 argCount, int32 envCount, mode_t umask)
1882 {
1883 	// NOTE: Since this function normally doesn't return, don't use automatic
1884 	// variables that need destruction in the function scope.
1885 	char** flatArgs = _flatArgs;
1886 	Team* team = thread_get_current_thread()->team;
1887 	struct team_arg* teamArgs;
1888 	const char* threadName;
1889 	thread_id nubThreadID = -1;
1890 
1891 	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
1892 		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
1893 		team->id));
1894 
1895 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1896 
1897 	// switching the kernel at run time is probably not a good idea :)
1898 	if (team == team_get_kernel_team())
1899 		return B_NOT_ALLOWED;
1900 
1901 	// we currently need to be single threaded here
1902 	// TODO: maybe we should just kill all other threads and
1903 	//	make the current thread the team's main thread?
1904 	Thread* currentThread = thread_get_current_thread();
1905 	if (currentThread != team->main_thread)
1906 		return B_NOT_ALLOWED;
1907 
1908 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1909 	// We iterate through the thread list to make sure that there's no other
1910 	// thread.
1911 	TeamLocker teamLocker(team);
1912 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
1913 
1914 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1915 		nubThreadID = team->debug_info.nub_thread;
1916 
1917 	debugInfoLocker.Unlock();
1918 
1919 	for (Thread* thread = team->thread_list; thread != NULL;
1920 			thread = thread->team_next) {
1921 		if (thread != team->main_thread && thread->id != nubThreadID)
1922 			return B_NOT_ALLOWED;
1923 	}
1924 
1925 	team->DeleteUserTimers(true);
1926 	team->ResetSignalsOnExec();
1927 
1928 	teamLocker.Unlock();
1929 
1930 	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
1931 		argCount, envCount, umask, -1, 0);
1932 	if (status != B_OK)
1933 		return status;
1934 
1935 	_flatArgs = NULL;
1936 		// args are owned by the team_arg structure now
1937 
1938 	// TODO: remove team resources if there are any left
1939 	// thread_atkernel_exit() might not be called at all
1940 
1941 	thread_reset_for_exec();
1942 
1943 	user_debug_prepare_for_exec();
1944 
1945 	delete_team_user_data(team);
1946 	vm_delete_areas(team->address_space, false);
1947 	xsi_sem_undo(team);
1948 	delete_owned_ports(team);
1949 	sem_delete_owned_sems(team);
1950 	remove_images(team);
1951 	vfs_exec_io_context(team->io_context);
1952 	delete_realtime_sem_context(team->realtime_sem_context);
1953 	team->realtime_sem_context = NULL;
1954 
1955 	// update ASLR
1956 	team->address_space->SetRandomizingEnabled(
1957 		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);
1958 
1959 	status = create_team_user_data(team);
1960 	if (status != B_OK) {
1961 		// creating the user data failed -- we're toast
1962 		free_team_arg(teamArgs);
1963 		exit_thread(status);
1964 		return status;
1965 	}
1966 
1967 	user_debug_finish_after_exec();
1968 
1969 	// rename the team
1970 
1971 	team->Lock();
1972 	team->SetName(path);
1973 	team->Unlock();
1974 
1975 	// cut the path from the team name and rename the main thread, too
1976 	threadName = strrchr(path, '/');
1977 	if (threadName != NULL)
1978 		threadName++;
1979 	else
1980 		threadName = path;
1981 	rename_thread(thread_get_current_thread_id(), threadName);
1982 
1983 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1984 
1985 	// Update user/group according to the executable's set-user/group-id
1986 	// permission.
1987 	update_set_id_user_and_group(team, path);
1988 
1989 	user_debug_team_exec();
1990 
1991 	// notify team listeners
1992 	sNotificationService.Notify(TEAM_EXEC, team);
1993 
1994 	// get a user thread for the thread
1995 	user_thread* userThread = team_allocate_user_thread(team);
1996 		// cannot fail (the allocation for the team would have failed already)
1997 	ThreadLocker currentThreadLocker(currentThread);
1998 	currentThread->user_thread = userThread;
1999 	currentThreadLocker.Unlock();
2000 
2001 	// create the user stack for the thread
2002 	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
2003 		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
2004 	if (status == B_OK) {
2005 		// prepare the stack, load the runtime loader, and enter userspace
2006 		team_create_thread_start(teamArgs);
2007 			// never returns
2008 	} else
2009 		free_team_arg(teamArgs);
2010 
2011 	// Sorry, we have to kill ourselves, there is no way out anymore
2012 	// (without any areas left and all that).
2013 	exit_thread(status);
2014 
2015 	// We return a status here since the signal that is sent by the
2016 	// call above is not immediately handled.
2017 	return B_ERROR;
2018 }
2019 
2020 
2021 static thread_id
2022 fork_team(void)
2023 {
2024 	Thread* parentThread = thread_get_current_thread();
2025 	Team* parentTeam = parentThread->team;
2026 	Team* team;
2027 	arch_fork_arg* forkArgs;
2028 	struct area_info info;
2029 	thread_id threadID;
2030 	status_t status;
2031 	ssize_t areaCookie;
2032 	int32 imageCookie;
2033 
2034 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2035 
2036 	if (parentTeam == team_get_kernel_team())
2037 		return B_NOT_ALLOWED;
2038 
2039 	// create a new team
2040 	// TODO: this is very similar to load_image_internal() - maybe we can do
2041 	// something about it :)
2042 
2043 	// create the main thread object
2044 	Thread* thread;
2045 	status = Thread::Create(parentThread->name, thread);
2046 	if (status != B_OK)
2047 		return status;
2048 	BReference<Thread> threadReference(thread, true);
2049 
2050 	// create the team object
2051 	team = Team::Create(thread->id, NULL, false);
2052 	if (team == NULL)
2053 		return B_NO_MEMORY;
2054 
2055 	parentTeam->LockTeamAndProcessGroup();
2056 	team->Lock();
2057 
2058 	team->SetName(parentTeam->Name());
2059 	team->SetArgs(parentTeam->Args());
2060 
2061 	team->commpage_address = parentTeam->commpage_address;
2062 
2063 	// Inherit the parent's user/group.
2064 	inherit_parent_user_and_group(team, parentTeam);
2065 
2066 	// inherit signal handlers
2067 	team->InheritSignalActions(parentTeam);
2068 
2069 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2070 
2071 	sTeamHash.Insert(team);
2072 	bool teamLimitReached = sUsedTeams >= sMaxTeams;
2073 	if (!teamLimitReached)
2074 		sUsedTeams++;
2075 
2076 	teamsLocker.Unlock();
2077 
2078 	insert_team_into_parent(parentTeam, team);
2079 	insert_team_into_group(parentTeam->group, team);
2080 
2081 	team->Unlock();
2082 	parentTeam->UnlockTeamAndProcessGroup();
2083 
2084 	// notify team listeners
2085 	sNotificationService.Notify(TEAM_ADDED, team);
2086 
2087 	// inherit some team debug flags
2088 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2089 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2090 
2091 	if (teamLimitReached) {
2092 		status = B_NO_MORE_TEAMS;
2093 		goto err1;
2094 	}
2095 
2096 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2097 	if (forkArgs == NULL) {
2098 		status = B_NO_MEMORY;
2099 		goto err1;
2100 	}
2101 
2102 	// create a new io_context for this team
2103 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2104 	if (!team->io_context) {
2105 		status = B_NO_MEMORY;
2106 		goto err2;
2107 	}
2108 
2109 	// duplicate the realtime sem context
2110 	if (parentTeam->realtime_sem_context) {
2111 		team->realtime_sem_context = clone_realtime_sem_context(
2112 			parentTeam->realtime_sem_context);
2113 		if (team->realtime_sem_context == NULL) {
2114 			status = B_NO_MEMORY;
2115 			goto err2;
2116 		}
2117 	}
2118 
2119 	// create an address space for this team
2120 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2121 		&team->address_space);
2122 	if (status < B_OK)
2123 		goto err3;
2124 
2125 	// copy all areas of the team
2126 	// TODO: should be able to handle stack areas differently (ie. don't have
2127 	// them copy-on-write)
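	// (vm_copy_area() clones each area into the child at the same address --
	// B_CLONE_ADDRESS -- presumably marking writable pages copy-on-write,
	// which is what the TODO above is about: stack pages get written almost
	// immediately, so COW buys little for them.)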
2128 
2129 	areaCookie = 0;
2130 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2131 		if (info.area == parentTeam->user_data_area) {
2132 			// don't clone the user area; just create a new one
2133 			status = create_team_user_data(team, info.address);
2134 			if (status != B_OK)
2135 				break;
2136 
2137 			thread->user_thread = team_allocate_user_thread(team);
2138 		} else {
2139 			void* address;
2140 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2141 				&address, B_CLONE_ADDRESS, info.protection, info.area);
2142 			if (area < B_OK) {
2143 				status = area;
2144 				break;
2145 			}
2146 
2147 			if (info.area == parentThread->user_stack_area)
2148 				thread->user_stack_area = area;
2149 		}
2150 	}
2151 
2152 	if (status < B_OK)
2153 		goto err4;
2154 
2155 	if (thread->user_thread == NULL) {
2156 #if KDEBUG
2157 		panic("user data area not found, parent area is %" B_PRId32,
2158 			parentTeam->user_data_area);
2159 #endif
2160 		status = B_ERROR;
2161 		goto err4;
2162 	}
2163 
2164 	thread->user_stack_base = parentThread->user_stack_base;
2165 	thread->user_stack_size = parentThread->user_stack_size;
2166 	thread->user_local_storage = parentThread->user_local_storage;
2167 	thread->sig_block_mask = parentThread->sig_block_mask;
2168 	thread->signal_stack_base = parentThread->signal_stack_base;
2169 	thread->signal_stack_size = parentThread->signal_stack_size;
2170 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2171 
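	// Snapshot the caller's userland register state into forkArgs; the new
	// team's main thread will be resumed from this frame (presumably with a
	// return value of 0, which is what gives fork() its child-side result).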
2172 	arch_store_fork_frame(forkArgs);
2173 
2174 	// copy image list
2175 	image_info imageInfo;
2176 	imageCookie = 0;
2177 	while (get_next_image_info(parentTeam->id, &imageCookie, &imageInfo)
2178 			== B_OK) {
2179 		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
2180 		if (image < 0)
2181 			goto err5;
2182 	}
2183 
2184 	// create the main thread
2185 	{
2186 		ThreadCreationAttributes threadCreationAttributes(NULL,
2187 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2188 		threadCreationAttributes.forkArgs = forkArgs;
2189 		threadID = thread_create_thread(threadCreationAttributes, false);
2190 		if (threadID < 0) {
2191 			status = threadID;
2192 			goto err5;
2193 		}
2194 	}
2195 
2196 	// notify the debugger
2197 	user_debug_team_created(team->id);
2198 
2199 	T(TeamForked(threadID));
2200 
2201 	resume_thread(threadID);
2202 	return threadID;
2203 
2204 err5:
2205 	remove_images(team);
2206 err4:
2207 	team->address_space->RemoveAndPut();
2208 err3:
2209 	delete_realtime_sem_context(team->realtime_sem_context);
2210 err2:
2211 	free(forkArgs);
2212 err1:
2213 	// Remove the team structure from the process group, the parent team, and
2214 	// the team hash table and delete the team structure.
2215 	parentTeam->LockTeamAndProcessGroup();
2216 	team->Lock();
2217 
2218 	remove_team_from_group(team);
2219 	remove_team_from_parent(team->parent, team);
2220 
2221 	team->Unlock();
2222 	parentTeam->UnlockTeamAndProcessGroup();
2223 
2224 	teamsLocker.Lock();
2225 	sTeamHash.Remove(team);
2226 	if (!teamLimitReached)
2227 		sUsedTeams--;
2228 	teamsLocker.Unlock();
2229 
2230 	sNotificationService.Notify(TEAM_REMOVED, team);
2231 
2232 	team->ReleaseReference();
2233 
2234 	return status;
2235 }
2236 
2237 
2238 /*!	Returns whether the specified team \a parent has any children belonging to the
2239 	process group with the specified ID \a groupID.
2240 	The caller must hold \a parent's lock.
2241 */
2242 static bool
2243 has_children_in_group(Team* parent, pid_t groupID)
2244 {
2245 	for (Team* child = parent->children; child != NULL;
2246 			child = child->siblings_next) {
2247 		TeamLocker childLocker(child);
2248 		if (child->group_id == groupID)
2249 			return true;
2250 	}
2251 
2252 	return false;
2253 }
2254 
2255 
2256 /*!	Returns the first job control entry from \a children, which matches \a id.
2257 	\a id can be:
2258 	- \code > 0 \endcode: Matching an entry with that team ID.
2259 	- \code == -1 \endcode: Matching any entry.
2260 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2261 	\c 0 is an invalid value for \a id.
2262 
2263 	The caller must hold the lock of the team that \a children belongs to.
2264 
2265 	\param children The job control entry list to check.
2266 	\param id The match criterion.
2267 	\return The first matching entry or \c NULL, if none matches.
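	A hypothetical caller-side sketch of how a waitpid()-style \c pid argument
	maps onto this encoding (\c current_group stands in for the caller's
	process group ID):
	\code
	pid_t id = pid;				// pid > 0: wait for that team only
	if (pid == 0)
		id = -current_group;	// pid == 0: any child in the caller's group
	// pid == -1 ("any child") and pid < -1 ("any child in group -pid")
	// already match the encoding this function expects.
	\endcode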
2268 */
2269 static job_control_entry*
2270 get_job_control_entry(team_job_control_children& children, pid_t id)
2271 {
2272 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2273 		 job_control_entry* entry = it.Next();) {
2274 
2275 		if (id > 0) {
2276 			if (entry->thread == id)
2277 				return entry;
2278 		} else if (id == -1) {
2279 			return entry;
2280 		} else {
2281 			pid_t processGroup
2282 				= (entry->team ? entry->team->group_id : entry->group_id);
2283 			if (processGroup == -id)
2284 				return entry;
2285 		}
2286 	}
2287 
2288 	return NULL;
2289 }
2290 
2291 
2292 /*!	Returns the first job control entry from one of \a team's dead, continued,
2293 	or stopped children which matches \a id.
2294 	\a id can be:
2295 	- \code > 0 \endcode: Matching an entry with that team ID.
2296 	- \code == -1 \endcode: Matching any entry.
2297 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2298 	\c 0 is an invalid value for \a id.
2299 
2300 	The caller must hold \a team's lock.
2301 
2302 	\param team The team whose dead, stopped, and continued child lists shall be
2303 		checked.
2304 	\param id The match criterion.
2305 	\param flags Specifies which children shall be considered. Dead children
2306 		always are. Stopped children are considered when \c WUNTRACED is set
2307 		in \a flags, continued children when \c WCONTINUED is set.
2309 	\return The first matching entry or \c NULL, if none matches.
2310 */
2311 static job_control_entry*
2312 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2313 {
2314 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
2315 
2316 	if (entry == NULL && (flags & WCONTINUED) != 0)
2317 		entry = get_job_control_entry(team->continued_children, id);
2318 
2319 	if (entry == NULL && (flags & WUNTRACED) != 0)
2320 		entry = get_job_control_entry(team->stopped_children, id);
2321 
2322 	return entry;
2323 }
2324 
2325 
2326 job_control_entry::job_control_entry()
2327 	:
2328 	has_group_ref(false)
2329 {
2330 }
2331 
2332 
2333 job_control_entry::~job_control_entry()
2334 {
2335 	if (has_group_ref) {
2336 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2337 
2338 		ProcessGroup* group = sGroupHash.Lookup(group_id);
2339 		if (group == NULL) {
2340 			panic("job_control_entry::~job_control_entry(): unknown group "
2341 				"ID: %" B_PRId32, group_id);
2342 			return;
2343 		}
2344 
2345 		groupHashLocker.Unlock();
2346 
2347 		group->ReleaseReference();
2348 	}
2349 }
2350 
2351 
2352 /*!	Invoked when the owning team is dying, initializing the entry according to
2353 	the dead state.
2354 
2355 	The caller must hold the owning team's lock and the scheduler lock.
2356 */
2357 void
2358 job_control_entry::InitDeadState()
2359 {
2360 	if (team != NULL) {
2361 		ASSERT(team->exit.initialized);
2362 
2363 		group_id = team->group_id;
2364 		team->group->AcquireReference();
2365 		has_group_ref = true;
2366 
2367 		thread = team->id;
2368 		status = team->exit.status;
2369 		reason = team->exit.reason;
2370 		signal = team->exit.signal;
2371 		signaling_user = team->exit.signaling_user;
2372 
2373 		team = NULL;
2374 	}
2375 }
2376 
2377 
2378 job_control_entry&
2379 job_control_entry::operator=(const job_control_entry& other)
2380 {
2381 	state = other.state;
2382 	thread = other.thread;
2383 	signal = other.signal;
2384 	has_group_ref = false;
2385 	signaling_user = other.signaling_user;
2386 	team = other.team;
2387 	group_id = other.group_id;
2388 	status = other.status;
2389 	reason = other.reason;
2390 
2391 	return *this;
2392 }
2393 
2394 
2395 /*! This is the kernel backend for waitid().
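	The \a flags are the POSIX wait flags, used as in the code below:
	\c WNOHANG makes the call fail with \c B_WOULD_BLOCK instead of blocking,
	\c WUNTRACED and \c WCONTINUED also report stopped resp. continued
	children, and \c WNOWAIT reports without consuming the state.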
2396 */
2397 static thread_id
2398 wait_for_child(pid_t child, uint32 flags, siginfo_t& _info)
2399 {
2400 	Thread* thread = thread_get_current_thread();
2401 	Team* team = thread->team;
2402 	struct job_control_entry foundEntry;
2403 	struct job_control_entry* freeDeathEntry = NULL;
2404 	status_t status = B_OK;
2405 
2406 	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
2407 		child, flags));
2408 
2409 	T(WaitForChild(child, flags));
2410 
2411 	pid_t originalChild = child;
2412 
2413 	bool ignoreFoundEntries = false;
2414 	bool ignoreFoundEntriesChecked = false;
2415 
2416 	while (true) {
2417 		// lock the team
2418 		TeamLocker teamLocker(team);
2419 
2420 		// A 0 child argument means to wait for all children in the process
2421 		// group of the calling team.
2422 		child = originalChild == 0 ? -team->group_id : originalChild;
2423 
2424 		// check whether any condition holds
2425 		job_control_entry* entry = get_job_control_entry(team, child, flags);
2426 
2427 		// If we don't have an entry yet, check whether there are any children
2428 		// complying with the process group specification at all.
2429 		if (entry == NULL) {
2432 			bool childrenExist = false;
2433 			if (child == -1) {
2434 				childrenExist = team->children != NULL;
2435 			} else if (child < -1) {
2436 				childrenExist = has_children_in_group(team, -child);
2437 			} else {
2438 				if (Team* childTeam = Team::Get(child)) {
2439 					BReference<Team> childTeamReference(childTeam, true);
2440 					TeamLocker childTeamLocker(childTeam);
2441 					childrenExist = childTeam->parent == team;
2442 				}
2443 			}
2444 
2445 			if (!childrenExist) {
2446 				// there is no child we could wait for
2447 				status = ECHILD;
2448 			} else {
2449 				// the children we're waiting for are still running
2450 				status = B_WOULD_BLOCK;
2451 			}
2452 		} else {
2453 			// got something
2454 			foundEntry = *entry;
2455 
2456 			// unless WNOWAIT has been specified, "consume" the wait state
2457 			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
2458 				if (entry->state == JOB_CONTROL_STATE_DEAD) {
2459 					// The child is dead. Reap its death entry.
2460 					freeDeathEntry = entry;
2461 					team->dead_children.entries.Remove(entry);
2462 					team->dead_children.count--;
2463 				} else {
2464 					// The child is well. Reset its job control state.
2465 					team_set_job_control_state(entry->team,
2466 						JOB_CONTROL_STATE_NONE, NULL);
2467 				}
2468 			}
2469 		}
2470 
2471 		// If we haven't got anything yet, prepare for waiting for the
2472 		// condition variable.
2473 		ConditionVariableEntry deadWaitEntry;
2474 
2475 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
2476 			team->dead_children.condition_variable.Add(&deadWaitEntry);
2477 
2478 		teamLocker.Unlock();
2479 
2480 		// we got our entry and can return to our caller
2481 		if (status == B_OK) {
2482 			if (ignoreFoundEntries) {
2483 				// ... unless we shall ignore found entries
2484 				delete freeDeathEntry;
2485 				freeDeathEntry = NULL;
2486 				continue;
2487 			}
2488 
2489 			break;
2490 		}
2491 
2492 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
2493 			T(WaitForChildDone(status));
2494 			return status;
2495 		}
2496 
2497 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
2498 		if (status == B_INTERRUPTED) {
2499 			T(WaitForChildDone(status));
2500 			return status;
2501 		}
2502 
2503 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
2504 		// all our children are dead and fail with ECHILD. We check the
2505 		// condition at this point.
2506 		if (!ignoreFoundEntriesChecked) {
2507 			teamLocker.Lock();
2508 
2509 			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
2510 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
2511 				|| handler.sa_handler == SIG_IGN) {
2512 				ignoreFoundEntries = true;
2513 			}
2514 
2515 			teamLocker.Unlock();
2516 
2517 			ignoreFoundEntriesChecked = true;
2518 		}
2519 	}
2520 
2521 	delete freeDeathEntry;
2522 
2523 	// When we got here, we have a valid death entry, and already got
2524 	// unregistered from the team or group. Fill in the returned info.
2525 	memset(&_info, 0, sizeof(_info));
2526 	_info.si_signo = SIGCHLD;
2527 	_info.si_pid = foundEntry.thread;
2528 	_info.si_uid = foundEntry.signaling_user;
2529 	// TODO: Fill in si_errno?
2530 
2531 	switch (foundEntry.state) {
2532 		case JOB_CONTROL_STATE_DEAD:
2533 			_info.si_code = foundEntry.reason;
2534 			_info.si_status = foundEntry.reason == CLD_EXITED
2535 				? foundEntry.status : foundEntry.signal;
2536 			break;
2537 		case JOB_CONTROL_STATE_STOPPED:
2538 			_info.si_code = CLD_STOPPED;
2539 			_info.si_status = foundEntry.signal;
2540 			break;
2541 		case JOB_CONTROL_STATE_CONTINUED:
2542 			_info.si_code = CLD_CONTINUED;
2543 			_info.si_status = 0;
2544 			break;
2545 		case JOB_CONTROL_STATE_NONE:
2546 			// can't happen
2547 			break;
2548 	}
2549 
2550 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
2551 	// status is available.
2552 	TeamLocker teamLocker(team);
2553 	InterruptsSpinLocker signalLocker(team->signal_lock);
2554 	SpinLocker threadCreationLocker(gThreadCreationLock);
2555 
2556 	if (is_team_signal_blocked(team, SIGCHLD)) {
2557 		if (get_job_control_entry(team, child, flags) == NULL)
2558 			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
2559 	}
2560 
2561 	threadCreationLocker.Unlock();
2562 	signalLocker.Unlock();
2563 	teamLocker.Unlock();
2564 
2565 	// When the team is dead, the main thread continues to live in the kernel
2566 	// team for a very short time. To avoid surprises for the caller, we'd
2567 	// rather wait until the thread is really gone.
2568 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
2569 		wait_for_thread(foundEntry.thread, NULL);
2570 
2571 	T(WaitForChildDone(foundEntry));
2572 
2573 	return foundEntry.thread;
2574 }
2575 
2576 
2577 /*! Fills the team_info structure with information from the specified team.
2578 	Interrupts must be enabled. The team must not be locked.
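	This is what ultimately backs the public get_team_info() API; a minimal
	userland sketch (assuming \c B_CURRENT_TEAM is accepted as the ID):
	\code
	team_info info;
	if (get_team_info(B_CURRENT_TEAM, &info) == B_OK)
		printf("team %" B_PRId32 ": %s\n", info.team, info.args);
	\endcode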
2579 */
2580 static status_t
2581 fill_team_info(Team* team, team_info* info, size_t size)
2582 {
2583 	if (size != sizeof(team_info))
2584 		return B_BAD_VALUE;
2585 
2586 	// TODO: Set more information for team_info
2587 	memset(info, 0, size);
2588 
2589 	info->team = team->id;
2590 		// immutable
2591 	info->image_count = count_images(team);
2592 		// protected by sImageMutex
2593 
2594 	TeamLocker teamLocker(team);
2595 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2596 
2597 	info->thread_count = team->num_threads;
2598 	//info->area_count =
2599 	info->debugger_nub_thread = team->debug_info.nub_thread;
2600 	info->debugger_nub_port = team->debug_info.nub_port;
2601 	info->uid = team->effective_uid;
2602 	info->gid = team->effective_gid;
2603 
2604 	strlcpy(info->args, team->Args(), sizeof(info->args));
2605 	info->argc = 1;
2606 
2607 	return B_OK;
2608 }
2609 
2610 
2611 /*!	Returns whether the process group contains stopped processes.
2612 	The caller must hold the process group's lock.
2613 */
2614 static bool
2615 process_group_has_stopped_processes(ProcessGroup* group)
2616 {
2617 	Team* team = group->teams;
2618 	while (team != NULL) {
2619 		// the parent team's lock guards the job control entry -- acquire it
2620 		team->LockTeamAndParent(false);
2621 
2622 		if (team->job_control_entry != NULL
2623 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2624 			team->UnlockTeamAndParent();
2625 			return true;
2626 		}
2627 
2628 		team->UnlockTeamAndParent();
2629 
2630 		team = team->group_next;
2631 	}
2632 
2633 	return false;
2634 }
2635 
2636 
2637 /*!	Iterates through all process groups queued in team_remove_team() and signals
2638 	those that are orphaned and have stopped processes.
2639 	The caller must not hold any team or process group locks.
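	(Per POSIX, a process group is orphaned once no member has a parent in a
	different process group of the same session. If such a group contains
	stopped processes, it gets SIGHUP followed by SIGCONT, so that those
	processes don't sleep forever with no job control shell left to wake
	them.)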
2640 */
2641 static void
2642 orphaned_process_group_check()
2643 {
2644 	// process as long as there are groups in the list
2645 	while (true) {
2646 		// remove the head from the list
2647 		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
2648 
2649 		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
2650 		if (group == NULL)
2651 			return;
2652 
2653 		group->UnsetOrphanedCheck();
2654 		BReference<ProcessGroup> groupReference(group);
2655 
2656 		orphanedCheckLocker.Unlock();
2657 
2658 		AutoLocker<ProcessGroup> groupLocker(group);
2659 
2660 		// If the group is orphaned and contains stopped processes, we're
2661 		// supposed to send SIGHUP + SIGCONT.
2662 		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
2663 			Thread* currentThread = thread_get_current_thread();
2664 
2665 			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
2666 			send_signal_to_process_group_locked(group, signal, 0);
2667 
2668 			signal.SetNumber(SIGCONT);
2669 			send_signal_to_process_group_locked(group, signal, 0);
2670 		}
2671 	}
2672 }
2673 
2674 
2675 static status_t
2676 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2677 	uint32 flags)
2678 {
2679 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2680 		return B_BAD_VALUE;
2681 
2682 	// get the team
2683 	Team* team = Team::GetAndLock(id);
2684 	if (team == NULL)
2685 		return B_BAD_TEAM_ID;
2686 	BReference<Team> teamReference(team, true);
2687 	TeamLocker teamLocker(team, true);
2688 
2689 	if ((flags & B_CHECK_PERMISSION) != 0) {
2690 		uid_t uid = geteuid();
2691 		if (uid != 0 && uid != team->effective_uid)
2692 			return B_NOT_ALLOWED;
2693 	}
2694 
2695 	bigtime_t kernelTime = 0;
2696 	bigtime_t userTime = 0;
2697 
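	// Sum up the CPU time of all live threads plus the time already
	// accumulated for dead threads (and, for B_TEAM_USAGE_CHILDREN, for
	// dead children).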
2698 	switch (who) {
2699 		case B_TEAM_USAGE_SELF:
2700 		{
2701 			Thread* thread = team->thread_list;
2702 
2703 			for (; thread != NULL; thread = thread->team_next) {
2704 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2705 				kernelTime += thread->kernel_time;
2706 				userTime += thread->user_time;
2707 			}
2708 
2709 			kernelTime += team->dead_threads_kernel_time;
2710 			userTime += team->dead_threads_user_time;
2711 			break;
2712 		}
2713 
2714 		case B_TEAM_USAGE_CHILDREN:
2715 		{
2716 			Team* child = team->children;
2717 			for (; child != NULL; child = child->siblings_next) {
2718 				TeamLocker childLocker(child);
2719 
2720 				Thread* thread = child->thread_list;
2721 
2722 				for (; thread != NULL; thread = thread->team_next) {
2723 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2724 					kernelTime += thread->kernel_time;
2725 					userTime += thread->user_time;
2726 				}
2727 
2728 				kernelTime += child->dead_threads_kernel_time;
2729 				userTime += child->dead_threads_user_time;
2730 			}
2731 
2732 			kernelTime += team->dead_children.kernel_time;
2733 			userTime += team->dead_children.user_time;
2734 			break;
2735 		}
2736 	}
2737 
2738 	info->kernel_time = kernelTime;
2739 	info->user_time = userTime;
2740 
2741 	return B_OK;
2742 }
2743 
2744 
2745 //	#pragma mark - Private kernel API
2746 
2747 
2748 status_t
2749 team_init(kernel_args* args)
2750 {
2751 	// create the team hash table
2752 	new(&sTeamHash) TeamTable;
2753 	if (sTeamHash.Init(64) != B_OK)
2754 		panic("Failed to init team hash table!");
2755 
2756 	new(&sGroupHash) ProcessGroupHashTable;
2757 	if (sGroupHash.Init() != B_OK)
2758 		panic("Failed to init process group hash table!");
2759 
2760 	// create initial session and process groups
2761 
2762 	ProcessSession* session = new(std::nothrow) ProcessSession(1);
2763 	if (session == NULL)
2764 		panic("Could not create initial session.\n");
2765 	BReference<ProcessSession> sessionReference(session, true);
2766 
2767 	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
2768 	if (group == NULL)
2769 		panic("Could not create initial process group.\n");
2770 	BReference<ProcessGroup> groupReference(group, true);
2771 
2772 	group->Publish(session);
2773 
2774 	// create the kernel team
2775 	sKernelTeam = Team::Create(1, "kernel_team", true);
2776 	if (sKernelTeam == NULL)
2777 		panic("could not create kernel team!\n");
2778 	sKernelTeam->SetArgs(sKernelTeam->Name());
2779 	sKernelTeam->state = TEAM_STATE_NORMAL;
2780 
2781 	sKernelTeam->saved_set_uid = 0;
2782 	sKernelTeam->real_uid = 0;
2783 	sKernelTeam->effective_uid = 0;
2784 	sKernelTeam->saved_set_gid = 0;
2785 	sKernelTeam->real_gid = 0;
2786 	sKernelTeam->effective_gid = 0;
2787 	sKernelTeam->supplementary_groups = NULL;
2788 	sKernelTeam->supplementary_group_count = 0;
2789 
2790 	insert_team_into_group(group, sKernelTeam);
2791 
2792 	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
2793 	if (sKernelTeam->io_context == NULL)
2794 		panic("could not create io_context for kernel team!\n");
2795 
2796 	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
2797 		dprintf("Failed to resize FD table for kernel team!\n");
2798 
2799 	// stick it in the team hash
2800 	sTeamHash.Insert(sKernelTeam);
2801 
2802 	add_debugger_command_etc("team", &dump_team_info,
2803 		"Dump info about a particular team",
2804 		"[ <id> | <address> | <name> ]\n"
2805 		"Prints information about the specified team. If no argument is given\n"
2806 		"the current team is selected.\n"
2807 		"  <id>       - The ID of the team.\n"
2808 		"  <address>  - The address of the team structure.\n"
2809 		"  <name>     - The team's name.\n", 0);
2810 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2811 		"\n"
2812 		"Prints a list of all existing teams.\n", 0);
2813 
2814 	new(&sNotificationService) TeamNotificationService();
2815 
2816 	sNotificationService.Register();
2817 
2818 	return B_OK;
2819 }
2820 
2821 
2822 int32
2823 team_max_teams(void)
2824 {
2825 	return sMaxTeams;
2826 }
2827 
2828 
2829 int32
2830 team_used_teams(void)
2831 {
2832 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2833 	return sUsedTeams;
2834 }
2835 
2836 
2837 /*! Returns a death entry of a child team specified by ID (if any).
2838 	The caller must hold the team's lock.
2839 
2840 	\param team The team whose dead children list to check.
2841 	\param child The ID of the child whose death entry to look up. Must be > 0.
2842 	\param _deleteEntry Return variable, indicating whether the caller needs to
2843 		delete the returned entry.
2844 	\return The death entry of the matching team, or \c NULL, if no death entry
2845 		for the team was found.
2846 */
2847 job_control_entry*
2848 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2849 {
2850 	if (child <= 0)
2851 		return NULL;
2852 
2853 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2854 		child);
2855 	if (entry) {
2856 		// remove the entry only if the caller is the parent of the found team
2857 		if (team_get_current_team_id() == entry->thread) {
2858 			team->dead_children.entries.Remove(entry);
2859 			team->dead_children.count--;
2860 			*_deleteEntry = true;
2861 		} else {
2862 			*_deleteEntry = false;
2863 		}
2864 	}
2865 
2866 	return entry;
2867 }
2868 
2869 
2870 /*! Quick check to see if we have a valid team ID. */
2871 bool
2872 team_is_valid(team_id id)
2873 {
2874 	if (id <= 0)
2875 		return false;
2876 
2877 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2878 
2879 	return team_get_team_struct_locked(id) != NULL;
2880 }
2881 
2882 
2883 Team*
2884 team_get_team_struct_locked(team_id id)
2885 {
2886 	return sTeamHash.Lookup(id);
2887 }
2888 
2889 
2890 void
2891 team_set_controlling_tty(int32 ttyIndex)
2892 {
2893 	// lock the team, so its session won't change while we're playing with it
2894 	Team* team = thread_get_current_thread()->team;
2895 	TeamLocker teamLocker(team);
2896 
2897 	// get and lock the session
2898 	ProcessSession* session = team->group->Session();
2899 	AutoLocker<ProcessSession> sessionLocker(session);
2900 
2901 	// set the session's fields
2902 	session->controlling_tty = ttyIndex;
2903 	session->foreground_group = -1;
2904 }
2905 
2906 
2907 int32
2908 team_get_controlling_tty()
2909 {
2910 	// lock the team, so its session won't change while we're playing with it
2911 	Team* team = thread_get_current_thread()->team;
2912 	TeamLocker teamLocker(team);
2913 
2914 	// get and lock the session
2915 	ProcessSession* session = team->group->Session();
2916 	AutoLocker<ProcessSession> sessionLocker(session);
2917 
2918 	// get the session's field
2919 	return session->controlling_tty;
2920 }
2921 
2922 
2923 status_t
2924 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2925 {
2926 	// lock the team, so its session won't change while we're playing with it
2927 	Thread* thread = thread_get_current_thread();
2928 	Team* team = thread->team;
2929 	TeamLocker teamLocker(team);
2930 
2931 	// get and lock the session
2932 	ProcessSession* session = team->group->Session();
2933 	AutoLocker<ProcessSession> sessionLocker(session);
2934 
2935 	// check given TTY -- must be the controlling tty of the calling process
2936 	if (session->controlling_tty != ttyIndex)
2937 		return ENOTTY;
2938 
2939 	// check given process group -- must belong to our session
2940 	{
2941 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2942 		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
2943 		if (group == NULL || group->Session() != session)
2944 			return B_BAD_VALUE;
2945 	}
2946 
2947 	// If we are a background group, we can do that unharmed only when we
2948 	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
2949 	if (session->foreground_group != -1
2950 		&& session->foreground_group != team->group_id
2951 		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
2952 		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
2953 		InterruptsSpinLocker signalLocker(team->signal_lock);
2954 
2955 		if (!is_team_signal_blocked(team, SIGTTOU)) {
2956 			pid_t groupID = team->group_id;
2957 
2958 			signalLocker.Unlock();
2959 			sessionLocker.Unlock();
2960 			teamLocker.Unlock();
2961 
2962 			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
2963 			send_signal_to_process_group(groupID, signal, 0);
2964 			return B_INTERRUPTED;
2965 		}
2966 	}
2967 
2968 	session->foreground_group = processGroupID;
2969 
2970 	return B_OK;
2971 }
2972 
2973 
2974 /*!	Removes the specified team from the global team hash, from its process
2975 	group, and from its parent.
2976 	It also moves all of its children to the kernel team.
2977 
2978 	The caller must hold the following locks:
2979 	- \a team's process group's lock,
2980 	- the kernel team's lock,
2981 	- \a team's parent team's lock (might be the kernel team), and
2982 	- \a team's lock.
2983 */
2984 void
2985 team_remove_team(Team* team, pid_t& _signalGroup)
2986 {
2987 	Team* parent = team->parent;
2988 
2989 	// remember how long this team lasted
2990 	parent->dead_children.kernel_time += team->dead_threads_kernel_time
2991 		+ team->dead_children.kernel_time;
2992 	parent->dead_children.user_time += team->dead_threads_user_time
2993 		+ team->dead_children.user_time;
2994 
2995 	// remove the team from the hash table
2996 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2997 	sTeamHash.Remove(team);
2998 	sUsedTeams--;
2999 	teamsLocker.Unlock();
3000 
3001 	// The team can no longer be accessed by ID. Navigation to it is still
3002 	// possible from its process group and its parent and children, but that
3003 	// will be rectified shortly.
3004 	team->state = TEAM_STATE_DEATH;
3005 
3006 	// If we're a controlling process (i.e. a session leader with controlling
3007 	// terminal), there's a bit of signaling we have to do. We can't do any of
3008 	// the signaling here due to the bunch of locks we're holding, but we need
3009 	// to determine whom to signal.
3010 	_signalGroup = -1;
3011 	bool isSessionLeader = false;
3012 	if (team->session_id == team->id
3013 		&& team->group->Session()->controlling_tty >= 0) {
3014 		isSessionLeader = true;
3015 
3016 		ProcessSession* session = team->group->Session();
3017 
3018 		AutoLocker<ProcessSession> sessionLocker(session);
3019 
3020 		session->controlling_tty = -1;
3021 		_signalGroup = session->foreground_group;
3022 	}
3023 
3024 	// remove us from our process group
3025 	remove_team_from_group(team);
3026 
3027 	// move the team's children to the kernel team
3028 	while (Team* child = team->children) {
3029 		// remove the child from the current team and add it to the kernel team
3030 		TeamLocker childLocker(child);
3031 
3032 		remove_team_from_parent(team, child);
3033 		insert_team_into_parent(sKernelTeam, child);
3034 
3035 		// move job control entries too
3036 		sKernelTeam->stopped_children.entries.MoveFrom(
3037 			&team->stopped_children.entries);
3038 		sKernelTeam->continued_children.entries.MoveFrom(
3039 			&team->continued_children.entries);
3040 
3041 		// If the team was a session leader with controlling terminal,
3042 		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
3043 		// groups with stopped processes. Due to locking complications we can't
3044 		// do that here, so we only check whether we were a reason for the
3045 		// child's process group not being an orphan and, if so, schedule a
3046 		// later check (cf. orphaned_process_group_check()).
3047 		if (isSessionLeader) {
3048 			ProcessGroup* childGroup = child->group;
3049 			if (childGroup->Session()->id == team->session_id
3050 				&& childGroup->id != team->group_id) {
3051 				childGroup->ScheduleOrphanedCheck();
3052 			}
3053 		}
3054 
3055 		// Note, we don't move the dead children entries. Those will be deleted
3056 		// when the team structure is deleted.
3057 	}
3058 
3059 	// remove us from our parent
3060 	remove_team_from_parent(parent, team);
3061 }
3062 
3063 
3064 /*!	Kills all threads but the main thread of the team and shuts down user
3065 	debugging for it.
3066 	To be called on exit of the team's main thread. No locks must be held.
3067 
3068 	\param team The team in question.
3069 	\return The port of the debugger for the team, -1 if none. To be passed to
3070 		team_delete_team().
3071 */
3072 port_id
3073 team_shutdown_team(Team* team)
3074 {
3075 	ASSERT(thread_get_current_thread() == team->main_thread);
3076 
3077 	TeamLocker teamLocker(team);
3078 
3079 	// Make sure debugging changes won't happen anymore.
3080 	port_id debuggerPort = -1;
3081 	while (true) {
3082 		// If a debugger change is in progress for the team, we'll have to
3083 		// wait until it is done.
3084 		ConditionVariableEntry waitForDebuggerEntry;
3085 		bool waitForDebugger = false;
3086 
3087 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
3088 
3089 		if (team->debug_info.debugger_changed_condition != NULL) {
3090 			team->debug_info.debugger_changed_condition->Add(
3091 				&waitForDebuggerEntry);
3092 			waitForDebugger = true;
3093 		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
3094 			// The team is being debugged. That will stop with the termination
3095 			// of the nub thread. Since we set the team state to shutdown, no one
3096 			// can install a debugger anymore. We fetch the debugger's port to
3097 			// send it a message at the bitter end.
3098 			debuggerPort = team->debug_info.debugger_port;
3099 		}
3100 
3101 		debugInfoLocker.Unlock();
3102 
3103 		if (!waitForDebugger)
3104 			break;
3105 
3106 		// wait for the debugger change to be finished
3107 		teamLocker.Unlock();
3108 
3109 		waitForDebuggerEntry.Wait();
3110 
3111 		teamLocker.Lock();
3112 	}
3113 
3114 	// Mark the team as shutting down. That will prevent new threads from being
3115 	// created and debugger changes from taking place.
3116 	team->state = TEAM_STATE_SHUTDOWN;
3117 
3118 	// delete all timers
3119 	team->DeleteUserTimers(false);
3120 
3121 	// deactivate CPU time user timers for the team
3122 	InterruptsSpinLocker timeLocker(team->time_lock);
3123 
3124 	if (team->HasActiveCPUTimeUserTimers())
3125 		team->DeactivateCPUTimeUserTimers();
3126 
3127 	timeLocker.Unlock();
3128 
3129 	// kill all threads but the main thread
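	// Protocol: send SIGKILLTHR to every thread but the main one and count
	// them in deathEntry.remaining_threads; the counter is (presumably)
	// decremented by the dying threads, which notify the condition variable.
	// The loop repeats in case new threads appeared while the team lock was
	// dropped during the wait.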
3130 	team_death_entry deathEntry;
3131 	deathEntry.condition.Init(team, "team death");
3132 
3133 	while (true) {
3134 		team->death_entry = &deathEntry;
3135 		deathEntry.remaining_threads = 0;
3136 
3137 		Thread* thread = team->thread_list;
3138 		while (thread != NULL) {
3139 			if (thread != team->main_thread) {
3140 				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
3141 				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
3142 				deathEntry.remaining_threads++;
3143 			}
3144 
3145 			thread = thread->team_next;
3146 		}
3147 
3148 		if (deathEntry.remaining_threads == 0)
3149 			break;
3150 
3151 		// there are threads to wait for
3152 		ConditionVariableEntry entry;
3153 		deathEntry.condition.Add(&entry);
3154 
3155 		teamLocker.Unlock();
3156 
3157 		entry.Wait();
3158 
3159 		teamLocker.Lock();
3160 	}
3161 
3162 	team->death_entry = NULL;
3163 
3164 	return debuggerPort;
3165 }
3166 
3167 
3168 /*!	Called on team exit to notify threads waiting on the team and free most
3169 	resources associated with it.
3170 	The caller shouldn't hold any locks.
3171 */
3172 void
3173 team_delete_team(Team* team, port_id debuggerPort)
3174 {
3175 	// Not quite in our job description, but work that has been left by
3176 	// team_remove_team() and that can be done now that we're not holding any
3177 	// locks.
3178 	orphaned_process_group_check();
3179 
3180 	team_id teamID = team->id;
3181 
3182 	ASSERT(team->num_threads == 0);
3183 
3184 	// If someone is waiting for this team to be loaded, but it dies
3185 	// unexpectedly before being done, we need to notify the waiting
3186 	// thread now.
3187 
3188 	TeamLocker teamLocker(team);
3189 
3190 	if (team->loading_info) {
3191 		// there's indeed someone waiting
3192 		struct team_loading_info* loadingInfo = team->loading_info;
3193 		team->loading_info = NULL;
3194 
3195 		loadingInfo->result = B_ERROR;
3196 		loadingInfo->done = true;
3197 
3198 		// wake up the waiting thread
3199 		thread_continue(loadingInfo->thread);
3200 	}
3201 
3202 	// notify team watchers
3203 
3204 	{
3205 		// we're not reachable from anyone anymore at this point, so we
3206 		// can safely access the list without any locking
3207 		struct team_watcher* watcher;
3208 		while ((watcher = (struct team_watcher*)list_remove_head_item(
3209 				&team->watcher_list)) != NULL) {
3210 			watcher->hook(teamID, watcher->data);
3211 			free(watcher);
3212 		}
3213 	}
3214 
3215 	teamLocker.Unlock();
3216 
3217 	sNotificationService.Notify(TEAM_REMOVED, team);
3218 
3219 	// free team resources
3220 
3221 	delete_realtime_sem_context(team->realtime_sem_context);
3222 	xsi_sem_undo(team);
3223 	remove_images(team);
3224 	team->address_space->RemoveAndPut();
3225 
3226 	team->ReleaseReference();
3227 
3228 	// notify the debugger, that the team is gone
3229 	user_debug_team_deleted(teamID, debuggerPort);
3230 }
3231 
3232 
3233 Team*
3234 team_get_kernel_team(void)
3235 {
3236 	return sKernelTeam;
3237 }
3238 
3239 
3240 team_id
3241 team_get_kernel_team_id(void)
3242 {
3243 	if (!sKernelTeam)
3244 		return 0;
3245 
3246 	return sKernelTeam->id;
3247 }
3248 
3249 
3250 team_id
3251 team_get_current_team_id(void)
3252 {
3253 	return thread_get_current_thread()->team->id;
3254 }
3255 
3256 
3257 status_t
3258 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3259 {
3260 	if (id == sKernelTeam->id) {
3261 		// we're the kernel team, so we don't have to go through all
3262 		// the hassle (locking and hash lookup)
3263 		*_addressSpace = VMAddressSpace::GetKernel();
3264 		return B_OK;
3265 	}
3266 
3267 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3268 
3269 	Team* team = team_get_team_struct_locked(id);
3270 	if (team == NULL)
3271 		return B_BAD_VALUE;
3272 
3273 	team->address_space->Get();
3274 	*_addressSpace = team->address_space;
3275 	return B_OK;
3276 }
3277 
3278 
3279 /*!	Sets the team's job control state.
3280 	The caller must hold the parent team's lock. Interrupts are allowed to be
3281 	enabled or disabled.
3282 	\param team The team whose job control state shall be set.
3283 	\param newState The new state to be set.
3284 	\param signal The signal the new state was caused by. Can be \c NULL, if
3285 		none. Then the caller is responsible for filling in the following
3286 		fields of the entry before releasing the parent team's lock, unless
3287 		the new state is \c JOB_CONTROL_STATE_NONE:
3288 		- \c signal: The number of the signal causing the state change.
3289 		- \c signaling_user: The real UID of the user sending the signal.
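	In short, the entry is moved from the parent's list for its old state to
	the list for the new one (\c JOB_CONTROL_STATE_NONE means no list;
	\c JOB_CONTROL_STATE_DEAD is final and also bumps the parent's dead
	children count), and waiters on the parent's dead_children condition
	variable are woken up.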
3290 */
3291 void
3292 team_set_job_control_state(Team* team, job_control_state newState,
3293 	Signal* signal)
3294 {
3295 	if (team == NULL || team->job_control_entry == NULL)
3296 		return;
3297 
3298 	// don't touch anything, if the state stays the same or the team is already
3299 	// dead
3300 	job_control_entry* entry = team->job_control_entry;
3301 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
3302 		return;
3303 
3304 	T(SetJobControlState(team->id, newState, signal));
3305 
3306 	// remove from the old list
3307 	switch (entry->state) {
3308 		case JOB_CONTROL_STATE_NONE:
3309 			// entry is in no list ATM
3310 			break;
3311 		case JOB_CONTROL_STATE_DEAD:
3312 			// can't get here
3313 			break;
3314 		case JOB_CONTROL_STATE_STOPPED:
3315 			team->parent->stopped_children.entries.Remove(entry);
3316 			break;
3317 		case JOB_CONTROL_STATE_CONTINUED:
3318 			team->parent->continued_children.entries.Remove(entry);
3319 			break;
3320 	}
3321 
3322 	entry->state = newState;
3323 
3324 	if (signal != NULL) {
3325 		entry->signal = signal->Number();
3326 		entry->signaling_user = signal->SendingUser();
3327 	}
3328 
3329 	// add to new list
3330 	team_job_control_children* childList = NULL;
3331 	switch (entry->state) {
3332 		case JOB_CONTROL_STATE_NONE:
3333 			// entry doesn't get into any list
3334 			break;
3335 		case JOB_CONTROL_STATE_DEAD:
3336 			childList = &team->parent->dead_children;
3337 			team->parent->dead_children.count++;
3338 			break;
3339 		case JOB_CONTROL_STATE_STOPPED:
3340 			childList = &team->parent->stopped_children;
3341 			break;
3342 		case JOB_CONTROL_STATE_CONTINUED:
3343 			childList = &team->parent->continued_children;
3344 			break;
3345 	}
3346 
3347 	if (childList != NULL) {
3348 		childList->entries.Add(entry);
3349 		team->parent->dead_children.condition_variable.NotifyAll();
3350 	}
3351 }
3352 
3353 
3354 /*!	Inits the given team's exit information, if not yet initialized, to some
3355 	generic "killed" status.
3356 	The caller must not hold the team's lock. Interrupts must be enabled.
3357 
3358 	\param team The team whose exit info shall be initialized.
3359 */
3360 void
3361 team_init_exit_info_on_error(Team* team)
3362 {
3363 	TeamLocker teamLocker(team);
3364 
3365 	if (!team->exit.initialized) {
3366 		team->exit.reason = CLD_KILLED;
3367 		team->exit.signal = SIGKILL;
3368 		team->exit.signaling_user = geteuid();
3369 		team->exit.status = 0;
3370 		team->exit.initialized = true;
3371 	}
3372 }
3373 
3374 
3375 /*! Adds a hook to the team that is called as soon as this team goes away.
3376 	This call might get public in the future.
3377 */
3378 status_t
3379 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3380 {
3381 	if (hook == NULL || teamID < 0)
3382 		return B_BAD_VALUE;
3383 
3384 	// create the watcher object
3385 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3386 	if (watcher == NULL)
3387 		return B_NO_MEMORY;
3388 
3389 	watcher->hook = hook;
3390 	watcher->data = data;
3391 
3392 	// add watcher, if the team isn't already dying
3393 	// get the team
3394 	Team* team = Team::GetAndLock(teamID);
3395 	if (team == NULL) {
3396 		free(watcher);
3397 		return B_BAD_TEAM_ID;
3398 	}
3399 
3400 	list_add_item(&team->watcher_list, watcher);
3401 
3402 	team->UnlockAndReleaseReference();
3403 
3404 	return B_OK;
3405 }
3406 
3407 
3408 status_t
3409 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3410 {
3411 	if (hook == NULL || teamID < 0)
3412 		return B_BAD_VALUE;
3413 
3414 	// get team and remove watcher (if present)
3415 	Team* team = Team::GetAndLock(teamID);
3416 	if (team == NULL)
3417 		return B_BAD_TEAM_ID;
3418 
3419 	// search for watcher
3420 	team_watcher* watcher = NULL;
3421 	while ((watcher = (team_watcher*)list_get_next_item(
3422 			&team->watcher_list, watcher)) != NULL) {
3423 		if (watcher->hook == hook && watcher->data == data) {
3424 			// got it!
3425 			list_remove_item(&team->watcher_list, watcher);
3426 			break;
3427 		}
3428 	}
3429 
3430 	team->UnlockAndReleaseReference();
3431 
3432 	if (watcher == NULL)
3433 		return B_ENTRY_NOT_FOUND;
3434 
3435 	free(watcher);
3436 	return B_OK;
3437 }
3438 
3439 
3440 /*!	Allocates a user_thread structure from the team.
3441 	The team lock must be held, unless the function is called for the team's
3442 	main thread. Interrupts must be enabled.
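	Allocation strategy: reuse an entry from the team's free list if there is
	one; otherwise bump-allocate a cache-line-aligned slot from the team's
	user data area, growing the area by one page when it is full. (The
	alignment presumably avoids false sharing between the threads'
	user_thread structures.)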
3443 */
3444 struct user_thread*
3445 team_allocate_user_thread(Team* team)
3446 {
3447 	if (team->user_data == 0)
3448 		return NULL;
3449 
3450 	// take an entry from the free list, if any
3451 	if (struct free_user_thread* entry = team->free_user_threads) {
3452 		user_thread* thread = entry->thread;
3453 		team->free_user_threads = entry->next;
3454 		free(entry);
3455 		return thread;
3456 	}
3457 
3458 	while (true) {
3459 		// enough space left?
3460 		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
3461 		if (team->user_data_size - team->used_user_data < needed) {
3462 			// try to resize the area
3463 			if (resize_area(team->user_data_area,
3464 					team->user_data_size + B_PAGE_SIZE) != B_OK) {
3465 				return NULL;
3466 			}
3467 
3468 			// resized user area successfully -- try to allocate the user_thread
3469 			// again
3470 			team->user_data_size += B_PAGE_SIZE;
3471 			continue;
3472 		}
3473 
3474 		// allocate the user_thread
3475 		user_thread* thread
3476 			= (user_thread*)(team->user_data + team->used_user_data);
3477 		team->used_user_data += needed;
3478 
3479 		return thread;
3480 	}
3481 }
3482 
3483 
3484 /*!	Frees the given user_thread structure.
3485 	The team's lock must not be held. Interrupts must be enabled.
3486 	\param team The team the user thread was allocated from.
3487 	\param userThread The user thread to free.
3488 */
3489 void
3490 team_free_user_thread(Team* team, struct user_thread* userThread)
3491 {
3492 	if (userThread == NULL)
3493 		return;
3494 
3495 	// create a free list entry
3496 	free_user_thread* entry
3497 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3498 	if (entry == NULL) {
3499 		// we have to leak the user thread :-/
3500 		return;
3501 	}
3502 
3503 	// add to free list
3504 	TeamLocker teamLocker(team);
3505 
3506 	entry->thread = userThread;
3507 	entry->next = team->free_user_threads;
3508 	team->free_user_threads = entry;
3509 }
3510 
3511 
3512 //	#pragma mark - Associated data interface
3513 
3514 
3515 AssociatedData::AssociatedData()
3516 	:
3517 	fOwner(NULL)
3518 {
3519 }
3520 
3521 
3522 AssociatedData::~AssociatedData()
3523 {
3524 }
3525 
3526 
3527 void
3528 AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
3529 {
3530 }
3531 
3532 
3533 AssociatedDataOwner::AssociatedDataOwner()
3534 {
3535 	mutex_init(&fLock, "associated data owner");
3536 }
3537 
3538 
3539 AssociatedDataOwner::~AssociatedDataOwner()
3540 {
3541 	mutex_destroy(&fLock);
3542 }
3543 
3544 
3545 bool
3546 AssociatedDataOwner::AddData(AssociatedData* data)
3547 {
3548 	MutexLocker locker(fLock);
3549 
3550 	if (data->Owner() != NULL)
3551 		return false;
3552 
3553 	data->AcquireReference();
3554 	fList.Add(data);
3555 	data->SetOwner(this);
3556 
3557 	return true;
3558 }
3559 
3560 
3561 bool
3562 AssociatedDataOwner::RemoveData(AssociatedData* data)
3563 {
3564 	MutexLocker locker(fLock);
3565 
3566 	if (data->Owner() != this)
3567 		return false;
3568 
3569 	data->SetOwner(NULL);
3570 	fList.Remove(data);
3571 
3572 	locker.Unlock();
3573 
3574 	data->ReleaseReference();
3575 
3576 	return true;
3577 }
3578 
3579 
3580 void
3581 AssociatedDataOwner::PrepareForDeletion()
3582 {
3583 	MutexLocker locker(fLock);
3584 
3585 	// move all data to a temporary list and unset the owner
3586 	DataList list;
3587 	list.MoveFrom(&fList);
3588 
3589 	for (DataList::Iterator it = list.GetIterator();
3590 		AssociatedData* data = it.Next();) {
3591 		data->SetOwner(NULL);
3592 	}
3593 
3594 	locker.Unlock();
3595 
3596 	// call the notification hooks and release our references
3597 	while (AssociatedData* data = list.RemoveHead()) {
3598 		data->OwnerDeleted(this);
3599 		data->ReleaseReference();
3600 	}
3601 }
3602 
3603 
3604 /*!	Associates data with the current team.
3605 	When the team is deleted, the data object is notified.
3606 	The team acquires a reference to the object.
3607 
3608 	\param data The data object.
3609 	\return \c true on success, \c false otherwise. Fails only when the supplied
3610 		data object is already associated with another owner.
3611 */
3612 bool
3613 team_associate_data(AssociatedData* data)
3614 {
3615 	return thread_get_current_thread()->team->AddData(data);
3616 }
3617 
3618 
3619 /*!	Dissociates data from the current team.
3620 	Balances an earlier call to team_associate_data().
3621 
3622 	\param data The data object.
3623 	\return \c true on success, \c false otherwise. Fails only when the data
3624 		object is not associated with the current team.
3625 */
3626 bool
3627 team_dissociate_data(AssociatedData* data)
3628 {
3629 	return thread_get_current_thread()->team->RemoveData(data);
3630 }
3631 
3632 
3633 //	#pragma mark - Public kernel API
3634 
3635 
3636 thread_id
3637 load_image(int32 argCount, const char** args, const char** env)
3638 {
3639 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
3640 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
3641 }
3642 
3643 
3644 thread_id
3645 load_image_etc(int32 argCount, const char* const* args,
3646 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3647 {
3648 	// we need to flatten the args and environment
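	// The flattened buffer produced below is laid out as:
	//   argv[0 .. argCount - 1], NULL, envp[0 .. envCount - 1], NULL,
	//   followed by the NUL-terminated strings themselves.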
3649 
3650 	if (args == NULL)
3651 		return B_BAD_VALUE;
3652 
3653 	// determine total needed size
3654 	int32 argSize = 0;
3655 	for (int32 i = 0; i < argCount; i++)
3656 		argSize += strlen(args[i]) + 1;
3657 
3658 	int32 envCount = 0;
3659 	int32 envSize = 0;
3660 	while (env != NULL && env[envCount] != NULL)
3661 		envSize += strlen(env[envCount++]) + 1;
3662 
3663 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3664 	if (size > MAX_PROCESS_ARGS_SIZE)
3665 		return B_TOO_MANY_ARGS;
3666 
3667 	// allocate space
3668 	char** flatArgs = (char**)malloc(size);
3669 	if (flatArgs == NULL)
3670 		return B_NO_MEMORY;
3671 
3672 	char** slot = flatArgs;
3673 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3674 
3675 	// copy arguments and environment
3676 	for (int32 i = 0; i < argCount; i++) {
3677 		int32 argSize = strlen(args[i]) + 1;
3678 		memcpy(stringSpace, args[i], argSize);
3679 		*slot++ = stringSpace;
3680 		stringSpace += argSize;
3681 	}
3682 
3683 	*slot++ = NULL;
3684 
3685 	for (int32 i = 0; i < envCount; i++) {
3686 		int32 envSize = strlen(env[i]) + 1;
3687 		memcpy(stringSpace, env[i], envSize);
3688 		*slot++ = stringSpace;
3689 		stringSpace += envSize;
3690 	}
3691 
3692 	*slot++ = NULL;
3693 
3694 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3695 		priority, parentID, flags, -1, 0);
3696 
3697 	free(flatArgs);
3698 		// load_image_internal() unset our variable if it took over ownership
3699 
3700 	return thread;
3701 }
3702 
3703 
3704 status_t
3705 wait_for_team(team_id id, status_t* _returnCode)
3706 {
3707 	// check whether the team exists
3708 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3709 
3710 	Team* team = team_get_team_struct_locked(id);
3711 	if (team == NULL)
3712 		return B_BAD_TEAM_ID;
3713 
3714 	id = team->id;
3715 
3716 	teamsLocker.Unlock();
3717 
3718 	// wait for the main thread (it has the same ID as the team)
3719 	return wait_for_thread(id, _returnCode);
3720 }
3721 
3722 
3723 status_t
3724 kill_team(team_id id)
3725 {
3726 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3727 
3728 	Team* team = team_get_team_struct_locked(id);
3729 	if (team == NULL)
3730 		return B_BAD_TEAM_ID;
3731 
3732 	id = team->id;
3733 
3734 	teamsLocker.Unlock();
3735 
3736 	if (team == sKernelTeam)
3737 		return B_NOT_ALLOWED;
3738 
3739 	// Just kill the team's main thread (it has same ID as the team). The
3740 	// cleanup code there will take care of the team.
3741 	return kill_thread(id);
3742 }
3743 
3744 
3745 status_t
3746 _get_team_info(team_id id, team_info* info, size_t size)
3747 {
3748 	// get the team
3749 	Team* team = Team::Get(id);
3750 	if (team == NULL)
3751 		return B_BAD_TEAM_ID;
3752 	BReference<Team> teamReference(team, true);
3753 
3754 	// fill in the info
3755 	return fill_team_info(team, info, size);
3756 }
3757 
3758 
3759 status_t
3760 _get_next_team_info(int32* cookie, team_info* info, size_t size)
3761 {
3762 	int32 slot = *cookie;
3763 	if (slot < 1)
3764 		slot = 1;
3765 
3766 	InterruptsSpinLocker locker(sTeamHashLock);
3767 
3768 	team_id lastTeamID = peek_next_thread_id();
3769 		// TODO: This is broken, since the id can wrap around!
3770 
3771 	// get next valid team
3772 	Team* team = NULL;
3773 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3774 		slot++;
3775 
3776 	if (team == NULL)
3777 		return B_BAD_TEAM_ID;
3778 
3779 	// get a reference to the team and unlock
3780 	BReference<Team> teamReference(team);
3781 	locker.Unlock();
3782 
3783 	// fill in the info
3784 	*cookie = ++slot;
3785 	return fill_team_info(team, info, size);
3786 }
3787 
3788 
3789 status_t
3790 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3791 {
3792 	if (size != sizeof(team_usage_info))
3793 		return B_BAD_VALUE;
3794 
3795 	return common_get_team_usage_info(id, who, info, 0);
3796 }
3797 
3798 
3799 pid_t
3800 getpid(void)
3801 {
3802 	return thread_get_current_thread()->team->id;
3803 }
3804 
3805 
3806 pid_t
3807 getppid(void)
3808 {
3809 	Team* team = thread_get_current_thread()->team;
3810 
3811 	TeamLocker teamLocker(team);
3812 
3813 	return team->parent->id;
3814 }
3815 
3816 
3817 pid_t
3818 getpgid(pid_t id)
3819 {
3820 	if (id < 0) {
3821 		errno = EINVAL;
3822 		return -1;
3823 	}
3824 
3825 	if (id == 0) {
3826 		// get process group of the calling process
3827 		Team* team = thread_get_current_thread()->team;
3828 		TeamLocker teamLocker(team);
3829 		return team->group_id;
3830 	}
3831 
3832 	// get the team
3833 	Team* team = Team::GetAndLock(id);
3834 	if (team == NULL) {
3835 		errno = ESRCH;
3836 		return -1;
3837 	}
3838 
3839 	// get the team's process group ID
3840 	pid_t groupID = team->group_id;
3841 
3842 	team->UnlockAndReleaseReference();
3843 
3844 	return groupID;
3845 }
3846 
3847 
3848 pid_t
3849 getsid(pid_t id)
3850 {
3851 	if (id < 0) {
3852 		errno = EINVAL;
3853 		return -1;
3854 	}
3855 
3856 	if (id == 0) {
3857 		// get session of the calling process
3858 		Team* team = thread_get_current_thread()->team;
3859 		TeamLocker teamLocker(team);
3860 		return team->session_id;
3861 	}
3862 
3863 	// get the team
3864 	Team* team = Team::GetAndLock(id);
3865 	if (team == NULL) {
3866 		errno = ESRCH;
3867 		return -1;
3868 	}
3869 
3870 	// get the team's session ID
3871 	pid_t sessionID = team->session_id;
3872 
3873 	team->UnlockAndReleaseReference();
3874 
3875 	return sessionID;
3876 }
3877 
3878 
3879 //	#pragma mark - User syscalls
3880 
3881 
3882 status_t
3883 _user_exec(const char* userPath, const char* const* userFlatArgs,
3884 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
3885 {
3886 	// NOTE: Since this function normally doesn't return, don't use automatic
3887 	// variables that need destruction in the function scope.
3888 	char path[B_PATH_NAME_LENGTH];
3889 
3890 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3891 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3892 		return B_BAD_ADDRESS;
3893 
3894 	// copy and relocate the flat arguments
3895 	char** flatArgs;
3896 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3897 		argCount, envCount, flatArgs);
3898 
3899 	if (error == B_OK) {
3900 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3901 			envCount, umask);
3902 			// this one only returns in case of error
3903 	}
3904 
3905 	free(flatArgs);
3906 	return error;
3907 }
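
/*
 * Usage sketch (hedged): this syscall backs the POSIX exec*() family;
 * libroot flattens argv/envp into a single buffer before trapping into the
 * kernel. The standard userland form:
 *
 *	const char* argv[] = { "/bin/ls", "-l", NULL };
 *	execv("/bin/ls", (char* const*)argv);
 *	perror("execv");	// reached only if the exec failed
 */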
3908 
3909 
3910 thread_id
3911 _user_fork(void)
3912 {
3913 	return fork_team();
3914 }
3915 
3916 
3917 pid_t
3918 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo)
3919 {
3920 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
3921 		return B_BAD_ADDRESS;
3922 
3923 	siginfo_t info;
3924 	pid_t foundChild = wait_for_child(child, flags, info);
3925 	if (foundChild < 0)
3926 		return syscall_restart_handle_post(foundChild);
3927 
3928 	// copy info back to userland
3929 	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
3930 		return B_BAD_ADDRESS;
3931 
3932 	return foundChild;
3933 }
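
/*
 * Usage sketch (hedged): this syscall backs the POSIX wait family in
 * libroot. A typical non-blocking caller, using only standard POSIX:
 *
 *	#include <sys/wait.h>
 *
 *	int status;
 *	pid_t pid = waitpid(child, &status, WNOHANG);
 *	if (pid > 0 && WIFEXITED(status))
 *		printf("child %d exited with %d\n", (int)pid, WEXITSTATUS(status));
 */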
3934 
3935 
3936 pid_t
3937 _user_process_info(pid_t process, int32 which)
3938 {
3939 	// we only allow returning the parent of the current process
3940 	if (which == PARENT_ID
3941 		&& process != 0 && process != thread_get_current_thread()->team->id)
3942 		return B_BAD_VALUE;
3943 
3944 	pid_t result;
3945 	switch (which) {
3946 		case SESSION_ID:
3947 			result = getsid(process);
3948 			break;
3949 		case GROUP_ID:
3950 			result = getpgid(process);
3951 			break;
3952 		case PARENT_ID:
3953 			result = getppid();
3954 			break;
3955 		default:
3956 			return B_BAD_VALUE;
3957 	}
3958 
3959 	return result >= 0 ? result : errno;
3960 }
3961 
3962 
3963 pid_t
3964 _user_setpgid(pid_t processID, pid_t groupID)
3965 {
3966 	// setpgid() can be called either by the parent of the target process or
3967 	// by the process itself to do one of two things:
3968 	// * Create a new process group with the target process' ID and the target
3969 	//   process as group leader.
3970 	// * Set the target process' process group to an already existing one in the
3971 	//   same session.
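	//
	// A POSIX usage sketch (userland, not kernel code): a shell moving a
	// forked child into its own process group for job control, with both
	// sides making the call so neither side loses the race:
	//
	//	pid_t child = fork();
	//	if (child == 0)
	//		setpgid(0, 0);			// child: new group with its own ID
	//	else if (child > 0)
	//		setpgid(child, child);	// parent: ditto, in case the child
	//								// hasn't run yet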
3972 
3973 	if (groupID < 0)
3974 		return B_BAD_VALUE;
3975 
3976 	Team* currentTeam = thread_get_current_thread()->team;
3977 	if (processID == 0)
3978 		processID = currentTeam->id;
3979 
3980 	// if the group ID is not specified, use the target process' ID
3981 	if (groupID == 0)
3982 		groupID = processID;
3983 
3984 	// We loop when running into the following race condition: We create a new
3985 	// process group, because there isn't one with that ID yet, but later when
3986 	// trying to publish it, we find that someone else created and published
3987 	// a group with that ID in the meantime. In that case we just restart the
3988 	// whole action.
3989 	while (true) {
3990 		// Look up the process group by ID. If it doesn't exist yet and we are
3991 		// allowed to create a new one, do that.
3992 		ProcessGroup* group = ProcessGroup::Get(groupID);
3993 		bool newGroup = false;
3994 		if (group == NULL) {
3995 			if (groupID != processID)
3996 				return B_NOT_ALLOWED;
3997 
3998 			group = new(std::nothrow) ProcessGroup(groupID);
3999 			if (group == NULL)
4000 				return B_NO_MEMORY;
4001 
4002 			newGroup = true;
4003 		}
4004 		BReference<ProcessGroup> groupReference(group, true);
4005 
4006 		// get the target team
4007 		Team* team = Team::Get(processID);
4008 		if (team == NULL)
4009 			return ESRCH;
4010 		BReference<Team> teamReference(team, true);
4011 
4012 		// lock the new process group and the team's current process group
4013 		while (true) {
4014 			// lock the team's current process group
4015 			team->LockProcessGroup();
4016 
4017 			ProcessGroup* oldGroup = team->group;
4018 			if (oldGroup == group) {
4019 				// it's the same as the target group, so just bail out
4020 				oldGroup->Unlock();
4021 				return group->id;
4022 			}
4023 
4024 			oldGroup->AcquireReference();
4025 
4026 			// lock the target process group, if locking order allows it
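			// (The order this code relies on: process groups are locked in
			// ascending ID order; a newly created group is not yet published,
			// so nobody else can hold its lock.)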
4027 			if (newGroup || group->id > oldGroup->id) {
4028 				group->Lock();
4029 				break;
4030 			}
4031 
4032 			// locking order doesn't allow it -- try a non-blocking lock
4033 			if (group->TryLock())
4034 				break;
4035 
4036 			// no dice -- unlock the team's current process group and relock in
4037 			// the correct order
4038 			oldGroup->Unlock();
4039 
4040 			group->Lock();
4041 			oldGroup->Lock();
4042 
4043 			// check whether things are still the same
4044 			TeamLocker teamLocker(team);
4045 			if (team->group == oldGroup)
4046 				break;
4047 
4048 			// something changed -- unlock everything and retry
4049 			teamLocker.Unlock();
4050 			oldGroup->Unlock();
4051 			group->Unlock();
4052 			oldGroup->ReleaseReference();
4053 		}
4054 
4055 		// we now hold references and locks for both the new and old process groups
4056 		BReference<ProcessGroup> oldGroupReference(team->group, true);
4057 		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
4058 		AutoLocker<ProcessGroup> groupLocker(group, true);
4059 
4060 		// also lock the target team and its parent
4061 		team->LockTeamAndParent(false);
4062 		TeamLocker parentLocker(team->parent, true);
4063 		TeamLocker teamLocker(team, true);
4064 
4065 		// perform the checks
4066 		if (team == currentTeam) {
4067 			// we set our own group
4068 
4069 			// we must not change our process group ID if we're a session leader
4070 			if (is_session_leader(currentTeam))
4071 				return B_NOT_ALLOWED;
4072 		} else {
4073 			// Calling team != target team. The target team must be a child of
4074 			// the calling team and in the same session. (If that's the case it
4075 			// isn't a session leader either.)
4076 			if (team->parent != currentTeam
4077 				|| team->session_id != currentTeam->session_id) {
4078 				return B_NOT_ALLOWED;
4079 			}
4080 
4081 			// The call is also supposed to fail on a child that has already
4082 			// executed exec*() [EACCES].
4083 			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
4084 				return EACCES;
4085 		}
4086 
4087 		// If we created a new process group, publish it now.
4088 		if (newGroup) {
4089 			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
4090 			if (sGroupHash.Lookup(groupID)) {
4091 				// A group with this ID has appeared since we first checked.
4092 				// Back to square one.
4093 				continue;
4094 			}
4095 
4096 			group->PublishLocked(team->group->Session());
4097 		} else if (group->Session()->id != team->session_id) {
4098 			// The existing target process group belongs to a different session.
4099 			// That's not allowed.
4100 			return B_NOT_ALLOWED;
4101 		}
4102 
4103 		// Everything is ready -- set the group.
4104 		remove_team_from_group(team);
4105 		insert_team_into_group(group, team);
4106 
4107 		// Changing the process group might have changed the situation for a
4108 		// parent waiting in wait_for_child(). Hence we notify it.
4109 		team->parent->dead_children.condition_variable.NotifyAll();
4110 
4111 		return group->id;
4112 	}
4113 }
4114 
4115 
4116 pid_t
4117 _user_setsid(void)
4118 {
4119 	Team* team = thread_get_current_thread()->team;
4120 
4121 	// create a new process group and session
4122 	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
4123 	if (group == NULL)
4124 		return B_NO_MEMORY;
4125 	BReference<ProcessGroup> groupReference(group, true);
4126 	AutoLocker<ProcessGroup> groupLocker(group);
4127 
4128 	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
4129 	if (session == NULL)
4130 		return B_NO_MEMORY;
4131 	BReference<ProcessSession> sessionReference(session, true);
4132 
4133 	// lock the team's current process group, parent, and the team itself
4134 	team->LockTeamParentAndProcessGroup();
4135 	BReference<ProcessGroup> oldGroupReference(team->group);
4136 	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
4137 	TeamLocker parentLocker(team->parent, true);
4138 	TeamLocker teamLocker(team, true);
4139 
4140 	// the team must not already be a process group leader
4141 	if (is_process_group_leader(team))
4142 		return B_NOT_ALLOWED;
4143 
4144 	// remove the team from the old and add it to the new process group
4145 	remove_team_from_group(team);
4146 	group->Publish(session);
4147 	insert_team_into_group(group, team);
4148 
4149 	// Changing the process group might have changed the situation for a
4150 	// parent waiting in wait_for_child(). Hence we notify it.
4151 	team->parent->dead_children.condition_variable.NotifyAll();
4152 
4153 	return group->id;
4154 }
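
/*
 * Usage sketch (hedged): setsid() is the classic first step of
 * daemonization; the preceding fork() guarantees the caller is not a
 * process group leader, so the B_NOT_ALLOWED check above is not hit:
 *
 *	pid_t pid = fork();
 *	if (pid < 0)
 *		exit(1);
 *	if (pid > 0)
 *		exit(0);		// the parent exits, the child carries on
 *	if (setsid() < 0)	// the child cannot be a group leader here
 *		perror("setsid");
 */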
4155 
4156 
4157 status_t
4158 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4159 {
4160 	status_t returnCode;
4161 	status_t status;
4162 
4163 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4164 		return B_BAD_ADDRESS;
4165 
4166 	status = wait_for_team(id, &returnCode);
4167 	if (status >= B_OK && _userReturnCode != NULL) {
4168 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4169 				!= B_OK)
4170 			return B_BAD_ADDRESS;
4171 		return B_OK;
4172 	}
4173 
4174 	return syscall_restart_handle_post(status);
4175 }
4176 
4177 
4178 thread_id
4179 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
4180 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
4181 	port_id errorPort, uint32 errorToken)
4182 {
4183 	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));
4184 
4185 	if (argCount < 1)
4186 		return B_BAD_VALUE;
4187 
4188 	// copy and relocate the flat arguments
4189 	char** flatArgs;
4190 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
4191 		argCount, envCount, flatArgs);
4192 	if (error != B_OK)
4193 		return error;
4194 
4195 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
4196 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
4197 		errorToken);
4198 
4199 	free(flatArgs);
4200 		// load_image_internal() set flatArgs to NULL if it took over ownership
4201 
4202 	return thread;
4203 }
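
/*
 * Usage sketch (hedged): userland reaches this through load_image() from
 * <image.h>; libroot flattens argv/envp before making the syscall.
 * Assuming only the public API:
 *
 *	extern char** environ;
 *
 *	const char* argv[] = { "/bin/ls", "-l", NULL };
 *	thread_id thread = load_image(2, argv, (const char**)environ);
 *	if (thread >= 0)
 *		resume_thread(thread);	// the new team starts suspended
 */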
4204 
4205 
4206 void
4207 _user_exit_team(status_t returnValue)
4208 {
4209 	Thread* thread = thread_get_current_thread();
4210 	Team* team = thread->team;
4211 
4212 	// set this thread's exit status
4213 	thread->exit.status = returnValue;
4214 
4215 	// set the team exit status
4216 	TeamLocker teamLocker(team);
4217 
4218 	if (!team->exit.initialized) {
4219 		team->exit.reason = CLD_EXITED;
4220 		team->exit.signal = 0;
4221 		team->exit.signaling_user = 0;
4222 		team->exit.status = returnValue;
4223 		team->exit.initialized = true;
4224 	}
4225 
4226 	teamLocker.Unlock();
4227 
4228 	// Stop the thread, if the team is being debugged and that has been
4229 	// requested.
4230 	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
4231 		user_debug_stop_thread();
4232 
4233 	// Send this thread a SIGKILL. This makes sure the thread will not return to
4234 	// userland. The signal handling code forwards the signal to the main
4235 	// thread (if that's not already this one), which will take the team down.
4236 	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
4237 	send_signal_to_thread(thread, signal, 0);
4238 }
4239 
4240 
4241 status_t
4242 _user_kill_team(team_id team)
4243 {
4244 	return kill_team(team);
4245 }
4246 
4247 
4248 status_t
4249 _user_get_team_info(team_id id, team_info* userInfo)
4250 {
4251 	status_t status;
4252 	team_info info;
4253 
4254 	if (!IS_USER_ADDRESS(userInfo))
4255 		return B_BAD_ADDRESS;
4256 
4257 	status = _get_team_info(id, &info, sizeof(team_info));
4258 	if (status == B_OK) {
4259 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4260 			return B_BAD_ADDRESS;
4261 	}
4262 
4263 	return status;
4264 }
4265 
4266 
4267 status_t
4268 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
4269 {
4270 	status_t status;
4271 	team_info info;
4272 	int32 cookie;
4273 
4274 	if (!IS_USER_ADDRESS(userCookie)
4275 		|| !IS_USER_ADDRESS(userInfo)
4276 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4277 		return B_BAD_ADDRESS;
4278 
4279 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
4280 	if (status != B_OK)
4281 		return status;
4282 
4283 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4284 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4285 		return B_BAD_ADDRESS;
4286 
4287 	return status;
4288 }
4289 
4290 
4291 team_id
4292 _user_get_current_team(void)
4293 {
4294 	return team_get_current_team_id();
4295 }
4296 
4297 
4298 status_t
4299 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4300 	size_t size)
4301 {
4302 	if (size != sizeof(team_usage_info))
4303 		return B_BAD_VALUE;
4304 
4305 	team_usage_info info;
4306 	status_t status = common_get_team_usage_info(team, who, &info,
4307 		B_CHECK_PERMISSION);
4308 
4309 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4310 		|| user_memcpy(userInfo, &info, size) != B_OK) {
4311 		return B_BAD_ADDRESS;
4312 	}
4313 
4314 	return status;
4315 }
4316 
4317 
4318 status_t
4319 _user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
4320 	size_t size, size_t* _sizeNeeded)
4321 {
4322 	// check parameters
4323 	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
4324 		|| (buffer == NULL && size > 0)
4325 		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
4326 		return B_BAD_ADDRESS;
4327 	}
4328 
4329 	KMessage info;
4330 
4331 	if ((flags & B_TEAM_INFO_BASIC) != 0) {
4332 		// allocate memory for a copy of the needed team data
4333 		struct ExtendedTeamData {
4334 			team_id	id;
4335 			pid_t	group_id;
4336 			pid_t	session_id;
4337 			uid_t	real_uid;
4338 			gid_t	real_gid;
4339 			uid_t	effective_uid;
4340 			gid_t	effective_gid;
4341 			char	name[B_OS_NAME_LENGTH];
4342 		};
4343 
4344 		ExtendedTeamData* teamClone
4345 			= (ExtendedTeamData*)malloc(sizeof(ExtendedTeamData));
4346 			// It would be nicer to use new, but then we'd have to use
4347 			// ObjectDeleter and declare the structure outside of the function
4348 			// due to template parameter restrictions.
4349 		if (teamClone == NULL)
4350 			return B_NO_MEMORY;
4351 		MemoryDeleter teamCloneDeleter(teamClone);
4352 
4353 		io_context* ioContext;
4354 		{
4355 			// get the team structure
4356 			Team* team = Team::GetAndLock(teamID);
4357 			if (team == NULL)
4358 				return B_BAD_TEAM_ID;
4359 			BReference<Team> teamReference(team, true);
4360 			TeamLocker teamLocker(team, true);
4361 
4362 			// copy the data
4363 			teamClone->id = team->id;
4364 			strlcpy(teamClone->name, team->Name(), sizeof(teamClone->name));
4365 			teamClone->group_id = team->group_id;
4366 			teamClone->session_id = team->session_id;
4367 			teamClone->real_uid = team->real_uid;
4368 			teamClone->real_gid = team->real_gid;
4369 			teamClone->effective_uid = team->effective_uid;
4370 			teamClone->effective_gid = team->effective_gid;
4371 
4372 			// also fetch a reference to the I/O context
4373 			ioContext = team->io_context;
4374 			vfs_get_io_context(ioContext);
4375 		}
4376 		CObjectDeleter<io_context> ioContextPutter(ioContext,
4377 			&vfs_put_io_context);
4378 
4379 		// add the basic data to the info message
4380 		if (info.AddInt32("id", teamClone->id) != B_OK
4381 			|| info.AddString("name", teamClone->name) != B_OK
4382 			|| info.AddInt32("process group", teamClone->group_id) != B_OK
4383 			|| info.AddInt32("session", teamClone->session_id) != B_OK
4384 			|| info.AddInt32("uid", teamClone->real_uid) != B_OK
4385 			|| info.AddInt32("gid", teamClone->real_gid) != B_OK
4386 			|| info.AddInt32("euid", teamClone->effective_uid) != B_OK
4387 			|| info.AddInt32("egid", teamClone->effective_gid) != B_OK) {
4388 			return B_NO_MEMORY;
4389 		}
4390 
4391 		// get the current working directory from the I/O context
4392 		dev_t cwdDevice;
4393 		ino_t cwdDirectory;
4394 		{
4395 			MutexLocker ioContextLocker(ioContext->io_mutex);
4396 			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
4397 		}
4398 
4399 		if (info.AddInt32("cwd device", cwdDevice) != B_OK
4400 			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
4401 			return B_NO_MEMORY;
4402 		}
4403 	}
4404 
4405 	// TODO: Support the other flags!
4406 
4407 	// copy the needed size and, if it fits, the message back to userland
4408 	size_t sizeNeeded = info.ContentSize();
4409 	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
4410 		return B_BAD_ADDRESS;
4411 
4412 	if (sizeNeeded > size)
4413 		return B_BUFFER_OVERFLOW;
4414 
4415 	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
4416 		return B_BAD_ADDRESS;
4417 
4418 	return B_OK;
4419 }
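
/*
 * Usage sketch (hedged): the flattened KMessage is handed out via a
 * two-step size negotiation. Assuming the matching syscall stub is named
 * _kern_get_extended_team_info(), per the usual _user_/_kern_ pairing:
 *
 *	size_t sizeNeeded = 0;
 *	_kern_get_extended_team_info(team, B_TEAM_INFO_BASIC, NULL, 0,
 *		&sizeNeeded);
 *		// returns B_BUFFER_OVERFLOW, but sets sizeNeeded
 *	void* buffer = malloc(sizeNeeded);
 *	if (buffer != NULL && _kern_get_extended_team_info(team,
 *			B_TEAM_INFO_BASIC, buffer, sizeNeeded, &sizeNeeded) == B_OK) {
 *		// unflatten and inspect the KMessage in buffer
 *	}
 */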
4420