xref: /haiku/src/system/kernel/team.cpp (revision 991dadd6324f7b7a68e94743a39ebae789823228)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 /*!	Team functions */
13 
14 
15 #include <team.h>
16 
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22 
23 #include <OS.h>
24 
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27 
28 #include <extended_system_info_defs.h>
29 
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <sem.h>
47 #include <syscall_process_info.h>
48 #include <syscall_restart.h>
49 #include <syscalls.h>
50 #include <tls.h>
51 #include <tracing.h>
52 #include <user_runtime.h>
53 #include <user_thread.h>
54 #include <usergroup.h>
55 #include <vfs.h>
56 #include <vm/vm.h>
57 #include <vm/VMAddressSpace.h>
58 #include <util/AutoLock.h>
59 
60 #include "TeamThreadTables.h"
61 
62 
63 //#define TRACE_TEAM
64 #ifdef TRACE_TEAM
65 #	define TRACE(x) dprintf x
66 #else
67 #	define TRACE(x) ;
68 #endif
69 
70 
// Key type for the team hash table: teams are looked up by ID only.
struct team_key {
	team_id id;
};

// Carries everything needed to load a new userland program (spawn or exec):
// the flattened argument/environment vectors and error-reporting info.
struct team_arg {
	char	*path;				// path of the executable
	char	**flat_args;		// args and env strings in one flat allocation
	size_t	flat_args_size;		// total size of the flat_args allocation
	uint32	arg_count;			// number of program arguments
	uint32	env_count;			// number of environment variables
	mode_t	umask;				// umask to set for the new team
	uint32	flags;				// TEAM_ARGS_FLAG_* below
	port_id	error_port;			// port for reporting load errors
	uint32	error_token;		// token identifying the error-port waiter
};

// presumably disables address space layout randomization for the new team
// -- the flag's consumer is not visible in this chunk
#define TEAM_ARGS_FLAG_NO_ASLR	0x01
88 
89 
90 namespace {
91 
92 
// Notification service publishing team lifecycle events (under the "teams"
// namespace) via the kernel's DefaultNotificationService mechanism.
class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			// Broadcasts \a eventCode for \a team to all listeners.
			void			Notify(uint32 eventCode, Team* team);
};
99 
100 
101 // #pragma mark - TeamTable
102 
103 
104 typedef BKernel::TeamThreadTable<Team> TeamTable;
105 
106 
107 // #pragma mark - ProcessGroupHashDefinition
108 
109 
// BOpenHashTable policy definition: maps pid_t group IDs to ProcessGroup
// objects, chaining colliding entries through ProcessGroup::next.
struct ProcessGroupHashDefinition {
	typedef pid_t			KeyType;
	typedef	ProcessGroup	ValueType;

	size_t HashKey(pid_t key) const
	{
		// the numeric ID itself serves as the hash value
		return key;
	}

	size_t Hash(ProcessGroup* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(pid_t key, ProcessGroup* value) const
	{
		return value->id == key;
	}

	ProcessGroup*& GetLink(ProcessGroup* value) const
	{
		// intrusive link used for collision chaining
		return value->next;
	}
};

typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
136 
137 
138 }	// unnamed namespace
139 
140 
141 // #pragma mark -
142 
143 
// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static spinlock sTeamHashLock = B_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

// the kernel's own team -- assigned during initialization (not visible in
// this chunk); used e.g. to special-case parent locking
static Team* sKernelTeam = NULL;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;
	// starts at 1, presumably accounting for the kernel team -- TODO confirm

static TeamNotificationService sNotificationService;

// reserved/initial sizes for the per-team user data area
static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
169 
170 
171 // #pragma mark - TeamListIterator
172 
173 
// Registers this iterator's entry with the team hash table, so iteration
// stays valid while teams come and go.
TeamListIterator::TeamListIterator()
{
	// queue the entry
	InterruptsSpinLocker locker(sTeamHashLock);
	sTeamHash.InsertIteratorEntry(&fEntry);
}
180 
181 
// Deregisters the iterator entry from the team hash table.
TeamListIterator::~TeamListIterator()
{
	// remove the entry
	InterruptsSpinLocker locker(sTeamHashLock);
	sTeamHash.RemoveIteratorEntry(&fEntry);
}
188 
189 
190 Team*
191 TeamListIterator::Next()
192 {
193 	// get the next team -- if there is one, get reference for it
194 	InterruptsSpinLocker locker(sTeamHashLock);
195 	Team* team = sTeamHash.NextElement(&fEntry);
196 	if (team != NULL)
197 		team->AcquireReference();
198 
199 	return team;
200 }
201 
202 
203 // #pragma mark - Tracing
204 
205 
206 #if TEAM_TRACING
207 namespace TeamTracing {
208 
// Trace entry recording a fork(); \c fForkedThread is the main thread of
// the newly created child team.
class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
	}

private:
	thread_id			fForkedThread;
};
226 
227 
228 class ExecTeam : public AbstractTraceEntry {
229 public:
230 	ExecTeam(const char* path, int32 argCount, const char* const* args,
231 			int32 envCount, const char* const* env)
232 		:
233 		fArgCount(argCount),
234 		fArgs(NULL)
235 	{
236 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
237 			false);
238 
239 		// determine the buffer size we need for the args
240 		size_t argBufferSize = 0;
241 		for (int32 i = 0; i < argCount; i++)
242 			argBufferSize += strlen(args[i]) + 1;
243 
244 		// allocate a buffer
245 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
246 		if (fArgs) {
247 			char* buffer = fArgs;
248 			for (int32 i = 0; i < argCount; i++) {
249 				size_t argSize = strlen(args[i]) + 1;
250 				memcpy(buffer, args[i], argSize);
251 				buffer += argSize;
252 			}
253 		}
254 
255 		// ignore env for the time being
256 		(void)envCount;
257 		(void)env;
258 
259 		Initialized();
260 	}
261 
262 	virtual void AddDump(TraceOutput& out)
263 	{
264 		out.Print("team exec, \"%p\", args:", fPath);
265 
266 		if (fArgs != NULL) {
267 			char* args = fArgs;
268 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
269 				out.Print(" \"%s\"", args);
270 				args += strlen(args) + 1;
271 			}
272 		} else
273 			out.Print(" <too long>");
274 	}
275 
276 private:
277 	char*	fPath;
278 	int32	fArgCount;
279 	char*	fArgs;
280 };
281 
282 
283 static const char*
284 job_control_state_name(job_control_state state)
285 {
286 	switch (state) {
287 		case JOB_CONTROL_STATE_NONE:
288 			return "none";
289 		case JOB_CONTROL_STATE_STOPPED:
290 			return "stopped";
291 		case JOB_CONTROL_STATE_CONTINUED:
292 			return "continued";
293 		case JOB_CONTROL_STATE_DEAD:
294 			return "dead";
295 		default:
296 			return "invalid";
297 	}
298 }
299 
300 
// Trace entry recording a job control state transition of a team,
// optionally with the signal (number) that caused it.
class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		// 0 when no signal is associated with the transition
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %" B_PRId32 ", "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;
};
324 
325 
// Trace entry recording entry into a wait-for-child operation with the
// requested child ID and waitpid()-style flags.
class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %" B_PRId32 ", "
			"flags: %#" B_PRIx32, fChild, fFlags);
	}

private:
	pid_t	fChild;
	uint32	fFlags;
};
346 
347 
348 class WaitForChildDone : public AbstractTraceEntry {
349 public:
350 	WaitForChildDone(const job_control_entry& entry)
351 		:
352 		fState(entry.state),
353 		fTeam(entry.thread),
354 		fStatus(entry.status),
355 		fReason(entry.reason),
356 		fSignal(entry.signal)
357 	{
358 		Initialized();
359 	}
360 
361 	WaitForChildDone(status_t error)
362 		:
363 		fTeam(error)
364 	{
365 		Initialized();
366 	}
367 
368 	virtual void AddDump(TraceOutput& out)
369 	{
370 		if (fTeam >= 0) {
371 			out.Print("team wait for child done, team: %" B_PRId32 ", "
372 				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
373 				fTeam, job_control_state_name(fState), fStatus, fReason,
374 				fSignal);
375 		} else {
376 			out.Print("team wait for child failed, error: "
377 				"%#" B_PRIx32 ", ", fTeam);
378 		}
379 	}
380 
381 private:
382 	job_control_state	fState;
383 	team_id				fTeam;
384 	status_t			fStatus;
385 	uint16				fReason;
386 	uint16				fSignal;
387 };
388 
389 }	// namespace TeamTracing
390 
391 #	define T(x) new(std::nothrow) TeamTracing::x;
392 #else
393 #	define T(x) ;
394 #endif
395 
396 
397 //	#pragma mark - TeamNotificationService
398 
399 
// Publishes this service under the "teams" name.
TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}
404 
405 
406 void
407 TeamNotificationService::Notify(uint32 eventCode, Team* team)
408 {
409 	char eventBuffer[128];
410 	KMessage event;
411 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
412 	event.AddInt32("event", eventCode);
413 	event.AddInt32("team", team->id);
414 	event.AddPointer("teamStruct", team);
415 
416 	DefaultNotificationService::Notify(event, eventCode);
417 }
418 
419 
420 //	#pragma mark - Team
421 
422 
// Initializes a team with the given ID. Allocation failures of
// job_control_entry and fQueuedSignalsCounter are not fatal here; they are
// checked by Team::Create().
Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;
	serial_number = -1;

	// init mutex -- the kernel team gets a fixed name, user teams embed
	// their ID in the (cloned) lock name
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	// clear links and simple state
	hash_next = siblings_next = children = parent = NULL;
	fName[0] = '\0';
	fArgs[0] = '\0';
	num_threads = 0;
	io_context = NULL;
	address_space = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	thread_list = NULL;
	main_thread = NULL;
	loading_info = NULL;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	death_entry = NULL;
	user_data_area = -1;
	user_data = 0;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	commpage_address = NULL;

	supplementary_groups = NULL;
	supplementary_group_count = 0;

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;

	// dead threads
	list_init(&dead_threads);
	dead_threads_count = 0;

	// dead children
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// job control entry -- may be NULL on allocation failure (checked in
	// Create())
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	list_init(&sem_list);
	list_init_etc(&port_list, port_team_link_offset());
	list_init(&image_list);
	list_init(&watcher_list);

	clear_team_debug_info(&debug_info, true);

	// init dead/stopped/continued children condition vars
	dead_children.condition_variable.Init(&dead_children, "team children");

	B_INITIALIZE_SPINLOCK(&time_lock);
	B_INITIALIZE_SPINLOCK(&signal_lock);

	// -1 presumably means "no limit" for the kernel team -- confirm in
	// QueuedSignalsCounter; may be NULL on allocation failure
	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));

	fUserDefinedTimerCount = 0;
}
507 
508 
// Tears down the team and releases everything it still owns: I/O context,
// ports, semaphores, timers, pending signals, and the various death/child
// bookkeeping lists.
Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	if (io_context != NULL)
		vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	// delete all remaining (not only user-defined) timers
	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	// free the remaining thread death entries
	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	// delete the unclaimed job control entries of dead children
	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	// free the list of recycled user thread structures
	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	malloc_referenced_release(supplementary_groups);

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}
546 
547 
548 /*static*/ Team*
549 Team::Create(team_id id, const char* name, bool kernel)
550 {
551 	// create the team object
552 	Team* team = new(std::nothrow) Team(id, kernel);
553 	if (team == NULL)
554 		return NULL;
555 	ObjectDeleter<Team> teamDeleter(team);
556 
557 	if (name != NULL)
558 		team->SetName(name);
559 
560 	// check initialization
561 	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
562 		return NULL;
563 
564 	// finish initialization (arch specifics)
565 	if (arch_team_init_team_struct(team, kernel) != B_OK)
566 		return NULL;
567 
568 	if (!kernel) {
569 		status_t error = user_timer_create_team_timers(team);
570 		if (error != B_OK)
571 			return NULL;
572 	}
573 
574 	// everything went fine
575 	return teamDeleter.Detach();
576 }
577 
578 
579 /*!	\brief Returns the team with the given ID.
580 	Returns a reference to the team.
581 	Team and thread spinlock must not be held.
582 */
583 /*static*/ Team*
584 Team::Get(team_id id)
585 {
586 	if (id == B_CURRENT_TEAM) {
587 		Team* team = thread_get_current_thread()->team;
588 		team->AcquireReference();
589 		return team;
590 	}
591 
592 	InterruptsSpinLocker locker(sTeamHashLock);
593 	Team* team = sTeamHash.Lookup(id);
594 	if (team != NULL)
595 		team->AcquireReference();
596 	return team;
597 }
598 
599 
600 /*!	\brief Returns the team with the given ID in a locked state.
601 	Returns a reference to the team.
602 	Team and thread spinlock must not be held.
603 */
604 /*static*/ Team*
605 Team::GetAndLock(team_id id)
606 {
607 	// get the team
608 	Team* team = Get(id);
609 	if (team == NULL)
610 		return NULL;
611 
612 	// lock it
613 	team->Lock();
614 
615 	// only return the team, when it isn't already dying
616 	if (team->state >= TEAM_STATE_SHUTDOWN) {
617 		team->Unlock();
618 		team->ReleaseReference();
619 		return NULL;
620 	}
621 
622 	return team;
623 }
624 
625 
/*!	Locks the team and its parent team (if any).
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked. If the
	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
	only the team itself is locked.

	\param dontLockParentIfKernel If \c true, the team's parent team is only
		locked, if it is not the kernel team.
*/
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent. This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);

		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}
668 
669 
/*!	Unlocks the team and its parent team (if any).
	Counterpart to LockTeamAndParent(); unlocks parent first, then the team.
*/
void
Team::UnlockTeamAndParent()
{
	if (parent != NULL)
		parent->Unlock();

	Unlock();
}
680 
681 
/*!	Locks the team, its parent team (if any), and the team's process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked.
*/
void
Team::LockTeamParentAndProcessGroup()
{
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's lock.
	// If we have a parent, try to lock it.
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest of
	// the job. (The group lock is kept throughout.)
	Unlock();
	LockTeamAndParent(false);
}
702 
703 
/*!	Unlocks the team, its parent team (if any), and the team's process group.
	Counterpart to LockTeamParentAndProcessGroup(); releases the group lock
	first, then the parent's, then the team's own lock.
*/
void
Team::UnlockTeamParentAndProcessGroup()
{
	group->Unlock();

	if (parent != NULL)
		parent->Unlock();

	Unlock();
}
716 
717 
/*!	Locks the team and its process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
*/
void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);

		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}
749 
750 
/*!	Unlocks the team and its process group.
	Counterpart to LockTeamAndProcessGroup().
*/
void
Team::UnlockTeamAndProcessGroup()
{
	group->Unlock();
	Unlock();
}
757 
758 
759 void
760 Team::SetName(const char* name)
761 {
762 	if (const char* lastSlash = strrchr(name, '/'))
763 		name = lastSlash + 1;
764 
765 	strlcpy(fName, name, B_OS_NAME_LENGTH);
766 }
767 
768 
/*!	Sets the team's argument string, truncated to the size of fArgs. */
void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}
774 
775 
776 void
777 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
778 {
779 	fArgs[0] = '\0';
780 	strlcpy(fArgs, path, sizeof(fArgs));
781 	for (int i = 0; i < otherArgCount; i++) {
782 		strlcat(fArgs, " ", sizeof(fArgs));
783 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
784 	}
785 }
786 
787 
/*!	Resets the team's signal setup for an exec(), per POSIX semantics. */
void
Team::ResetSignalsOnExec()
{
	// We are supposed to keep pending signals. Signal actions shall be reset
	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
	// (for SIGCHLD it's implementation-defined). Others shall be reset to
	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
	// flags, but since there aren't any handlers, they make little sense, so
	// we clear them.

	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
		struct sigaction& action = SignalActionFor(i);
		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
			action.sa_handler = SIG_DFL;

		// clear mask, flags (including SA_ONSTACK), and userdata regardless
		// of the disposition
		action.sa_mask = 0;
		action.sa_flags = 0;
		action.sa_userdata = NULL;
	}
}
808 
809 
/*!	Copies all signal actions from \a parent, e.g. for fork(). */
void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}
815 
816 
/*!	Adds the given user timer to the team and, if user-defined, assigns it an
	ID.

	The caller must hold the team's lock.

	\param timer The timer to be added. If it doesn't have an ID yet, it is
		considered user-defined and will be assigned an ID.
	\return \c B_OK, if the timer was added successfully, another error code
		otherwise.
*/
status_t
Team::AddUserTimer(UserTimer* timer)
{
	// don't allow addition of timers when already shutting the team down
	if (state >= TEAM_STATE_SHUTDOWN)
		return B_BAD_TEAM_ID;

	// If the timer is user-defined, check timer limit and increment
	// user-defined count.
	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
		return EAGAIN;

	fUserTimers.AddTimer(timer);

	return B_OK;
}
843 
844 
/*!	Removes the given user timer from the team.

	The caller must hold the team's lock.

	\param timer The timer to be removed.
*/
void
Team::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	// user-defined timers also count against the per-team limit
	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		UserDefinedTimersRemoved(1);
}
860 
861 
/*!	Deletes all (or all user-defined) user timers of the team.

	Timers belonging to the team's threads are not affected.
	The caller must hold the team's lock.

	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
		otherwise all timers are deleted.
*/
void
Team::DeleteUserTimers(bool userDefinedOnly)
{
	// DeleteTimers() returns how many user-defined timers were deleted;
	// adjust the limit counter accordingly
	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
	UserDefinedTimersRemoved(count);
}
876 
877 
878 /*!	If not at the limit yet, increments the team's user-defined timer count.
879 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
880 */
881 bool
882 Team::CheckAddUserDefinedTimer()
883 {
884 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
885 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
886 		atomic_add(&fUserDefinedTimerCount, -1);
887 		return false;
888 	}
889 
890 	return true;
891 }
892 
893 
/*!	Subtracts the given count from the team's user-defined timer count.
	\param count The count to subtract.
*/
void
Team::UserDefinedTimersRemoved(int32 count)
{
	atomic_add(&fUserDefinedTimerCount, -count);
}
902 
903 
/*!	Deactivates all of the team's CPU-time and user-time user timers.
	Each Deactivate() call removes the timer from its list's head, so both
	loops terminate once the lists are empty.
*/
void
Team::DeactivateCPUTimeUserTimers()
{
	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();

	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
		timer->Deactivate();
}
913 
914 
/*!	Returns the team's current total CPU time (kernel + user + offset).

	The caller must hold \c time_lock.

	\param ignoreCurrentRun If \c true and the current thread is one team's
		threads, don't add the time since the last time \c last_time was
		updated. Should be used in "thread unscheduled" scheduler callbacks,
		since although the thread is still running at that time, its time has
		already been stopped.
	\return The team's current total CPU time.
*/
bigtime_t
Team::CPUTime(bool ignoreCurrentRun) const
{
	// start with the times accumulated from already-dead threads
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	// add each live thread's accounted time, plus -- for threads currently
	// on a CPU (last_time != 0) -- the time of the ongoing run
	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->kernel_time + thread->user_time;

		if (thread->last_time != 0) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}
	}

	return time;
}
948 
949 
/*!	Returns the team's current user CPU time.

	The caller must hold \c time_lock.

	\return The team's current user CPU time.
*/
bigtime_t
Team::UserCPUTime() const
{
	// start with the user time accumulated from already-dead threads
	bigtime_t time = dead_threads_user_time;

	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->user_time;

		// the ongoing run only counts as user time, if the thread is not
		// currently in the kernel
		if (thread->last_time != 0 && !thread->in_kernel)
			time += now - thread->last_time;
	}

	return time;
}
974 
975 
976 //	#pragma mark - ProcessGroup
977 
978 
// Creates a process group with the given ID. The group is not yet attached
// to a session or the hash table -- see Publish()/PublishLocked().
ProcessGroup::ProcessGroup(pid_t id)
	:
	id(id),
	teams(NULL),
	fSession(NULL),
	fInOrphanedCheckList(false)
{
	// the lock name embeds the group ID; MUTEX_FLAG_CLONE_NAME copies the
	// stack-local string
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}
990 
991 
// Detaches the group from the orphaned-check list, the group hash table,
// and its session (releasing the session reference).
ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session; a group
	// without a session was never published (cf. PublishLocked())
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}
1015 
1016 
1017 /*static*/ ProcessGroup*
1018 ProcessGroup::Get(pid_t id)
1019 {
1020 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1021 	ProcessGroup* group = sGroupHash.Lookup(id);
1022 	if (group != NULL)
1023 		group->AcquireReference();
1024 	return group;
1025 }
1026 
1027 
/*!	Adds the group to the given session and makes it publicly accessible.
1029 	The caller must not hold the process group hash lock.
1030 */
void
ProcessGroup::Publish(ProcessSession* session)
{
	// acquire the hash lock and delegate to the locked variant
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	PublishLocked(session);
}
1037 
1038 
/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must hold the process group hash lock.
*/
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	// a group with this ID must not be published yet
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	fSession = session;
	fSession->AcquireReference();

	sGroupHash.InsertUnchecked(this);
}
1052 
1053 
/*!	Checks whether the process group is orphaned.
	The caller must hold the group's lock.
	\return \c true, if the group is orphaned, \c false otherwise.
*/
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	// check each member team's parent; a single parent outside the group but
	// inside the session makes the group non-orphaned
	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}
1083 
1084 
1085 void
1086 ProcessGroup::ScheduleOrphanedCheck()
1087 {
1088 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1089 
1090 	if (!fInOrphanedCheckList) {
1091 		sOrphanedCheckProcessGroups.Add(this);
1092 		fInOrphanedCheckList = true;
1093 	}
1094 }
1095 
1096 
/*!	Clears the queued-for-orphaned-check flag. Note: this does not remove
	the group from sOrphanedCheckProcessGroups -- presumably the caller has
	already done so (TODO: confirm against callers outside this chunk).
*/
void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}
1102 
1103 
1104 //	#pragma mark - ProcessSession
1105 
1106 
// Creates a session with the given ID, initially without a controlling
// TTY (-1) or foreground process group (-1).
ProcessSession::ProcessSession(pid_t id)
	:
	id(id),
	controlling_tty(-1),
	foreground_group(-1)
{
	// lock name embeds the session ID; MUTEX_FLAG_CLONE_NAME copies the
	// stack-local string
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}
1117 
1118 
ProcessSession::~ProcessSession()
{
	mutex_destroy(&fLock);
}
1123 
1124 
1125 //	#pragma mark - KDL functions
1126 
1127 
// Prints all fields of \a team to the kernel debugger console. Helper for
// the "team" KDL command; no locking (we are in the debugger).
static void
_dump_team_info(Team* team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
		team->id);
	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
	kprintf("name:             '%s'\n", team->Name());
	kprintf("args:             '%s'\n", team->Args());
	kprintf("hash_next:        %p\n", team->hash_next);
	kprintf("parent:           %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:         %p\n", team->children);
	kprintf("num_threads:      %d\n", team->num_threads);
	kprintf("state:            %d\n", team->state);
	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
	kprintf("io_context:       %p\n", team->io_context);
	if (team->address_space)
		kprintf("address_space:    %p\n", team->address_space);
	kprintf("user data:        %p (area %" B_PRId32 ")\n",
		(void*)team->user_data, team->user_data_area);
	kprintf("free user thread: %p\n", team->free_user_threads);
	kprintf("main_thread:      %p\n", team->main_thread);
	kprintf("thread_list:      %p\n", team->thread_list);
	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
}
1159 
1160 
1161 static int
1162 dump_team_info(int argc, char** argv)
1163 {
1164 	ulong arg;
1165 	bool found = false;
1166 
1167 	if (argc < 2) {
1168 		Thread* thread = thread_get_current_thread();
1169 		if (thread != NULL && thread->team != NULL)
1170 			_dump_team_info(thread->team);
1171 		else
1172 			kprintf("No current team!\n");
1173 		return 0;
1174 	}
1175 
1176 	arg = strtoul(argv[1], NULL, 0);
1177 	if (IS_KERNEL_ADDRESS(arg)) {
1178 		// semi-hack
1179 		_dump_team_info((Team*)arg);
1180 		return 0;
1181 	}
1182 
1183 	// walk through the thread list, trying to match name or id
1184 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1185 		Team* team = it.Next();) {
1186 		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
1187 			|| team->id == (team_id)arg) {
1188 			_dump_team_info(team);
1189 			found = true;
1190 			break;
1191 		}
1192 	}
1193 
1194 	if (!found)
1195 		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
1196 	return 0;
1197 }
1198 
1199 
1200 static int
1201 dump_teams(int argc, char** argv)
1202 {
1203 	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
1204 		B_PRINTF_POINTER_WIDTH, "parent");
1205 
1206 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1207 		Team* team = it.Next();) {
1208 		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
1209 	}
1210 
1211 	return 0;
1212 }
1213 
1214 
1215 //	#pragma mark - Private functions
1216 
1217 
/*!	Inserts team \a team into the child list of team \a parent.

	The caller must hold the lock of both \a parent and \a team.

	\param parent The parent team.
	\param team The team to be inserted into \a parent's child list.
*/
static void
insert_team_into_parent(Team* parent, Team* team)
{
	ASSERT(parent != NULL);

	// prepend to the singly linked sibling list
	team->siblings_next = parent->children;
	parent->children = team;
	team->parent = parent;
}
1234 
1235 
1236 /*!	Removes team \a team from the child list of team \a parent.
1237 
1238 	The caller must hold the lock of both \a parent and \a team.
1239 
1240 	\param parent The parent team.
1241 	\param team The team to be removed from \a parent's child list.
1242 */
1243 static void
1244 remove_team_from_parent(Team* parent, Team* team)
1245 {
1246 	Team* child;
1247 	Team* last = NULL;
1248 
1249 	for (child = parent->children; child != NULL;
1250 			child = child->siblings_next) {
1251 		if (child == team) {
1252 			if (last == NULL)
1253 				parent->children = child->siblings_next;
1254 			else
1255 				last->siblings_next = child->siblings_next;
1256 
1257 			team->parent = NULL;
1258 			break;
1259 		}
1260 		last = child;
1261 	}
1262 }
1263 
1264 
1265 /*!	Returns whether the given team is a session leader.
1266 	The caller must hold the team's lock or its process group's lock.
1267 */
1268 static bool
1269 is_session_leader(Team* team)
1270 {
1271 	return team->session_id == team->id;
1272 }
1273 
1274 
1275 /*!	Returns whether the given team is a process group leader.
1276 	The caller must hold the team's lock or its process group's lock.
1277 */
1278 static bool
1279 is_process_group_leader(Team* team)
1280 {
1281 	return team->group_id == team->id;
1282 }
1283 
1284 
1285 /*!	Inserts the given team into the given process group.
1286 	The caller must hold the process group's lock, the team's lock, and the
1287 	team's parent's lock.
1288 */
1289 static void
1290 insert_team_into_group(ProcessGroup* group, Team* team)
1291 {
1292 	team->group = group;
1293 	team->group_id = group->id;
1294 	team->session_id = group->Session()->id;
1295 
1296 	team->group_next = group->teams;
1297 	group->teams = team;
1298 	group->AcquireReference();
1299 }
1300 
1301 
1302 /*!	Removes the given team from its process group.
1303 
1304 	The caller must hold the process group's lock, the team's lock, and the
1305 	team's parent's lock. Interrupts must be enabled.
1306 
1307 	\param team The team that'll be removed from its process group.
1308 */
1309 static void
1310 remove_team_from_group(Team* team)
1311 {
1312 	ProcessGroup* group = team->group;
1313 	Team* current;
1314 	Team* last = NULL;
1315 
1316 	// the team must be in a process group to let this function have any effect
1317 	if  (group == NULL)
1318 		return;
1319 
1320 	for (current = group->teams; current != NULL;
1321 			current = current->group_next) {
1322 		if (current == team) {
1323 			if (last == NULL)
1324 				group->teams = current->group_next;
1325 			else
1326 				last->group_next = current->group_next;
1327 
1328 			team->group = NULL;
1329 			break;
1330 		}
1331 		last = current;
1332 	}
1333 
1334 	team->group = NULL;
1335 	team->group_next = NULL;
1336 
1337 	group->ReleaseReference();
1338 }
1339 
1340 
1341 static status_t
1342 create_team_user_data(Team* team, void* exactAddress = NULL)
1343 {
1344 	void* address;
1345 	uint32 addressSpec;
1346 
1347 	if (exactAddress != NULL) {
1348 		address = exactAddress;
1349 		addressSpec = B_EXACT_ADDRESS;
1350 	} else {
1351 		address = (void*)KERNEL_USER_DATA_BASE;
1352 		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
1353 	}
1354 
1355 	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
1356 		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);
1357 
1358 	virtual_address_restrictions virtualRestrictions = {};
1359 	if (result == B_OK || exactAddress != NULL) {
1360 		if (exactAddress != NULL)
1361 			virtualRestrictions.address = exactAddress;
1362 		else
1363 			virtualRestrictions.address = address;
1364 		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
1365 	} else {
1366 		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
1367 		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
1368 	}
1369 
1370 	physical_address_restrictions physicalRestrictions = {};
1371 	team->user_data_area = create_area_etc(team->id, "user area",
1372 		kTeamUserDataInitialSize, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0,
1373 		&virtualRestrictions, &physicalRestrictions, &address);
1374 	if (team->user_data_area < 0)
1375 		return team->user_data_area;
1376 
1377 	team->user_data = (addr_t)address;
1378 	team->used_user_data = 0;
1379 	team->user_data_size = kTeamUserDataInitialSize;
1380 	team->free_user_threads = NULL;
1381 
1382 	return B_OK;
1383 }
1384 
1385 
1386 static void
1387 delete_team_user_data(Team* team)
1388 {
1389 	if (team->user_data_area >= 0) {
1390 		vm_delete_area(team->id, team->user_data_area, true);
1391 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1392 			kTeamUserDataReservedSize);
1393 
1394 		team->user_data = 0;
1395 		team->used_user_data = 0;
1396 		team->user_data_size = 0;
1397 		team->user_data_area = -1;
1398 		while (free_user_thread* entry = team->free_user_threads) {
1399 			team->free_user_threads = entry->next;
1400 			free(entry);
1401 		}
1402 	}
1403 }
1404 
1405 
1406 static status_t
1407 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1408 	int32 argCount, int32 envCount, char**& _flatArgs)
1409 {
1410 	if (argCount < 0 || envCount < 0)
1411 		return B_BAD_VALUE;
1412 
1413 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1414 		return B_TOO_MANY_ARGS;
1415 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1416 		return B_BAD_VALUE;
1417 
1418 	if (!IS_USER_ADDRESS(userFlatArgs))
1419 		return B_BAD_ADDRESS;
1420 
1421 	// allocate kernel memory
1422 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1423 	if (flatArgs == NULL)
1424 		return B_NO_MEMORY;
1425 
1426 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1427 		free(flatArgs);
1428 		return B_BAD_ADDRESS;
1429 	}
1430 
1431 	// check and relocate the array
1432 	status_t error = B_OK;
1433 	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
1434 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1435 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1436 		if (i == argCount || i == argCount + envCount + 1) {
1437 			// check array null termination
1438 			if (flatArgs[i] != NULL) {
1439 				error = B_BAD_VALUE;
1440 				break;
1441 			}
1442 		} else {
1443 			// check string
1444 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1445 			size_t maxLen = stringEnd - arg;
1446 			if (arg < stringBase || arg >= stringEnd
1447 					|| strnlen(arg, maxLen) == maxLen) {
1448 				error = B_BAD_VALUE;
1449 				break;
1450 			}
1451 
1452 			flatArgs[i] = arg;
1453 		}
1454 	}
1455 
1456 	if (error == B_OK)
1457 		_flatArgs = flatArgs;
1458 	else
1459 		free(flatArgs);
1460 
1461 	return error;
1462 }
1463 
1464 
1465 static void
1466 free_team_arg(struct team_arg* teamArg)
1467 {
1468 	if (teamArg != NULL) {
1469 		free(teamArg->flat_args);
1470 		free(teamArg->path);
1471 		free(teamArg);
1472 	}
1473 }
1474 
1475 
1476 static status_t
1477 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1478 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1479 	port_id port, uint32 token)
1480 {
1481 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1482 	if (teamArg == NULL)
1483 		return B_NO_MEMORY;
1484 
1485 	teamArg->path = strdup(path);
1486 	if (teamArg->path == NULL) {
1487 		free(teamArg);
1488 		return B_NO_MEMORY;
1489 	}
1490 
1491 	// copy the args over
1492 	teamArg->flat_args = flatArgs;
1493 	teamArg->flat_args_size = flatArgsSize;
1494 	teamArg->arg_count = argCount;
1495 	teamArg->env_count = envCount;
1496 	teamArg->flags = 0;
1497 	teamArg->umask = umask;
1498 	teamArg->error_port = port;
1499 	teamArg->error_token = token;
1500 
1501 	// determine the flags from the environment
1502 	const char* const* env = flatArgs + argCount + 1;
1503 	for (int32 i = 0; i < envCount; i++) {
1504 		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
1505 			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
1506 			break;
1507 		}
1508 	}
1509 
1510 	*_teamArg = teamArg;
1511 	return B_OK;
1512 }
1513 
1514 
/*!	Entry point helper for a new team's main thread: sets up the userland
	program arguments on the already created user stack, clones the commpage,
	loads the runtime loader, and finally enters userspace.
	Takes over ownership of \a args (a \c team_arg), which is freed once the
	arguments have been copied to the user stack.
	Returns only on error; on success arch code transfers control to userland.
*/
static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
		thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size								| usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
	// TLS_SIZE							| TLS data
	// sizeof(user_space_program_args)	| argument structure for the runtime
	//									| loader
	// flat arguments size				| flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	// the program args structure sits directly above the stack + TLS region
	// (see layout above)
	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	// the flat pointer arrays follow the args structure; the environment
	// array starts after the argument array and its NULL terminator
	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	// copy everything to the user stack in one guarded sequence -- any
	// failure means the user address range is broken
	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// set team args and update state
	team->Lock();
	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	// Clone commpage area
	area_id commPageArea = clone_commpage_area(team->id,
		&team->commpage_address);
	if (commPageArea  < B_OK) {
		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
			strerror(commPageArea)));
		return commPageArea;
	}

	// Register commpage image
	image_id commPageImage = get_commpage_image();
	image_info imageInfo;
	err = get_image_info(commPageImage, &imageInfo);
	if (err != B_OK) {
		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
			strerror(err)));
		return err;
	}
	// the image info is reused, but the text address must point at this
	// team's clone of the commpage
	imageInfo.text = team->commpage_address;
	image_id image = register_image(team, &imageInfo, sizeof(image_info));
	if (image < 0) {
		TRACE(("team_create_thread_start: register_image() failed: %s\n",
			strerror(image)));
		return image;
	}

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}
		// the KPath is destroyed here, before we enter userspace (see NOTE)

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	// enter userspace -- returns only in case of error
	return thread_enter_userspace_new_team(thread, (addr_t)entry,
		programArgs, team->commpage_address);
}
1651 
1652 
/*!	Thread entry function for a new team's main thread. Wraps
	team_create_thread_start_internal(); if that returns (i.e. entering
	userspace failed), the team's exit info is initialized and the thread
	exits. Never returns.
*/
static status_t
team_create_thread_start(void* args)
{
	team_create_thread_start_internal(args);
	team_init_exit_info_on_error(thread_get_current_thread()->team);
	thread_exit();
		// does not return
	return B_OK;
}
1662 
1663 
/*!	Creates a new team executing the image given by the flat arguments.

	Creates the team and its main thread, inherits user/group and I/O context
	from \a parentID's team, sets up the new address space and user data
	area, and spawns the main (loader) thread. With \c B_WAIT_TILL_LOADED the
	caller blocks until the runtime loader reports success or failure.

	\param _flatArgs Flat argument block (see copy_user_process_args());
		ownership passes to the created team_arg on success (\a _flatArgs is
		then set to \c NULL).
	\param errorPort/errorToken Forwarded to the loader for error reporting.
	\return The main thread's ID, or an error code.
*/
static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	thread_id thread;
	status_t status;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	io_context* parentIOContext = NULL;
	team_id teamID;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	// the first flat argument is the path of the executable
	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
		"\n", path, flatArgs, argCount));

	// cut the path from the main thread name
	const char* threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the main thread object
	Thread* mainThread;
	status = Thread::Create(threadName, mainThread);
	if (status != B_OK)
		return status;
	BReference<Thread> mainThreadReference(mainThread, true);

	// create team object
	Team* team = Team::Create(mainThread->id, path, false);
	if (team == NULL)
		return B_NO_MEMORY;
	BReference<Team> teamReference(team, true);

	// if requested, prepare the loading info the loader thread will use to
	// wake us once loading has finished or failed
	if (flags & B_WAIT_TILL_LOADED) {
		loadingInfo.thread = thread_get_current_thread();
		loadingInfo.result = B_ERROR;
		loadingInfo.done = false;
		team->loading_info = &loadingInfo;
	}

	// get the parent team
	Team* parent = Team::Get(parentID);
	if (parent == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> parentReference(parent, true);

	parent->LockTeamAndProcessGroup();
	team->Lock();

	// inherit the parent's user/group
	inherit_parent_user_and_group(team, parent);

 	InterruptsSpinLocker teamsLocker(sTeamHashLock);

	// make the team publicly visible and account for it against the team
	// limit; on over-limit we still insert and clean up via err1 below
	sTeamHash.Insert(team);
	bool teamLimitReached = sUsedTeams >= sMaxTeams;
	if (!teamLimitReached)
		sUsedTeams++;

	teamsLocker.Unlock();

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err1;
	}

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err2;

	// honor the DISABLE_ASLR=1 environment flag (see create_team_arg())
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err5;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		if (mainThread != NULL) {
			// resume the team's main thread
			thread_continue(mainThread);
		}

		// Now suspend ourselves until loading is finished. We will be woken
		// either by the thread, when it finished or aborted loading, or when
		// the team is going to die (e.g. is killed). In either case the one
		// setting `loadingInfo.done' is responsible for removing the info from
		// the team structure.
		while (!loadingInfo.done)
			thread_suspend();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(teamID);

	return thread;

	// error unwinding -- each label undoes the steps completed before the
	// corresponding failure point, in reverse order
err5:
	delete_team_user_data(team);
err4:
	team->address_space->Put();
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	teamsLocker.Lock();
	sTeamHash.Remove(team);
	if (!teamLimitReached)
		sUsedTeams--;
	teamsLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	return status;
}
1867 
1868 
1869 /*!	Almost shuts down the current team and loads a new image into it.
1870 	If successful, this function does not return and will takeover ownership of
1871 	the arguments provided.
1872 	This function may only be called in a userland team (caused by one of the
1873 	exec*() syscalls).
1874 */
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
		team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	// POSIX requires timers and per-process signal dispositions to be reset
	// across exec
	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	// build the team_arg before tearing anything down, so a failure here
	// leaves the team intact
	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	// Tear down the old image's resources: user data area, all userland
	// areas, XSI semaphore undo info, ports, semaphores, registered images,
	// close-on-exec fds and realtime semaphores. Past this point there is no
	// way back -- on failure we can only exit.
	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	// update ASLR
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
			// does never return
	} else
		free_team_arg(teamArgs);

	// Sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that).
	exit_thread(status);

	// We return a status here since the signal that is sent by the
	// call above is not immediately handled.
	return B_ERROR;
}
2015 
2016 
/*!	Implements fork(): duplicates the current team into a new child team.

	Creates a new team and main thread, copies the parent's name, args,
	user/group, signal handlers, I/O context, realtime semaphore context,
	all userland areas (copy-on-write, except the user data area which is
	recreated at the same address) and image list, then starts the child's
	main thread from a stored fork frame.

	\return The child's main thread ID on success (in the parent), or an
		error code. The child resumes in userland as if returning from fork().
*/
static thread_id
fork_team(void)
{
	Thread* parentThread = thread_get_current_thread();
	Team* parentTeam = parentThread->team;
	Team* team;
	arch_fork_arg* forkArgs;
	struct area_info info;
	thread_id threadID;
	status_t status;
	ssize_t areaCookie;
	int32 imageCookie;

	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));

	if (parentTeam == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// create a new team
	// TODO: this is very similar to load_image_internal() - maybe we can do
	// something about it :)

	// create the main thread object
	Thread* thread;
	status = Thread::Create(parentThread->name, thread);
	if (status != B_OK)
		return status;
	BReference<Thread> threadReference(thread, true);

	// create the team object
	team = Team::Create(thread->id, NULL, false);
	if (team == NULL)
		return B_NO_MEMORY;

	parentTeam->LockTeamAndProcessGroup();
	team->Lock();

	team->SetName(parentTeam->Name());
	team->SetArgs(parentTeam->Args());

	// the commpage mapping is inherited as-is, since the address space is
	// copied below
	team->commpage_address = parentTeam->commpage_address;

	// Inherit the parent's user/group.
	inherit_parent_user_and_group(team, parentTeam);

	// inherit signal handlers
	team->InheritSignalActions(parentTeam);

	InterruptsSpinLocker teamsLocker(sTeamHashLock);

	// make the team publicly visible and account for it against the team
	// limit; on over-limit we still insert and clean up via err1 below
	sTeamHash.Insert(team);
	bool teamLimitReached = sUsedTeams >= sMaxTeams;
	if (!teamLimitReached)
		sUsedTeams++;

	teamsLocker.Unlock();

	insert_team_into_parent(parentTeam, team);
	insert_team_into_group(parentTeam->group, team);

	team->Unlock();
	parentTeam->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// inherit some team debug flags
	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
		& B_TEAM_DEBUG_INHERITED_FLAGS;

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err1;
	}

	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
	if (forkArgs == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// duplicate the realtime sem context
	if (parentTeam->realtime_sem_context) {
		team->realtime_sem_context = clone_realtime_sem_context(
			parentTeam->realtime_sem_context);
		if (team->realtime_sem_context == NULL) {
			status = B_NO_MEMORY;
			goto err2;
		}
	}

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status < B_OK)
		goto err3;

	// copy all areas of the team
	// TODO: should be able to handle stack areas differently (ie. don't have
	// them copy-on-write)

	areaCookie = 0;
	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
		if (info.area == parentTeam->user_data_area) {
			// don't clone the user area; just create a new one
			status = create_team_user_data(team, info.address);
			if (status != B_OK)
				break;

			thread->user_thread = team_allocate_user_thread(team);
		} else {
			void* address;
			area_id area = vm_copy_area(team->address_space->ID(), info.name,
				&address, B_CLONE_ADDRESS, info.protection, info.area);
			if (area < B_OK) {
				status = area;
				break;
			}

			// remember the child's clone of the parent's stack area
			if (info.area == parentThread->user_stack_area)
				thread->user_stack_area = area;
		}
	}

	if (status < B_OK)
		goto err4;

	if (thread->user_thread == NULL) {
#if KDEBUG
		panic("user data area not found, parent area is %" B_PRId32,
			parentTeam->user_data_area);
#endif
		status = B_ERROR;
		goto err4;
	}

	// the child thread inherits the parent's userland stack/TLS/signal setup
	// (the addresses are valid, since the address space was copied)
	thread->user_stack_base = parentThread->user_stack_base;
	thread->user_stack_size = parentThread->user_stack_size;
	thread->user_local_storage = parentThread->user_local_storage;
	thread->sig_block_mask = parentThread->sig_block_mask;
	thread->signal_stack_base = parentThread->signal_stack_base;
	thread->signal_stack_size = parentThread->signal_stack_size;
	thread->signal_stack_enabled = parentThread->signal_stack_enabled;

	// capture the parent's CPU state, so the child can resume from the same
	// point
	arch_store_fork_frame(forkArgs);

	// copy image list
	image_info imageInfo;
	imageCookie = 0;
	while (get_next_image_info(parentTeam->id, &imageCookie, &imageInfo)
			== B_OK) {
		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
		if (image < 0)
			goto err5;
	}

	// create the main thread
	{
		ThreadCreationAttributes threadCreationAttributes(NULL,
			parentThread->name, parentThread->priority, NULL, team->id, thread);
		threadCreationAttributes.forkArgs = forkArgs;
		threadID = thread_create_thread(threadCreationAttributes, false);
		if (threadID < 0) {
			status = threadID;
			goto err5;
		}
	}

	// notify the debugger
	user_debug_team_created(team->id);

	T(TeamForked(threadID));

	resume_thread(threadID);
	return threadID;

	// error unwinding -- each label undoes the steps completed before the
	// corresponding failure point, in reverse order
err5:
	remove_images(team);
err4:
	team->address_space->RemoveAndPut();
err3:
	delete_realtime_sem_context(team->realtime_sem_context);
err2:
	free(forkArgs);
err1:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parentTeam->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parentTeam->UnlockTeamAndProcessGroup();

	teamsLocker.Lock();
	sTeamHash.Remove(team);
	if (!teamLimitReached)
		sUsedTeams--;
	teamsLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	team->ReleaseReference();

	return status;
}
2232 
2233 
2234 /*!	Returns if the specified team \a parent has any children belonging to the
2235 	process group with the specified ID \a groupID.
2236 	The caller must hold \a parent's lock.
2237 */
2238 static bool
2239 has_children_in_group(Team* parent, pid_t groupID)
2240 {
2241 	for (Team* child = parent->children; child != NULL;
2242 			child = child->siblings_next) {
2243 		TeamLocker childLocker(child);
2244 		if (child->group_id == groupID)
2245 			return true;
2246 	}
2247 
2248 	return false;
2249 }
2250 
2251 
2252 /*!	Returns the first job control entry from \a children, which matches \a id.
2253 	\a id can be:
2254 	- \code > 0 \endcode: Matching an entry with that team ID.
2255 	- \code == -1 \endcode: Matching any entry.
2256 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2257 	\c 0 is an invalid value for \a id.
2258 
2259 	The caller must hold the lock of the team that \a children belongs to.
2260 
2261 	\param children The job control entry list to check.
2262 	\param id The match criterion.
2263 	\return The first matching entry or \c NULL, if none matches.
2264 */
2265 static job_control_entry*
2266 get_job_control_entry(team_job_control_children& children, pid_t id)
2267 {
2268 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2269 		 job_control_entry* entry = it.Next();) {
2270 
2271 		if (id > 0) {
2272 			if (entry->thread == id)
2273 				return entry;
2274 		} else if (id == -1) {
2275 			return entry;
2276 		} else {
2277 			pid_t processGroup
2278 				= (entry->team ? entry->team->group_id : entry->group_id);
2279 			if (processGroup == -id)
2280 				return entry;
2281 		}
2282 	}
2283 
2284 	return NULL;
2285 }
2286 
2287 
/*!	Returns the first job control entry from one of \a team's dead, continued,
	or stopped children that matches \a id.
2290 	\a id can be:
2291 	- \code > 0 \endcode: Matching an entry with that team ID.
2292 	- \code == -1 \endcode: Matching any entry.
2293 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2294 	\c 0 is an invalid value for \a id.
2295 
2296 	The caller must hold \a team's lock.
2297 
2298 	\param team The team whose dead, stopped, and continued child lists shall be
2299 		checked.
2300 	\param id The match criterion.
2301 	\param flags Specifies which children shall be considered. Dead children
2302 		always are. Stopped children are considered when \a flags is ORed
2303 		bitwise with \c WUNTRACED, continued children when \a flags is ORed
2304 		bitwise with \c WCONTINUED.
2305 	\return The first matching entry or \c NULL, if none matches.
2306 */
2307 static job_control_entry*
2308 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2309 {
2310 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
2311 
2312 	if (entry == NULL && (flags & WCONTINUED) != 0)
2313 		entry = get_job_control_entry(team->continued_children, id);
2314 
2315 	if (entry == NULL && (flags & WUNTRACED) != 0)
2316 		entry = get_job_control_entry(team->stopped_children, id);
2317 
2318 	return entry;
2319 }
2320 
2321 
// The other members are assigned later -- via operator=() or InitDeadState()
// -- so only the flag guarding the group reference needs initialization here.
job_control_entry::job_control_entry()
	:
	has_group_ref(false)
{
}
2327 
2328 
2329 job_control_entry::~job_control_entry()
2330 {
2331 	if (has_group_ref) {
2332 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2333 
2334 		ProcessGroup* group = sGroupHash.Lookup(group_id);
2335 		if (group == NULL) {
2336 			panic("job_control_entry::~job_control_entry(): unknown group "
2337 				"ID: %" B_PRId32, group_id);
2338 			return;
2339 		}
2340 
2341 		groupHashLocker.Unlock();
2342 
2343 		group->ReleaseReference();
2344 	}
2345 }
2346 
2347 
2348 /*!	Invoked when the owning team is dying, initializing the entry according to
2349 	the dead state.
2350 
2351 	The caller must hold the owning team's lock and the scheduler lock.
2352 */
void
job_control_entry::InitDeadState()
{
	if (team != NULL) {
		ASSERT(team->exit.initialized);

		// Keep the process group alive via a reference, so that group_id can
		// still be resolved when ~job_control_entry() releases it.
		group_id = team->group_id;
		team->group->AcquireReference();
		has_group_ref = true;

		// copy the interesting exit information out of the team ...
		thread = team->id;
		status = team->exit.status;
		reason = team->exit.reason;
		signal = team->exit.signal;
		signaling_user = team->exit.signaling_user;

		// ... and detach from it, since the Team object is going away
		team = NULL;
	}
}
2372 
2373 
// Copies all job control information from \a other. Note that has_group_ref
// is deliberately set to \c false instead of being copied -- the group
// reference (if any) remains with \a other, so it is released exactly once.
job_control_entry&
job_control_entry::operator=(const job_control_entry& other)
{
	state = other.state;
	thread = other.thread;
	signal = other.signal;
	has_group_ref = false;
	signaling_user = other.signaling_user;
	team = other.team;
	group_id = other.group_id;
	status = other.status;
	reason = other.reason;

	return *this;
}
2389 
2390 
/*!	This is the kernel backend for waitid().

	\param child Which children to wait for: \c > 0 waits for the team with
		that ID, \c -1 for any child, \c 0 for any child in the caller's
		process group, and \c < -1 for any child in the process group
		\c -child.
	\param flags A bitwise combination of \c WNOHANG, \c WUNTRACED,
		\c WCONTINUED, and \c WNOWAIT, controlling whether the call blocks,
		which state changes are reported, and whether the reported state is
		consumed.
	\param _info On success filled in with the siginfo_t describing the
		child's state change.
	\return The ID of the child whose state change is reported, or an error
		code (e.g. \c ECHILD, \c B_WOULD_BLOCK, \c B_INTERRUPTED).
*/
static thread_id
wait_for_child(pid_t child, uint32 flags, siginfo_t& _info)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	struct job_control_entry foundEntry;
	struct job_control_entry* freeDeathEntry = NULL;
	status_t status = B_OK;

	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
		child, flags));

	T(WaitForChild(child, flags));

	pid_t originalChild = child;

	bool ignoreFoundEntries = false;
	bool ignoreFoundEntriesChecked = false;

	// Loop until we have a matching entry, an error, or -- without WNOHANG --
	// until a state change we waited for has occurred.
	while (true) {
		// lock the team
		TeamLocker teamLocker(team);

		// A 0 child argument means to wait for all children in the process
		// group of the calling team.
		child = originalChild == 0 ? -team->group_id : originalChild;

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		if (entry == NULL) {
			// No success yet -- check whether there are any children complying
			// to the process group specification at all.
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else {
				if (Team* childTeam = Team::Get(child)) {
					BReference<Team> childTeamReference(childTeam, true);
					TeamLocker childTeamLocker(childTeam);
					childrenExist = childTeam->parent == team;
				}
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;

			// unless WNOWAIT has been specified, "consume" the wait state
			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
				if (entry->state == JOB_CONTROL_STATE_DEAD) {
					// The child is dead. Reap its death entry.
					freeDeathEntry = entry;
					team->dead_children.entries.Remove(entry);
					team->dead_children.count--;
				} else {
					// The child is well. Reset its job control state.
					team_set_job_control_state(entry->team,
						JOB_CONTROL_STATE_NONE, NULL);
				}
			}
		}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children.condition_variable.Add(&deadWaitEntry);

		teamLocker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			teamLocker.Lock();

			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			teamLocker.Unlock();

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// When we got here, we have a valid death entry, and already got
	// unregistered from the team or group. Fill in the returned info.
	memset(&_info, 0, sizeof(_info));
	_info.si_signo = SIGCHLD;
	_info.si_pid = foundEntry.thread;
	_info.si_uid = foundEntry.signaling_user;
	// TODO: Fill in si_errno?

	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			_info.si_code = foundEntry.reason;
			_info.si_status = foundEntry.reason == CLD_EXITED
				? foundEntry.status : foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			_info.si_code = CLD_STOPPED;
			_info.si_status = foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			_info.si_code = CLD_CONTINUED;
			_info.si_status = 0;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
	// status is available.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker signalLocker(team->signal_lock);
	SpinLocker threadCreationLocker(gThreadCreationLock);

	if (is_team_signal_blocked(team, SIGCHLD)) {
		if (get_job_control_entry(team, child, flags) == NULL)
			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
	}

	threadCreationLocker.Unlock();
	signalLocker.Unlock();
	teamLocker.Unlock();

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}
2571 
2572 
2573 /*! Fills the team_info structure with information from the specified team.
2574 	Interrupts must be enabled. The team must not be locked.
2575 */
static status_t
fill_team_info(Team* team, team_info* info, size_t size)
{
	// reject mismatching structure sizes
	if (size != sizeof(team_info))
		return B_BAD_VALUE;

	// TODO: Set more information for team_info
	memset(info, 0, size);

	info->team = team->id;
		// immutable, needs no lock
	info->image_count = count_images(team);
		// protected by sImageMutex

	// the remaining fields need the team (and its debug info) locked
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	info->thread_count = team->num_threads;
	//info->area_count =
	info->debugger_nub_thread = team->debug_info.nub_thread;
	info->debugger_nub_port = team->debug_info.nub_port;
	info->uid = team->effective_uid;
	info->gid = team->effective_gid;

	strlcpy(info->args, team->Args(), sizeof(info->args));
	info->argc = 1;
		// TODO: the actual argument count is not tracked here

	return B_OK;
}
2605 
2606 
2607 /*!	Returns whether the process group contains stopped processes.
2608 	The caller must hold the process group's lock.
2609 */
2610 static bool
2611 process_group_has_stopped_processes(ProcessGroup* group)
2612 {
2613 	Team* team = group->teams;
2614 	while (team != NULL) {
2615 		// the parent team's lock guards the job control entry -- acquire it
2616 		team->LockTeamAndParent(false);
2617 
2618 		if (team->job_control_entry != NULL
2619 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2620 			team->UnlockTeamAndParent();
2621 			return true;
2622 		}
2623 
2624 		team->UnlockTeamAndParent();
2625 
2626 		team = team->group_next;
2627 	}
2628 
2629 	return false;
2630 }
2631 
2632 
2633 /*!	Iterates through all process groups queued in team_remove_team() and signals
2634 	those that are orphaned and have stopped processes.
2635 	The caller must not hold any team or process group locks.
2636 */
static void
orphaned_process_group_check()
{
	// process as long as there are groups in the list
	while (true) {
		// remove the head from the list
		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
		if (group == NULL)
			return;

		group->UnsetOrphanedCheck();
		BReference<ProcessGroup> groupReference(group);
			// NOTE(review): presumably adopts the reference held on behalf of
			// the check list (released at the end of this iteration) -- confirm
			// against ScheduleOrphanedCheck()

		// drop the list lock before taking the group lock
		orphanedCheckLocker.Unlock();

		AutoLocker<ProcessGroup> groupLocker(group);

		// If the group is orphaned and contains stopped processes, we're
		// supposed to send SIGHUP + SIGCONT.
		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
			Thread* currentThread = thread_get_current_thread();

			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
			send_signal_to_process_group_locked(group, signal, 0);

			signal.SetNumber(SIGCONT);
			send_signal_to_process_group_locked(group, signal, 0);
		}
	}
}
2669 
2670 
2671 static status_t
2672 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2673 	uint32 flags)
2674 {
2675 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2676 		return B_BAD_VALUE;
2677 
2678 	// get the team
2679 	Team* team = Team::GetAndLock(id);
2680 	if (team == NULL)
2681 		return B_BAD_TEAM_ID;
2682 	BReference<Team> teamReference(team, true);
2683 	TeamLocker teamLocker(team, true);
2684 
2685 	if ((flags & B_CHECK_PERMISSION) != 0) {
2686 		uid_t uid = geteuid();
2687 		if (uid != 0 && uid != team->effective_uid)
2688 			return B_NOT_ALLOWED;
2689 	}
2690 
2691 	bigtime_t kernelTime = 0;
2692 	bigtime_t userTime = 0;
2693 
2694 	switch (who) {
2695 		case B_TEAM_USAGE_SELF:
2696 		{
2697 			Thread* thread = team->thread_list;
2698 
2699 			for (; thread != NULL; thread = thread->team_next) {
2700 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2701 				kernelTime += thread->kernel_time;
2702 				userTime += thread->user_time;
2703 			}
2704 
2705 			kernelTime += team->dead_threads_kernel_time;
2706 			userTime += team->dead_threads_user_time;
2707 			break;
2708 		}
2709 
2710 		case B_TEAM_USAGE_CHILDREN:
2711 		{
2712 			Team* child = team->children;
2713 			for (; child != NULL; child = child->siblings_next) {
2714 				TeamLocker childLocker(child);
2715 
2716 				Thread* thread = team->thread_list;
2717 
2718 				for (; thread != NULL; thread = thread->team_next) {
2719 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2720 					kernelTime += thread->kernel_time;
2721 					userTime += thread->user_time;
2722 				}
2723 
2724 				kernelTime += child->dead_threads_kernel_time;
2725 				userTime += child->dead_threads_user_time;
2726 			}
2727 
2728 			kernelTime += team->dead_children.kernel_time;
2729 			userTime += team->dead_children.user_time;
2730 			break;
2731 		}
2732 	}
2733 
2734 	info->kernel_time = kernelTime;
2735 	info->user_time = userTime;
2736 
2737 	return B_OK;
2738 }
2739 
2740 
2741 //	#pragma mark - Private kernel API
2742 
2743 
/*!	Initializes the team subsystem during kernel startup: sets up the global
	team and process group hash tables, creates the initial session and
	process group, and creates, initializes, and publishes the kernel team.
	Any failure at this stage is fatal (panics).

	\param args The kernel boot arguments (not used here).
	\return \c B_OK.
*/
status_t
team_init(kernel_args* args)
{
	// create the team hash table
	new(&sTeamHash) TeamTable;
	if (sTeamHash.Init(64) != B_OK)
		panic("Failed to init team hash table!");

	new(&sGroupHash) ProcessGroupHashTable;
	if (sGroupHash.Init() != B_OK)
		panic("Failed to init process group hash table!");

	// create initial session and process groups

	ProcessSession* session = new(std::nothrow) ProcessSession(1);
	if (session == NULL)
		panic("Could not create initial session.\n");
	BReference<ProcessSession> sessionReference(session, true);

	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");
	BReference<ProcessGroup> groupReference(group, true);

	group->Publish(session);

	// create the kernel team
	sKernelTeam = Team::Create(1, "kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");
	sKernelTeam->SetArgs(sKernelTeam->Name());
	sKernelTeam->state = TEAM_STATE_NORMAL;

	// the kernel team runs as root with no supplementary groups
	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;
	sKernelTeam->supplementary_group_count = 0;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	// stick it in the team hash
	sTeamHash.Insert(sKernelTeam);

	// register the kernel debugger commands
	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	sNotificationService.Register();

	return B_OK;
}
2813 
2814 
/*!	Returns the maximum number of teams the system supports (\c sMaxTeams). */
int32
team_max_teams(void)
{
	return sMaxTeams;
}
2820 
2821 
/*!	Returns the number of currently existing teams. */
int32
team_used_teams(void)
{
	// sUsedTeams is guarded by the team hash lock
	InterruptsSpinLocker teamsLocker(sTeamHashLock);
	return sUsedTeams;
}
2828 
2829 
2830 /*! Returns a death entry of a child team specified by ID (if any).
2831 	The caller must hold the team's lock.
2832 
2833 	\param team The team whose dead children list to check.
	\param child The ID of the child whose death entry to look up. Must be > 0.
2835 	\param _deleteEntry Return variable, indicating whether the caller needs to
2836 		delete the returned entry.
2837 	\return The death entry of the matching team, or \c NULL, if no death entry
2838 		for the team was found.
2839 */
2840 job_control_entry*
2841 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2842 {
2843 	if (child <= 0)
2844 		return NULL;
2845 
2846 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2847 		child);
2848 	if (entry) {
2849 		// remove the entry only, if the caller is the parent of the found team
2850 		if (team_get_current_team_id() == entry->thread) {
2851 			team->dead_children.entries.Remove(entry);
2852 			team->dead_children.count--;
2853 			*_deleteEntry = true;
2854 		} else {
2855 			*_deleteEntry = false;
2856 		}
2857 	}
2858 
2859 	return entry;
2860 }
2861 
2862 
2863 /*! Quick check to see if we have a valid team ID. */
2864 bool
2865 team_is_valid(team_id id)
2866 {
2867 	if (id <= 0)
2868 		return false;
2869 
2870 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2871 
2872 	return team_get_team_struct_locked(id) != NULL;
2873 }
2874 
2875 
/*!	Returns the team with the given ID, or \c NULL if there is none.
	The caller must hold \c sTeamHashLock.
*/
Team*
team_get_team_struct_locked(team_id id)
{
	return sTeamHash.Lookup(id);
}
2881 
2882 
/*!	Sets the controlling TTY of the calling team's session to \a ttyIndex and
	resets the session's foreground process group.
*/
void
team_set_controlling_tty(int32 ttyIndex)
{
	// lock the team, so its session won't change while we're playing with it
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// set the session's fields
	session->controlling_tty = ttyIndex;
	session->foreground_group = -1;
}
2898 
2899 
/*!	Returns the index of the controlling TTY of the calling team's session. */
int32
team_get_controlling_tty()
{
	// lock the team, so its session won't change while we're playing with it
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// get the session's field
	return session->controlling_tty;
}
2914 
2915 
/*!	Sets the foreground process group of the session the calling team belongs
	to, provided \a ttyIndex is the session's controlling TTY (cf. POSIX
	tcsetpgrp()).
	If the calling team is a background process that neither ignores nor
	blocks \c SIGTTOU, \c SIGTTOU is sent to its process group instead and
	\c B_INTERRUPTED is returned.

	\param ttyIndex The index of the caller's controlling TTY.
	\param processGroupID The ID of the new foreground process group; must
		belong to the caller's session.
	\return \c B_OK on success, \c ENOTTY, \c B_BAD_VALUE, or
		\c B_INTERRUPTED on error.
*/
status_t
team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
{
	// lock the team, so its session won't change while we're playing with it
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// check given TTY -- must be the controlling tty of the calling process
	if (session->controlling_tty != ttyIndex)
		return ENOTTY;

	// check given process group -- must belong to our session
	{
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
		if (group == NULL || group->Session() != session)
			return B_BAD_VALUE;
	}

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN) {
		InterruptsSpinLocker signalLocker(team->signal_lock);

		if (!is_team_signal_blocked(team, SIGTTOU)) {
			pid_t groupID = team->group_id;

			// drop all locks (in reverse order) before sending the signal
			signalLocker.Unlock();
			sessionLocker.Unlock();
			teamLocker.Unlock();

			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
			send_signal_to_process_group(groupID, signal, 0);
			return B_INTERRUPTED;
		}
	}

	session->foreground_group = processGroupID;

	return B_OK;
}
2964 
2965 
2966 /*!	Removes the specified team from the global team hash, from its process
2967 	group, and from its parent.
2968 	It also moves all of its children to the kernel team.
2969 
2970 	The caller must hold the following locks:
2971 	- \a team's process group's lock,
2972 	- the kernel team's lock,
2973 	- \a team's parent team's lock (might be the kernel team), and
2974 	- \a team's lock.
2975 */
void
team_remove_team(Team* team, pid_t& _signalGroup)
{
	Team* parent = team->parent;

	// remember how long this team lasted
	parent->dead_children.kernel_time += team->dead_threads_kernel_time
		+ team->dead_children.kernel_time;
	parent->dead_children.user_time += team->dead_threads_user_time
		+ team->dead_children.user_time;

	// remove the team from the hash table
	InterruptsSpinLocker teamsLocker(sTeamHashLock);
	sTeamHash.Remove(team);
	sUsedTeams--;
	teamsLocker.Unlock();

	// The team can no longer be accessed by ID. Navigation to it is still
	// possible from its process group and its parent and children, but that
	// will be rectified shortly.
	team->state = TEAM_STATE_DEATH;

	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signalling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine, whom to signal.
	_signalGroup = -1;
	bool isSessionLeader = false;
	if (team->session_id == team->id
		&& team->group->Session()->controlling_tty >= 0) {
		isSessionLeader = true;

		ProcessSession* session = team->group->Session();

		AutoLocker<ProcessSession> sessionLocker(session);

		// detach the terminal; the caller signals the former foreground group
		session->controlling_tty = -1;
		_signalGroup = session->foreground_group;
	}

	// remove us from our process group
	remove_team_from_group(team);

	// move the team's children to the kernel team
	while (Team* child = team->children) {
		// remove the child from the current team and add it to the kernel team
		TeamLocker childLocker(child);

		remove_team_from_parent(team, child);
		insert_team_into_parent(sKernelTeam, child);

		// move job control entries too
		sKernelTeam->stopped_children.entries.MoveFrom(
			&team->stopped_children.entries);
		sKernelTeam->continued_children.entries.MoveFrom(
			&team->continued_children.entries);

		// If the team was a session leader with controlling terminal,
		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
		// groups with stopped processes. Due to locking complications we can't
		// do that here, so we only check whether we were a reason for the
		// child's process group not being an orphan and, if so, schedule a
		// later check (cf. orphaned_process_group_check()).
		if (isSessionLeader) {
			ProcessGroup* childGroup = child->group;
			if (childGroup->Session()->id == team->session_id
				&& childGroup->id != team->group_id) {
				childGroup->ScheduleOrphanedCheck();
			}
		}

		// Note, we don't move the dead children entries. Those will be deleted
		// when the team structure is deleted.
	}

	// remove us from our parent
	remove_team_from_parent(parent, team);
}
3054 
3055 
3056 /*!	Kills all threads but the main thread of the team and shuts down user
3057 	debugging for it.
3058 	To be called on exit of the team's main thread. No locks must be held.
3059 
3060 	\param team The team in question.
3061 	\return The port of the debugger for the team, -1 if none. To be passed to
3062 		team_delete_team().
3063 */
port_id
team_shutdown_team(Team* team)
{
	ASSERT(thread_get_current_thread() == team->main_thread);

	TeamLocker teamLocker(team);

	// Make sure debugging changes won't happen anymore.
	port_id debuggerPort = -1;
	while (true) {
		// If a debugger change is in progress for the team, we'll have to
		// wait until it is done.
		ConditionVariableEntry waitForDebuggerEntry;
		bool waitForDebugger = false;

		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition != NULL) {
			team->debug_info.debugger_changed_condition->Add(
				&waitForDebuggerEntry);
			waitForDebugger = true;
		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
			// The team is being debugged. That will stop with the termination
			// of the nub thread. Since we set the team state to death, no one
			// can install a debugger anymore. We fetch the debugger's port to
			// send it a message at the bitter end.
			debuggerPort = team->debug_info.debugger_port;
		}

		debugInfoLocker.Unlock();

		if (!waitForDebugger)
			break;

		// wait for the debugger change to be finished
		teamLocker.Unlock();

		waitForDebuggerEntry.Wait();

		// re-check from the start, as the situation may have changed meanwhile
		teamLocker.Lock();
	}

	// Mark the team as shutting down. That will prevent new threads from being
	// created and debugger changes from taking place.
	team->state = TEAM_STATE_SHUTDOWN;

	// delete all timers
	team->DeleteUserTimers(false);

	// deactivate CPU time user timers for the team
	InterruptsSpinLocker timeLocker(team->time_lock);

	if (team->HasActiveCPUTimeUserTimers())
		team->DeactivateCPUTimeUserTimers();

	timeLocker.Unlock();

	// kill all threads but the main thread
	team_death_entry deathEntry;
	deathEntry.condition.Init(team, "team death");

	while (true) {
		team->death_entry = &deathEntry;
		deathEntry.remaining_threads = 0;

		// send SIGKILLTHR to every thread but ourselves (the main thread)
		Thread* thread = team->thread_list;
		while (thread != NULL) {
			if (thread != team->main_thread) {
				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
				deathEntry.remaining_threads++;
			}

			thread = thread->team_next;
		}

		if (deathEntry.remaining_threads == 0)
			break;

		// there are threads to wait for
		ConditionVariableEntry entry;
		deathEntry.condition.Add(&entry);

		teamLocker.Unlock();

		entry.Wait();

		// loop again -- the thread list is re-scanned after waking up
		teamLocker.Lock();
	}

	team->death_entry = NULL;

	return debuggerPort;
}
3158 
3159 
3160 /*!	Called on team exit to notify threads waiting on the team and free most
3161 	resources associated with it.
3162 	The caller shouldn't hold any locks.
3163 */
void
team_delete_team(Team* team, port_id debuggerPort)
{
	// Not quite in our job description, but work that has been left by
	// team_remove_team() and that can be done now that we're not holding any
	// locks.
	orphaned_process_group_check();

	team_id teamID = team->id;

	ASSERT(team->num_threads == 0);

	// If someone is waiting for this team to be loaded, but it dies
	// unexpectedly before being done, we need to notify the waiting
	// thread now.

	TeamLocker teamLocker(team);

	if (team->loading_info) {
		// there's indeed someone waiting
		struct team_loading_info* loadingInfo = team->loading_info;
		team->loading_info = NULL;

		// loading failed -- the team is going away
		loadingInfo->result = B_ERROR;
		loadingInfo->done = true;

		// wake up the waiting thread
		thread_continue(loadingInfo->thread);
	}

	// notify team watchers

	{
		// we're not reachable from anyone anymore at this point, so we
		// can safely access the list without any locking
		struct team_watcher* watcher;
		while ((watcher = (struct team_watcher*)list_remove_head_item(
				&team->watcher_list)) != NULL) {
			watcher->hook(teamID, watcher->data);
			free(watcher);
		}
	}

	teamLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	// free team resources

	delete_realtime_sem_context(team->realtime_sem_context);
	xsi_sem_undo(team);
	remove_images(team);
	team->address_space->RemoveAndPut();

	team->ReleaseReference();

	// notify the debugger, that the team is gone
	user_debug_team_deleted(teamID, debuggerPort);
}
3223 
3224 
/*!	Returns the kernel team (created in team_init()). */
Team*
team_get_kernel_team(void)
{
	return sKernelTeam;
}
3230 
3231 
3232 team_id
3233 team_get_kernel_team_id(void)
3234 {
3235 	if (!sKernelTeam)
3236 		return 0;
3237 
3238 	return sKernelTeam->id;
3239 }
3240 
3241 
/*!	Returns the ID of the team the calling thread belongs to. */
team_id
team_get_current_team_id(void)
{
	return thread_get_current_thread()->team->id;
}
3247 
3248 
/*!	Returns a reference to the address space of the team with the given ID.
	On success the caller receives a reference (via Get()) to the address
	space and is responsible for putting it when done.
	\param id The ID of the team whose address space shall be returned.
	\param _addressSpace Pointer to the variable the address space shall be
		written to.
	\return \c B_OK on success, \c B_BAD_VALUE if no team with the given ID
		exists.
*/
status_t
team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
{
	if (id == sKernelTeam->id) {
		// we're the kernel team, so we don't have to go through all
		// the hassle (locking and hash lookup)
		*_addressSpace = VMAddressSpace::GetKernel();
		return B_OK;
	}

	InterruptsSpinLocker teamsLocker(sTeamHashLock);

	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return B_BAD_VALUE;

	// acquire a reference while we still hold the hash lock
	team->address_space->Get();
	*_addressSpace = team->address_space;
	return B_OK;
}
3269 
3270 
/*!	Sets the team's job control state.
	The caller must hold the parent team's lock. Interrupts are allowed to be
	enabled or disabled.
	\a team The team whose job control state shall be set.
	\a newState The new state to be set.
	\a signal The signal the new state was caused by. Can \c NULL, if none. Then
		the caller is responsible for filling in the following fields of the
		entry before releasing the parent team's lock, unless the new state is
		\c JOB_CONTROL_STATE_NONE:
		- \c signal: The number of the signal causing the state change.
		- \c signaling_user: The real UID of the user sending the signal.
*/
void
team_set_job_control_state(Team* team, job_control_state newState,
	Signal* signal)
{
	if (team == NULL || team->job_control_entry == NULL)
		return;

	// don't touch anything, if the state stays the same or the team is already
	// dead
	job_control_entry* entry = team->job_control_entry;
	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
		return;

	T(SetJobControlState(team->id, newState, signal));

	// remove from the old list
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry is in no list ATM
			break;
		case JOB_CONTROL_STATE_DEAD:
			// can't get here
			break;
		case JOB_CONTROL_STATE_STOPPED:
			team->parent->stopped_children.entries.Remove(entry);
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			team->parent->continued_children.entries.Remove(entry);
			break;
	}

	entry->state = newState;

	if (signal != NULL) {
		entry->signal = signal->Number();
		entry->signaling_user = signal->SendingUser();
	}

	// add to new list
	team_job_control_children* childList = NULL;
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry doesn't get into any list
			break;
		case JOB_CONTROL_STATE_DEAD:
			childList = &team->parent->dead_children;
			team->parent->dead_children.count++;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			childList = &team->parent->stopped_children;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			childList = &team->parent->continued_children;
			break;
	}

	if (childList != NULL) {
		childList->entries.Add(entry);
		// Note: the parent's dead_children condition variable is notified for
		// any state change, not only for dead entries -- presumably waiters
		// (e.g. in wait_for_child()) block on it for all state changes.
		team->parent->dead_children.condition_variable.NotifyAll();
	}
}
3344 
3345 
3346 /*!	Inits the given team's exit information, if not yet initialized, to some
3347 	generic "killed" status.
3348 	The caller must not hold the team's lock. Interrupts must be enabled.
3349 
3350 	\param team The team whose exit info shall be initialized.
3351 */
3352 void
3353 team_init_exit_info_on_error(Team* team)
3354 {
3355 	TeamLocker teamLocker(team);
3356 
3357 	if (!team->exit.initialized) {
3358 		team->exit.reason = CLD_KILLED;
3359 		team->exit.signal = SIGKILL;
3360 		team->exit.signaling_user = geteuid();
3361 		team->exit.status = 0;
3362 		team->exit.initialized = true;
3363 	}
3364 }
3365 
3366 
/*! Adds a hook to the team that is called as soon as this team goes away.
	This call might get public in the future.
	\param teamID The ID of the team to watch.
	\param hook Function invoked (with \a teamID and \a data) when the team is
		deleted.
	\param data Caller-supplied argument passed to \a hook.
	\return \c B_OK on success, \c B_BAD_VALUE for invalid arguments,
		\c B_NO_MEMORY or \c B_BAD_TEAM_ID on failure.
*/
status_t
start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
{
	if (hook == NULL || teamID < B_OK)
		return B_BAD_VALUE;

	// create the watcher object
	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
	if (watcher == NULL)
		return B_NO_MEMORY;

	watcher->hook = hook;
	watcher->data = data;

	// add watcher, if the team isn't already dying
	// get the team
	Team* team = Team::GetAndLock(teamID);
	if (team == NULL) {
		free(watcher);
		return B_BAD_TEAM_ID;
	}

	list_add_item(&team->watcher_list, watcher);

	team->UnlockAndReleaseReference();

	return B_OK;
}
3398 
3399 
/*!	Removes a watcher previously added with start_watching_team().
	Both \a hook and \a data must match the registered watcher.
	\return \c B_OK on success, \c B_BAD_VALUE or \c B_BAD_TEAM_ID for invalid
		arguments, \c B_ENTRY_NOT_FOUND if no matching watcher exists.
*/
status_t
stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
{
	if (hook == NULL || teamID < 0)
		return B_BAD_VALUE;

	// get team and remove watcher (if present)
	Team* team = Team::GetAndLock(teamID);
	if (team == NULL)
		return B_BAD_TEAM_ID;

	// search for watcher
	team_watcher* watcher = NULL;
	while ((watcher = (team_watcher*)list_get_next_item(
			&team->watcher_list, watcher)) != NULL) {
		if (watcher->hook == hook && watcher->data == data) {
			// got it!
			list_remove_item(&team->watcher_list, watcher);
			break;
		}
	}

	team->UnlockAndReleaseReference();

	// free only after the team lock has been dropped
	if (watcher == NULL)
		return B_ENTRY_NOT_FOUND;

	free(watcher);
	return B_OK;
}
3430 
3431 
/*!	Allocates a user_thread structure from the team.
	The team lock must be held, unless the function is called for the team's
	main thread. Interrupts must be enabled.
	\param team The team to allocate the user_thread from.
	\return A pointer to the allocated structure within the team's user data
		area, or \c NULL if the team has no user data area or it could not be
		grown.
*/
struct user_thread*
team_allocate_user_thread(Team* team)
{
	if (team->user_data == 0)
		return NULL;

	// take an entry from the free list, if any
	if (struct free_user_thread* entry = team->free_user_threads) {
		user_thread* thread = entry->thread;
		team->free_user_threads = entry->next;
		free(entry);
			// only the bookkeeping node is freed; the user_thread itself
			// lives in the team's user data area and is reused
		return thread;
	}

	while (true) {
		// enough space left?
		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
		if (team->user_data_size - team->used_user_data < needed) {
			// try to resize the area
			if (resize_area(team->user_data_area,
					team->user_data_size + B_PAGE_SIZE) != B_OK) {
				return NULL;
			}

			// resized user area successfully -- try to allocate the user_thread
			// again
			team->user_data_size += B_PAGE_SIZE;
			continue;
		}

		// allocate the user_thread
		user_thread* thread
			= (user_thread*)(team->user_data + team->used_user_data);
		team->used_user_data += needed;

		return thread;
	}
}
3474 
3475 
3476 /*!	Frees the given user_thread structure.
3477 	The team's lock must not be held. Interrupts must be enabled.
3478 	\param team The team the user thread was allocated from.
3479 	\param userThread The user thread to free.
3480 */
3481 void
3482 team_free_user_thread(Team* team, struct user_thread* userThread)
3483 {
3484 	if (userThread == NULL)
3485 		return;
3486 
3487 	// create a free list entry
3488 	free_user_thread* entry
3489 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3490 	if (entry == NULL) {
3491 		// we have to leak the user thread :-/
3492 		return;
3493 	}
3494 
3495 	// add to free list
3496 	TeamLocker teamLocker(team);
3497 
3498 	entry->thread = userThread;
3499 	entry->next = team->free_user_threads;
3500 	team->free_user_threads = entry;
3501 }
3502 
3503 
3504 //	#pragma mark - Associated data interface
3505 
3506 
// Creates the data object without an owner; ownership is established via
// AssociatedDataOwner::AddData().
AssociatedData::AssociatedData()
	:
	fOwner(NULL)
{
}
3512 
3513 
// Virtual destructor -- nothing to clean up here.
AssociatedData::~AssociatedData()
{
}
3517 
3518 
// Hook invoked when the owning AssociatedDataOwner is deleted (see
// AssociatedDataOwner::PrepareForDeletion()). The default does nothing.
void
AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
{
}
3523 
3524 
// Initializes the mutex guarding the associated-data list.
AssociatedDataOwner::AssociatedDataOwner()
{
	mutex_init(&fLock, "associated data owner");
}
3529 
3530 
// Destroys the list mutex. PrepareForDeletion() is expected to have emptied
// the list beforehand.
AssociatedDataOwner::~AssociatedDataOwner()
{
	mutex_destroy(&fLock);
}
3535 
3536 
/*!	Adds \a data to this owner, acquiring a reference to it.
	\return \c true on success, \c false if the data already has an owner.
*/
bool
AssociatedDataOwner::AddData(AssociatedData* data)
{
	MutexLocker locker(fLock);

	// refuse data that is already owned
	if (data->Owner() != NULL)
		return false;

	data->AcquireReference();
	fList.Add(data);
	data->SetOwner(this);

	return true;
}
3551 
3552 
/*!	Removes \a data from this owner and releases the reference AddData()
	acquired.
	\return \c true on success, \c false if \a data isn't owned by this owner.
*/
bool
AssociatedDataOwner::RemoveData(AssociatedData* data)
{
	MutexLocker locker(fLock);

	if (data->Owner() != this)
		return false;

	data->SetOwner(NULL);
	fList.Remove(data);

	locker.Unlock();

	// release outside the lock -- this may destroy the object
	data->ReleaseReference();

	return true;
}
3570 
3571 
/*!	Detaches all associated data from this owner, invoking each object's
	OwnerDeleted() hook and releasing the owner's references.
	The hooks are called without \c fLock held.
*/
void
AssociatedDataOwner::PrepareForDeletion()
{
	MutexLocker locker(fLock);

	// move all data to a temporary list and unset the owner
	DataList list;
	list.MoveFrom(&fList);

	for (DataList::Iterator it = list.GetIterator();
		AssociatedData* data = it.Next();) {
		data->SetOwner(NULL);
	}

	locker.Unlock();

	// call the notification hooks and release our references
	while (AssociatedData* data = list.RemoveHead()) {
		data->OwnerDeleted(this);
		data->ReleaseReference();
	}
}
3594 
3595 
/*!	Associates data with the current team.
	When the team is deleted, the data object is notified.
	The team acquires a reference to the object.

	\param data The data object.
	\return \c true on success, \c false otherwise. Fails only when the supplied
		data object is already associated with another owner.
*/
bool
team_associate_data(AssociatedData* data)
{
	return thread_get_current_thread()->team->AddData(data);
}
3609 
3610 
/*!	Dissociates data from the current team.
	Balances an earlier call to team_associate_data().

	\param data The data object.
	\return \c true on success, \c false otherwise. Fails only when the data
		object is not associated with the current team.
*/
bool
team_dissociate_data(AssociatedData* data)
{
	return thread_get_current_thread()->team->RemoveData(data);
}
3623 
3624 
3625 //	#pragma mark - Public kernel API
3626 
3627 
/*!	Loads the image at \a args[0] as a new team with default priority,
	waiting until it has been loaded. Convenience wrapper around
	load_image_etc().
*/
thread_id
load_image(int32 argCount, const char** args, const char** env)
{
	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
}
3634 
3635 
3636 thread_id
3637 load_image_etc(int32 argCount, const char* const* args,
3638 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3639 {
3640 	// we need to flatten the args and environment
3641 
3642 	if (args == NULL)
3643 		return B_BAD_VALUE;
3644 
3645 	// determine total needed size
3646 	int32 argSize = 0;
3647 	for (int32 i = 0; i < argCount; i++)
3648 		argSize += strlen(args[i]) + 1;
3649 
3650 	int32 envCount = 0;
3651 	int32 envSize = 0;
3652 	while (env != NULL && env[envCount] != NULL)
3653 		envSize += strlen(env[envCount++]) + 1;
3654 
3655 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3656 	if (size > MAX_PROCESS_ARGS_SIZE)
3657 		return B_TOO_MANY_ARGS;
3658 
3659 	// allocate space
3660 	char** flatArgs = (char**)malloc(size);
3661 	if (flatArgs == NULL)
3662 		return B_NO_MEMORY;
3663 
3664 	char** slot = flatArgs;
3665 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3666 
3667 	// copy arguments and environment
3668 	for (int32 i = 0; i < argCount; i++) {
3669 		int32 argSize = strlen(args[i]) + 1;
3670 		memcpy(stringSpace, args[i], argSize);
3671 		*slot++ = stringSpace;
3672 		stringSpace += argSize;
3673 	}
3674 
3675 	*slot++ = NULL;
3676 
3677 	for (int32 i = 0; i < envCount; i++) {
3678 		int32 envSize = strlen(env[i]) + 1;
3679 		memcpy(stringSpace, env[i], envSize);
3680 		*slot++ = stringSpace;
3681 		stringSpace += envSize;
3682 	}
3683 
3684 	*slot++ = NULL;
3685 
3686 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3687 		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
3688 
3689 	free(flatArgs);
3690 		// load_image_internal() unset our variable if it took over ownership
3691 
3692 	return thread;
3693 }
3694 
3695 
/*!	Waits for the team with the given ID to terminate.
	\param id The ID of the team to wait for.
	\param _returnCode Pointer to the variable the main thread's exit code
		shall be written to.
	\return \c B_OK on success, \c B_BAD_TEAM_ID if the team doesn't exist,
		or an error from wait_for_thread().
*/
status_t
wait_for_team(team_id id, status_t* _returnCode)
{
	// check whether the team exists
	InterruptsSpinLocker teamsLocker(sTeamHashLock);

	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;

	id = team->id;

	teamsLocker.Unlock();

	// wait for the main thread (it has the same ID as the team)
	return wait_for_thread(id, _returnCode);
}
3713 
3714 
/*!	Kills the team with the given ID by killing its main thread.
	Killing the kernel team is refused.
	\return \c B_OK on success, \c B_BAD_TEAM_ID if the team doesn't exist,
		\c B_NOT_ALLOWED for the kernel team, or an error from kill_thread().
*/
status_t
kill_team(team_id id)
{
	InterruptsSpinLocker teamsLocker(sTeamHashLock);

	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;

	id = team->id;

	teamsLocker.Unlock();

	if (team == sKernelTeam)
		return B_NOT_ALLOWED;

	// Just kill the team's main thread (it has same ID as the team). The
	// cleanup code there will take care of the team.
	return kill_thread(id);
}
3735 
3736 
/*!	Kernel backend of get_team_info(): fills \a info for the team with the
	given ID.
	\return \c B_OK on success, \c B_BAD_TEAM_ID if the team doesn't exist,
		or an error from fill_team_info().
*/
status_t
_get_team_info(team_id id, team_info* info, size_t size)
{
	// get the team
	Team* team = Team::Get(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	// fill in the info
	return fill_team_info(team, info, size);
}
3749 
3750 
/*!	Kernel backend of get_next_team_info(): iterates over all teams.
	\param cookie In/out iteration state; start with 0 (or any value < 1).
	\return \c B_OK on success, \c B_BAD_TEAM_ID when the iteration is done.
*/
status_t
_get_next_team_info(int32* cookie, team_info* info, size_t size)
{
	int32 slot = *cookie;
	if (slot < 1)
		slot = 1;

	InterruptsSpinLocker locker(sTeamHashLock);

	team_id lastTeamID = peek_next_thread_id();
		// TODO: This is broken, since the id can wrap around!

	// get next valid team
	Team* team = NULL;
	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
		slot++;

	if (team == NULL)
		return B_BAD_TEAM_ID;

	// get a reference to the team and unlock
	BReference<Team> teamReference(team);
	locker.Unlock();

	// fill in the info
	*cookie = ++slot;
	return fill_team_info(team, info, size);
}
3779 
3780 
/*!	Kernel backend of get_team_usage_info(): no permission check is done
	(flags 0), unlike the _user_* variant.
*/
status_t
_get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
{
	if (size != sizeof(team_usage_info))
		return B_BAD_VALUE;

	return common_get_team_usage_info(id, who, info, 0);
}
3789 
3790 
/*!	Returns the ID of the calling thread's team (the process ID). */
pid_t
getpid(void)
{
	return thread_get_current_thread()->team->id;
}
3796 
3797 
/*!	Returns the ID of the parent of the calling thread's team. */
pid_t
getppid(void)
{
	Team* team = thread_get_current_thread()->team;

	// lock the team to read the parent pointer consistently
	TeamLocker teamLocker(team);

	return team->parent->id;
}
3807 
3808 
3809 pid_t
3810 getpgid(pid_t id)
3811 {
3812 	if (id < 0) {
3813 		errno = EINVAL;
3814 		return -1;
3815 	}
3816 
3817 	if (id == 0) {
3818 		// get process group of the calling process
3819 		Team* team = thread_get_current_thread()->team;
3820 		TeamLocker teamLocker(team);
3821 		return team->group_id;
3822 	}
3823 
3824 	// get the team
3825 	Team* team = Team::GetAndLock(id);
3826 	if (team == NULL) {
3827 		errno = ESRCH;
3828 		return -1;
3829 	}
3830 
3831 	// get the team's process group ID
3832 	pid_t groupID = team->group_id;
3833 
3834 	team->UnlockAndReleaseReference();
3835 
3836 	return groupID;
3837 }
3838 
3839 
3840 pid_t
3841 getsid(pid_t id)
3842 {
3843 	if (id < 0) {
3844 		errno = EINVAL;
3845 		return -1;
3846 	}
3847 
3848 	if (id == 0) {
3849 		// get session of the calling process
3850 		Team* team = thread_get_current_thread()->team;
3851 		TeamLocker teamLocker(team);
3852 		return team->session_id;
3853 	}
3854 
3855 	// get the team
3856 	Team* team = Team::GetAndLock(id);
3857 	if (team == NULL) {
3858 		errno = ESRCH;
3859 		return -1;
3860 	}
3861 
3862 	// get the team's session ID
3863 	pid_t sessionID = team->session_id;
3864 
3865 	team->UnlockAndReleaseReference();
3866 
3867 	return sessionID;
3868 }
3869 
3870 
3871 //	#pragma mark - User syscalls
3872 
3873 
/*!	Syscall backend of exec*(): replaces the current team's image.
	On success this function does not return; it only returns an error code.
	\param userPath Userland path of the image to execute.
	\param userFlatArgs Userland flattened argument/environment buffer.
	\param flatArgsSize Size of the flattened buffer.
	\param argCount Number of arguments in the buffer.
	\param envCount Number of environment entries in the buffer.
	\param umask The file mode creation mask for the new image.
*/
status_t
_user_exec(const char* userPath, const char* const* userFlatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char path[B_PATH_NAME_LENGTH];

	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
		return B_BAD_ADDRESS;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);

	if (error == B_OK) {
		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
			envCount, umask);
			// this one only returns in case of error
	}

	free(flatArgs);
	return error;
}
3900 
3901 
/*!	Syscall backend of fork(): duplicates the current team. */
thread_id
_user_fork(void)
{
	return fork_team();
}
3907 
3908 
/*!	Syscall backend of wait()/waitpid()/waitid().
	\param child The child (or group, see wait_for_child()) to wait for.
	\param flags Wait flags (e.g. \c WNOHANG).
	\param userInfo Userland \c siginfo_t buffer to fill in. May be \c NULL.
	\return The ID of the child whose state changed, or an error code.
*/
pid_t
_user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo)
{
	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	siginfo_t info;
	pid_t foundChild = wait_for_child(child, flags, info);
	if (foundChild < 0)
		return syscall_restart_handle_post(foundChild);

	// copy info back to userland
	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
		return B_BAD_ADDRESS;

	return foundChild;
}
3926 
3927 
3928 pid_t
3929 _user_process_info(pid_t process, int32 which)
3930 {
3931 	// we only allow to return the parent of the current process
3932 	if (which == PARENT_ID
3933 		&& process != 0 && process != thread_get_current_thread()->team->id)
3934 		return B_BAD_VALUE;
3935 
3936 	pid_t result;
3937 	switch (which) {
3938 		case SESSION_ID:
3939 			result = getsid(process);
3940 			break;
3941 		case GROUP_ID:
3942 			result = getpgid(process);
3943 			break;
3944 		case PARENT_ID:
3945 			result = getppid();
3946 			break;
3947 		default:
3948 			return B_BAD_VALUE;
3949 	}
3950 
3951 	return result >= 0 ? result : errno;
3952 }
3953 
3954 
/*!	Syscall backend of setpgid(): moves the team with ID \a processID into the
	process group with ID \a groupID, creating the group, if necessary.
	\param processID The team to move; 0 means the calling team.
	\param groupID The target group; 0 means a group with the target team's ID.
	\return The resulting group ID on success, an error code otherwise.
*/
pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the target
	//   process as group leader.
	// * Set the target process' process group to an already existing one in the
	//   same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock in
			// the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child of
			// the calling team and in the same session. (If that's the case it
			// isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child, when the child has
			// already executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different session.
			// That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll();

		return group->id;
	}
}
4106 
4107 
/*!	Syscall backend of setsid(): makes the calling team the leader of a new
	session and of a new process group within that session.
	Fails if the team is already a process group leader.
	\return The new group (== session == team) ID, or an error code.
*/
pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll();

	return group->id;
}
4147 
4148 
/*!	Syscall backend of wait_for_team().
	\param id The team to wait for.
	\param _userReturnCode Userland buffer for the team's exit code. May be
		\c NULL.
	\return \c B_OK on success, \c B_BAD_ADDRESS for an invalid buffer, or an
		error (possibly after syscall-restart handling).
*/
status_t
_user_wait_for_team(team_id id, status_t* _userReturnCode)
{
	status_t returnCode;
	status_t status;

	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
		return B_BAD_ADDRESS;

	status = wait_for_team(id, &returnCode);
	if (status >= B_OK && _userReturnCode != NULL) {
		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
				!= B_OK)
			return B_BAD_ADDRESS;
		return B_OK;
	}

	return syscall_restart_handle_post(status);
}
4168 
4169 
/*!	Syscall backend of load_image(): loads a new team from a userland-supplied
	flattened argument/environment buffer.
	\param userFlatArgs Userland flattened args/environment buffer.
	\param flatArgsSize Size of the flattened buffer.
	\param argCount Number of arguments; must be >= 1 (args[0] is the path).
	\param envCount Number of environment entries.
	\param priority Priority for the new team's main thread.
	\param flags Flags for loading the team.
	\param errorPort Port for loader error messages.
	\param errorToken Token identifying this request on \a errorPort.
	\return The ID of the new team's main thread, or an error code.
*/
thread_id
_user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, int32 priority, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));

	if (argCount < 1)
		return B_BAD_VALUE;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);
	if (error != B_OK)
		return error;

	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
		errorToken);

	free(flatArgs);
		// load_image_internal() unset our variable if it took over ownership

	return thread;
}
4196 
4197 
/*!	Syscall backend of exit(): initiates the termination of the calling team.
	Records the exit status and delivers SIGKILL to the calling thread; the
	signal code eventually takes the whole team down. Does not return to
	userland normally.
	\param returnValue The team's exit status.
*/
void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status
	TeamLocker teamLocker(team);

	// only the first exit status sticks
	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();

	// Send this thread a SIGKILL. This makes sure the thread will not return to
	// userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}
4231 
4232 
/*!	Syscall backend of kill_team(). */
status_t
_user_kill_team(team_id team)
{
	return kill_team(team);
}
4238 
4239 
4240 status_t
4241 _user_get_team_info(team_id id, team_info* userInfo)
4242 {
4243 	status_t status;
4244 	team_info info;
4245 
4246 	if (!IS_USER_ADDRESS(userInfo))
4247 		return B_BAD_ADDRESS;
4248 
4249 	status = _get_team_info(id, &info, sizeof(team_info));
4250 	if (status == B_OK) {
4251 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4252 			return B_BAD_ADDRESS;
4253 	}
4254 
4255 	return status;
4256 }
4257 
4258 
/*!	Syscall backend of get_next_team_info(): iterates over all teams using a
	userland cookie.
	\return \c B_OK on success, \c B_BAD_ADDRESS for invalid buffers, or
		\c B_BAD_TEAM_ID when the iteration is done.
*/
status_t
_user_get_next_team_info(int32* userCookie, team_info* userInfo)
{
	status_t status;
	team_info info;
	int32 cookie;

	if (!IS_USER_ADDRESS(userCookie)
		|| !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
	if (status != B_OK)
		return status;

	// write back both the advanced cookie and the info
	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}
4281 
4282 
/*!	Syscall backend of get_current_team(). */
team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}
4288 
4289 
/*!	Syscall backend of get_team_usage_info(): like _get_team_usage_info(), but
	with permission check and copy-out to userland.
	\return \c B_OK on success, \c B_BAD_VALUE for a wrong \a size,
		\c B_BAD_ADDRESS for an invalid buffer, or an error from
		common_get_team_usage_info().
*/
status_t
_user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
	size_t size)
{
	if (size != sizeof(team_usage_info))
		return B_BAD_VALUE;

	team_usage_info info;
	status_t status = common_get_team_usage_info(team, who, &info,
		B_CHECK_PERMISSION);

	// NOTE(review): the copy-out happens even when status != B_OK, i.e. a
	// possibly uninitialized "info" may be copied to userland on failure --
	// confirm whether common_get_team_usage_info() always initializes it.
	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(userInfo, &info, size) != B_OK) {
		return B_BAD_ADDRESS;
	}

	return status;
}
4308 
4309 
4310 status_t
4311 _user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
4312 	size_t size, size_t* _sizeNeeded)
4313 {
4314 	// check parameters
4315 	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
4316 		|| (buffer == NULL && size > 0)
4317 		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
4318 		return B_BAD_ADDRESS;
4319 	}
4320 
4321 	KMessage info;
4322 
4323 	if ((flags & B_TEAM_INFO_BASIC) != 0) {
4324 		// allocate memory for a copy of the needed team data
4325 		struct ExtendedTeamData {
4326 			team_id	id;
4327 			pid_t	group_id;
4328 			pid_t	session_id;
4329 			uid_t	real_uid;
4330 			gid_t	real_gid;
4331 			uid_t	effective_uid;
4332 			gid_t	effective_gid;
4333 			char	name[B_OS_NAME_LENGTH];
4334 		};
4335 
4336 		ExtendedTeamData* teamClone
4337 			= (ExtendedTeamData*)malloc(sizeof(ExtendedTeamData));
4338 			// It would be nicer to use new, but then we'd have to use
4339 			// ObjectDeleter and declare the structure outside of the function
4340 			// due to template parameter restrictions.
4341 		if (teamClone == NULL)
4342 			return B_NO_MEMORY;
4343 		MemoryDeleter teamCloneDeleter(teamClone);
4344 
4345 		io_context* ioContext;
4346 		{
4347 			// get the team structure
4348 			Team* team = Team::GetAndLock(teamID);
4349 			if (team == NULL)
4350 				return B_BAD_TEAM_ID;
4351 			BReference<Team> teamReference(team, true);
4352 			TeamLocker teamLocker(team, true);
4353 
4354 			// copy the data
4355 			teamClone->id = team->id;
4356 			strlcpy(teamClone->name, team->Name(), sizeof(teamClone->name));
4357 			teamClone->group_id = team->group_id;
4358 			teamClone->session_id = team->session_id;
4359 			teamClone->real_uid = team->real_uid;
4360 			teamClone->real_gid = team->real_gid;
4361 			teamClone->effective_uid = team->effective_uid;
4362 			teamClone->effective_gid = team->effective_gid;
4363 
4364 			// also fetch a reference to the I/O context
4365 			ioContext = team->io_context;
4366 			vfs_get_io_context(ioContext);
4367 		}
4368 		CObjectDeleter<io_context> ioContextPutter(ioContext,
4369 			&vfs_put_io_context);
4370 
4371 		// add the basic data to the info message
4372 		if (info.AddInt32("id", teamClone->id) != B_OK
4373 			|| info.AddString("name", teamClone->name) != B_OK
4374 			|| info.AddInt32("process group", teamClone->group_id) != B_OK
4375 			|| info.AddInt32("session", teamClone->session_id) != B_OK
4376 			|| info.AddInt32("uid", teamClone->real_uid) != B_OK
4377 			|| info.AddInt32("gid", teamClone->real_gid) != B_OK
4378 			|| info.AddInt32("euid", teamClone->effective_uid) != B_OK
4379 			|| info.AddInt32("egid", teamClone->effective_gid) != B_OK) {
4380 			return B_NO_MEMORY;
4381 		}
4382 
4383 		// get the current working directory from the I/O context
4384 		dev_t cwdDevice;
4385 		ino_t cwdDirectory;
4386 		{
4387 			MutexLocker ioContextLocker(ioContext->io_mutex);
4388 			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
4389 		}
4390 
4391 		if (info.AddInt32("cwd device", cwdDevice) != B_OK
4392 			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
4393 			return B_NO_MEMORY;
4394 		}
4395 	}
4396 
4397 	// TODO: Support the other flags!
4398 
4399 	// copy the needed size and, if it fits, the message back to userland
4400 	size_t sizeNeeded = info.ContentSize();
4401 	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
4402 		return B_BAD_ADDRESS;
4403 
4404 	if (sizeNeeded > size)
4405 		return B_BUFFER_OVERFLOW;
4406 
4407 	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
4408 		return B_BAD_ADDRESS;
4409 
4410 	return B_OK;
4411 }
4412