xref: /haiku/src/system/kernel/team.cpp (revision 4a3268e14fff4dd5a456d824b48ce6503368e4c1)
/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*!	Team functions */


#include <team.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>

#include <OS.h>

#include <AutoDeleter.h>
#include <FindDirectory.h>

#include <extended_system_info_defs.h>

#include <commpage.h>
#include <boot_device.h>
#include <elf.h>
#include <file_cache.h>
#include <find_directory_private.h>
#include <fs/KPath.h>
#include <heap.h>
#include <int.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <Notifications.h>
#include <port.h>
#include <posix/realtime_sem.h>
#include <posix/xsi_semaphore.h>
#include <sem.h>
#include <syscall_process_info.h>
#include <syscall_restart.h>
#include <syscalls.h>
#include <tls.h>
#include <tracing.h>
#include <user_runtime.h>
#include <user_thread.h>
#include <usergroup.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <util/AutoLock.h>

#include "TeamThreadTables.h"


//#define TRACE_TEAM
#ifdef TRACE_TEAM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


struct team_key {
	team_id id;
};

struct team_arg {
	char	*path;
	char	**flat_args;
	size_t	flat_args_size;
	uint32	arg_count;
	uint32	env_count;
	mode_t	umask;
	uint32	flags;
	port_id	error_port;
	uint32	error_token;
};

#define TEAM_ARGS_FLAG_NO_ASLR	0x01


namespace {


class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};


// #pragma mark - TeamTable


typedef BKernel::TeamThreadTable<Team> TeamTable;


// #pragma mark - ProcessGroupHashDefinition


struct ProcessGroupHashDefinition {
	typedef pid_t			KeyType;
	typedef	ProcessGroup	ValueType;

	size_t HashKey(pid_t key) const
	{
		return key;
	}

	size_t Hash(ProcessGroup* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(pid_t key, ProcessGroup* value) const
	{
		return value->id == key;
	}

	ProcessGroup*& GetLink(ProcessGroup* value) const
	{
		return value->next;
	}
};

typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;


}	// unnamed namespace


// #pragma mark -


// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static spinlock sTeamHashLock = B_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

static Team* sKernelTeam = NULL;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

static TeamNotificationService sNotificationService;

static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;


// #pragma mark - TeamListIterator


TeamListIterator::TeamListIterator()
{
	// queue the entry
	InterruptsSpinLocker locker(sTeamHashLock);
	sTeamHash.InsertIteratorEntry(&fEntry);
}


TeamListIterator::~TeamListIterator()
{
	// remove the entry
	InterruptsSpinLocker locker(sTeamHashLock);
	sTeamHash.RemoveIteratorEntry(&fEntry);
}


Team*
TeamListIterator::Next()
{
	// get the next team -- if there is one, acquire a reference to it
	InterruptsSpinLocker locker(sTeamHashLock);
	Team* team = sTeamHash.NextElement(&fEntry);
	if (team != NULL)
		team->AcquireReference();

	return team;
}
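
/*	Illustrative iteration over all teams (a sketch, not code from this file;
	Next() acquires a reference on the caller's behalf, which the caller has
	to release again):

		TeamListIterator iterator;
		while (Team* team = iterator.Next()) {
			// ... inspect the team ...
			team->ReleaseReference();
		}
*/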


// #pragma mark - Tracing


#if TEAM_TRACING
namespace TeamTracing {

class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
	}

private:
	thread_id			fForkedThread;
};


class ExecTeam : public AbstractTraceEntry {
public:
	ExecTeam(const char* path, int32 argCount, const char* const* args,
			int32 envCount, const char* const* env)
		:
		fArgCount(argCount),
		fArgs(NULL)
	{
		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
			false);

		// determine the buffer size we need for the args
		size_t argBufferSize = 0;
		for (int32 i = 0; i < argCount; i++)
			argBufferSize += strlen(args[i]) + 1;

		// allocate a buffer
		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
		if (fArgs) {
			char* buffer = fArgs;
			for (int32 i = 0; i < argCount; i++) {
				size_t argSize = strlen(args[i]) + 1;
				memcpy(buffer, args[i], argSize);
				buffer += argSize;
			}
		}

		// ignore env for the time being
		(void)envCount;
		(void)env;

		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team exec, \"%s\", args:", fPath);

		if (fArgs != NULL) {
			char* args = fArgs;
			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
				out.Print(" \"%s\"", args);
				args += strlen(args) + 1;
			}
		} else
			out.Print(" <too long>");
	}

private:
	char*	fPath;
	int32	fArgCount;
	char*	fArgs;
};


static const char*
job_control_state_name(job_control_state state)
{
	switch (state) {
		case JOB_CONTROL_STATE_NONE:
			return "none";
		case JOB_CONTROL_STATE_STOPPED:
			return "stopped";
		case JOB_CONTROL_STATE_CONTINUED:
			return "continued";
		case JOB_CONTROL_STATE_DEAD:
			return "dead";
		default:
			return "invalid";
	}
}


class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %" B_PRId32 ", "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;
};


class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %" B_PRId32 ", "
			"flags: %#" B_PRIx32, fChild, fFlags);
	}

private:
	pid_t	fChild;
	uint32	fFlags;
};


class WaitForChildDone : public AbstractTraceEntry {
public:
	WaitForChildDone(const job_control_entry& entry)
		:
		fState(entry.state),
		fTeam(entry.thread),
		fStatus(entry.status),
		fReason(entry.reason),
		fSignal(entry.signal)
	{
		Initialized();
	}

	WaitForChildDone(status_t error)
		:
		fTeam(error)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		if (fTeam >= 0) {
			out.Print("team wait for child done, team: %" B_PRId32 ", "
				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
				fTeam, job_control_state_name(fState), fStatus, fReason,
				fSignal);
		} else {
			out.Print("team wait for child failed, error: "
				"%#" B_PRIx32 ", ", fTeam);
		}
	}

private:
	job_control_state	fState;
	team_id				fTeam;
	status_t			fStatus;
	uint16				fReason;
	uint16				fSignal;
};

}	// namespace TeamTracing

#	define T(x) new(std::nothrow) TeamTracing::x;
#else
#	define T(x) ;
#endif


//	#pragma mark - TeamNotificationService


TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}


void
TeamNotificationService::Notify(uint32 eventCode, Team* team)
{
	char eventBuffer[128];
	KMessage event;
	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
	event.AddInt32("event", eventCode);
	event.AddInt32("team", team->id);
	event.AddPointer("teamStruct", team);

	DefaultNotificationService::Notify(event, eventCode);
}
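
/*	Consumers subscribe to this service under the name "teams" and receive
	the KMessage constructed above (fields "event", "team" and "teamStruct").
	A hypothetical listener might look like this (illustrative sketch only):

		class MyTeamListener : public NotificationListener {
			virtual void EventOccurred(NotificationService& service,
				const KMessage* event)
			{
				int32 eventCode;
				team_id teamID;
				if (event->FindInt32("event", &eventCode) == B_OK
					&& event->FindInt32("team", &teamID) == B_OK) {
					// ... react to TEAM_ADDED/TEAM_REMOVED/TEAM_EXEC ...
				}
			}
		};
*/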


//	#pragma mark - Team


Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;
	serial_number = -1;

	// init mutex
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	hash_next = siblings_next = children = parent = NULL;
	fName[0] = '\0';
	fArgs[0] = '\0';
	num_threads = 0;
	io_context = NULL;
	address_space = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	thread_list = NULL;
	main_thread = NULL;
	loading_info = NULL;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	death_entry = NULL;
	user_data_area = -1;
	user_data = 0;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	commpage_address = NULL;

	supplementary_groups = NULL;
	supplementary_group_count = 0;

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;

	// dead threads
	list_init(&dead_threads);
	dead_threads_count = 0;

	// dead children
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// job control entry
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	list_init(&sem_list);
	list_init_etc(&port_list, port_team_link_offset());
	list_init(&image_list);
	list_init(&watcher_list);

	clear_team_debug_info(&debug_info, true);

	// init dead/stopped/continued children condition vars
	dead_children.condition_variable.Init(&dead_children, "team children");

	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));

	fUserDefinedTimerCount = 0;
}


Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	if (io_context != NULL)
		vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	malloc_referenced_release(supplementary_groups);

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}


/*static*/ Team*
Team::Create(team_id id, const char* name, bool kernel)
{
	// create the team object
	Team* team = new(std::nothrow) Team(id, kernel);
	if (team == NULL)
		return NULL;
	ObjectDeleter<Team> teamDeleter(team);

	if (name != NULL)
		team->SetName(name);

	// check initialization
	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
		return NULL;

	// finish initialization (arch specifics)
	if (arch_team_init_team_struct(team, kernel) != B_OK)
		return NULL;

	if (!kernel) {
		status_t error = user_timer_create_team_timers(team);
		if (error != B_OK)
			return NULL;
	}

	// everything went fine
	return teamDeleter.Detach();
}
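
/*	Typical creation pattern (illustrative sketch; load_image_internal() and
	fork_team() below do exactly this): pair the raw pointer with a BReference
	so the team is released automatically on the error paths.

		Team* team = Team::Create(mainThread->id, path, false);
		if (team == NULL)
			return B_NO_MEMORY;
		BReference<Team> teamReference(team, true);
*/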


/*!	\brief Returns the team with the given ID.
	Returns a reference to the team.
	Team and thread spinlock must not be held.
*/
/*static*/ Team*
Team::Get(team_id id)
{
	if (id == B_CURRENT_TEAM) {
		Team* team = thread_get_current_thread()->team;
		team->AcquireReference();
		return team;
	}

	InterruptsSpinLocker locker(sTeamHashLock);
	Team* team = sTeamHash.Lookup(id);
	if (team != NULL)
		team->AcquireReference();
	return team;
}


/*!	\brief Returns the team with the given ID in a locked state.
	Returns a reference to the team.
	Team and thread spinlock must not be held.
*/
/*static*/ Team*
Team::GetAndLock(team_id id)
{
	// get the team
	Team* team = Get(id);
	if (team == NULL)
		return NULL;

	// lock it
	team->Lock();

	// only return the team if it isn't already dying
	if (team->state >= TEAM_STATE_SHUTDOWN) {
		team->Unlock();
		team->ReleaseReference();
		return NULL;
	}

	return team;
}
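
/*	Callers typically adopt both the reference and the lock (illustrative
	sketch; TeamLocker is an AutoLocker, and the second constructor argument
	means "already locked"):

		Team* team = Team::GetAndLock(id);
		if (team == NULL)
			return B_BAD_TEAM_ID;
		BReference<Team> reference(team, true);
		TeamLocker locker(team, true);
		// ... work with the locked team ...
*/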


/*!	Locks the team and its parent team (if any).
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked. If the
	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
	only the team itself is locked.

	\param dontLockParentIfKernel If \c true, the team's parent team is only
		locked if it is not the kernel team.
*/
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent. This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);

		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}


/*!	Unlocks the team and its parent team (if any).
*/
void
Team::UnlockTeamAndParent()
{
	if (parent != NULL)
		parent->Unlock();

	Unlock();
}


/*!	Locks the team, its parent team (if any), and the team's process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked.
*/
void
Team::LockTeamParentAndProcessGroup()
{
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's lock.
	// If we have a parent, try to lock it.
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest of
	// the job.
	Unlock();
	LockTeamAndParent(false);
}


/*!	Unlocks the team, its parent team (if any), and the team's process group.
*/
void
Team::UnlockTeamParentAndProcessGroup()
{
	group->Unlock();

	if (parent != NULL)
		parent->Unlock();

	Unlock();
}


void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);

		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}


void
Team::UnlockTeamAndProcessGroup()
{
	group->Unlock();
	Unlock();
}


void
Team::SetName(const char* name)
{
	if (const char* lastSlash = strrchr(name, '/'))
		name = lastSlash + 1;

	strlcpy(fName, name, B_OS_NAME_LENGTH);
}


void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}


void
Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
{
	fArgs[0] = '\0';
	strlcpy(fArgs, path, sizeof(fArgs));
	for (int i = 0; i < otherArgCount; i++) {
		strlcat(fArgs, " ", sizeof(fArgs));
		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
	}
}


void
Team::ResetSignalsOnExec()
{
	// We are supposed to keep pending signals. Signal actions shall be reset
	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
	// (for SIGCHLD it's implementation-defined). Others shall be reset to
	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
	// flags, but since there aren't any handlers, they make little sense, so
	// we clear them.

	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
		struct sigaction& action = SignalActionFor(i);
		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
			action.sa_handler = SIG_DFL;

		action.sa_mask = 0;
		action.sa_flags = 0;
		action.sa_userdata = NULL;
	}
}


void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}


/*!	Adds the given user timer to the team and, if user-defined, assigns it an
	ID.

	The caller must hold the team's lock.

	\param timer The timer to be added. If it doesn't have an ID yet, it is
		considered user-defined and will be assigned an ID.
	\return \c B_OK, if the timer was added successfully, another error code
		otherwise.
*/
status_t
Team::AddUserTimer(UserTimer* timer)
{
	// don't allow addition of timers when already shutting the team down
	if (state >= TEAM_STATE_SHUTDOWN)
		return B_BAD_TEAM_ID;

	// If the timer is user-defined, check timer limit and increment
	// user-defined count.
	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
		return EAGAIN;

	fUserTimers.AddTimer(timer);

	return B_OK;
}


/*!	Removes the given user timer from the team.

	The caller must hold the team's lock.

	\param timer The timer to be removed.
*/
void
Team::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		UserDefinedTimersRemoved(1);
}


/*!	Deletes all (or all user-defined) user timers of the team.

	Timers belonging to the team's threads are not affected.
	The caller must hold the team's lock.

	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
		otherwise all timers are deleted.
*/
void
Team::DeleteUserTimers(bool userDefinedOnly)
{
	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
	UserDefinedTimersRemoved(count);
}


/*!	If not at the limit yet, increments the team's user-defined timer count.
	\return \c true, if the limit wasn't reached yet, \c false otherwise.
*/
bool
Team::CheckAddUserDefinedTimer()
{
	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
		atomic_add(&fUserDefinedTimerCount, -1);
		return false;
	}

	return true;
}
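
/*	Note the optimistic pattern above: the counter is incremented first and
	rolled back when the limit turns out to be exceeded, which keeps the
	check-and-increment atomic without taking a lock.
*/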


/*!	Subtracts the given count from the team's user-defined timer count.
	\param count The count to subtract.
*/
void
Team::UserDefinedTimersRemoved(int32 count)
{
	atomic_add(&fUserDefinedTimerCount, -count);
}


void
Team::DeactivateCPUTimeUserTimers()
{
	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();

	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
		timer->Deactivate();
}


/*!	Returns the team's current total CPU time (kernel + user + offset).

	The caller must hold the scheduler lock.

	\param ignoreCurrentRun If \c true and the current thread is one of the team's
		threads, don't add the time since the last time \c last_time was
		updated. Should be used in "thread unscheduled" scheduler callbacks,
		since although the thread is still running at that time, its time has
		already been stopped.
	\return The team's current total CPU time.
*/
bigtime_t
Team::CPUTime(bool ignoreCurrentRun) const
{
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->kernel_time + thread->user_time;

		if (thread->IsRunning()) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}
	}

	return time;
}


/*!	Returns the team's current user CPU time.

	The caller must hold the scheduler lock.

	\return The team's current user CPU time.
*/
bigtime_t
Team::UserCPUTime() const
{
	bigtime_t time = dead_threads_user_time;

	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->user_time;

		if (thread->IsRunning() && !thread->in_kernel)
			time += now - thread->last_time;
	}

	return time;
}


//	#pragma mark - ProcessGroup


ProcessGroup::ProcessGroup(pid_t id)
	:
	id(id),
	teams(NULL),
	fSession(NULL),
	fInOrphanedCheckList(false)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}


ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}


/*static*/ ProcessGroup*
ProcessGroup::Get(pid_t id)
{
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	ProcessGroup* group = sGroupHash.Lookup(id);
	if (group != NULL)
		group->AcquireReference();
	return group;
}


/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must not hold the process group hash lock.
*/
void
ProcessGroup::Publish(ProcessSession* session)
{
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	PublishLocked(session);
}


/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must hold the process group hash lock.
*/
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	fSession = session;
	fSession->AcquireReference();

	sGroupHash.InsertUnchecked(this);
}


/*!	Checks whether the process group is orphaned.
	The caller must hold the group's lock.
	\return \c true, if the group is orphaned, \c false otherwise.
*/
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}


void
ProcessGroup::ScheduleOrphanedCheck()
{
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (!fInOrphanedCheckList) {
		sOrphanedCheckProcessGroups.Add(this);
		fInOrphanedCheckList = true;
	}
}


void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}


//	#pragma mark - ProcessSession


ProcessSession::ProcessSession(pid_t id)
	:
	id(id),
	controlling_tty(-1),
	foreground_group(-1)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}


ProcessSession::~ProcessSession()
{
	mutex_destroy(&fLock);
}


//	#pragma mark - KDL functions


static void
_dump_team_info(Team* team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
		team->id);
	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
	kprintf("name:             '%s'\n", team->Name());
	kprintf("args:             '%s'\n", team->Args());
	kprintf("hash_next:        %p\n", team->hash_next);
	kprintf("parent:           %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:         %p\n", team->children);
	kprintf("num_threads:      %d\n", team->num_threads);
	kprintf("state:            %d\n", team->state);
	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
	kprintf("io_context:       %p\n", team->io_context);
	if (team->address_space)
		kprintf("address_space:    %p\n", team->address_space);
	kprintf("user data:        %p (area %" B_PRId32 ")\n",
		(void*)team->user_data, team->user_data_area);
	kprintf("free user thread: %p\n", team->free_user_threads);
	kprintf("main_thread:      %p\n", team->main_thread);
	kprintf("thread_list:      %p\n", team->thread_list);
	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
}


static int
dump_team_info(int argc, char** argv)
{
	ulong arg;
	bool found = false;

	if (argc < 2) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	arg = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(arg)) {
		// semi-hack
		_dump_team_info((Team*)arg);
		return 0;
	}

	// walk through the team list, trying to match name or id
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == (team_id)arg) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found)
		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
	return 0;
}


static int
dump_teams(int argc, char** argv)
{
	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
		B_PRINTF_POINTER_WIDTH, "parent");

	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
	}

	return 0;
}
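
/*	These two functions back the kernel debugger's team-inspection commands
	(registered elsewhere in this file). Assuming the usual command names, an
	illustrative KDL session would be:

		kdebug> teams
		kdebug> team 1
		kdebug> team app_server

	The argument may be a team ID, a team name, or -- as a semi-hack -- a raw
	Team pointer (any kernel address is treated as such).
*/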


//	#pragma mark - Private functions


/*!	Inserts team \a team into the child list of team \a parent.

	The caller must hold the lock of both \a parent and \a team.

	\param parent The parent team.
	\param team The team to be inserted into \a parent's child list.
*/
static void
insert_team_into_parent(Team* parent, Team* team)
{
	ASSERT(parent != NULL);

	team->siblings_next = parent->children;
	parent->children = team;
	team->parent = parent;
}


/*!	Removes team \a team from the child list of team \a parent.

	The caller must hold the lock of both \a parent and \a team.

	\param parent The parent team.
	\param team The team to be removed from \a parent's child list.
*/
static void
remove_team_from_parent(Team* parent, Team* team)
{
	Team* child;
	Team* last = NULL;

	for (child = parent->children; child != NULL;
			child = child->siblings_next) {
		if (child == team) {
			if (last == NULL)
				parent->children = child->siblings_next;
			else
				last->siblings_next = child->siblings_next;

			team->parent = NULL;
			break;
		}
		last = child;
	}
}


/*!	Returns whether the given team is a session leader.
	The caller must hold the team's lock or its process group's lock.
*/
static bool
is_session_leader(Team* team)
{
	return team->session_id == team->id;
}


/*!	Returns whether the given team is a process group leader.
	The caller must hold the team's lock or its process group's lock.
*/
static bool
is_process_group_leader(Team* team)
{
	return team->group_id == team->id;
}


/*!	Inserts the given team into the given process group.
	The caller must hold the process group's lock, the team's lock, and the
	team's parent's lock.
*/
static void
insert_team_into_group(ProcessGroup* group, Team* team)
{
	team->group = group;
	team->group_id = group->id;
	team->session_id = group->Session()->id;

	team->group_next = group->teams;
	group->teams = team;
	group->AcquireReference();
}


/*!	Removes the given team from its process group.

	The caller must hold the process group's lock, the team's lock, and the
	team's parent's lock. Interrupts must be enabled.

	\param team The team that'll be removed from its process group.
*/
static void
remove_team_from_group(Team* team)
{
	ProcessGroup* group = team->group;
	Team* current;
	Team* last = NULL;

	// the team must be in a process group to let this function have any effect
	if (group == NULL)
		return;

	for (current = group->teams; current != NULL;
			current = current->group_next) {
		if (current == team) {
			if (last == NULL)
				group->teams = current->group_next;
			else
				last->group_next = current->group_next;

			team->group = NULL;
			break;
		}
		last = current;
	}

	team->group = NULL;
	team->group_next = NULL;

	group->ReleaseReference();
}


static status_t
create_team_user_data(Team* team, void* exactAddress = NULL)
{
	void* address;
	uint32 addressSpec;

	if (exactAddress != NULL) {
		address = exactAddress;
		addressSpec = B_EXACT_ADDRESS;
	} else {
		address = (void*)KERNEL_USER_DATA_BASE;
		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
	}

	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);

	virtual_address_restrictions virtualRestrictions = {};
	if (result == B_OK || exactAddress != NULL) {
		if (exactAddress != NULL)
			virtualRestrictions.address = exactAddress;
		else
			virtualRestrictions.address = address;
		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	} else {
		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
	}

	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area",
		kTeamUserDataInitialSize, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0,
		&virtualRestrictions, &physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = kTeamUserDataInitialSize;
	team->free_user_threads = NULL;

	return B_OK;
}
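
/*	Design note: only kTeamUserDataInitialSize (4 pages) is created as an
	area, but it sits inside a kTeamUserDataReservedSize (128 pages) address
	range reservation -- presumably so the area can later be grown in place,
	without relocating user_thread structures already handed out to userland.
*/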


static void
delete_team_user_data(Team* team)
{
	if (team->user_data_area >= 0) {
		vm_delete_area(team->id, team->user_data_area, true);
		vm_unreserve_address_range(team->id, (void*)team->user_data,
			kTeamUserDataReservedSize);

		team->user_data = 0;
		team->used_user_data = 0;
		team->user_data_size = 0;
		team->user_data_area = -1;
		while (free_user_thread* entry = team->free_user_threads) {
			team->free_user_threads = entry->next;
			free(entry);
		}
	}
}


static status_t
copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, char**& _flatArgs)
{
	if (argCount < 0 || envCount < 0)
		return B_BAD_VALUE;

	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
		return B_TOO_MANY_ARGS;
	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
		return B_BAD_VALUE;

	if (!IS_USER_ADDRESS(userFlatArgs))
		return B_BAD_ADDRESS;

	// allocate kernel memory
	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
	if (flatArgs == NULL)
		return B_NO_MEMORY;

	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
		free(flatArgs);
		return B_BAD_ADDRESS;
	}

	// check and relocate the array
	status_t error = B_OK;
	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
	const char* stringEnd = (char*)flatArgs + flatArgsSize;
	for (int32 i = 0; i < argCount + envCount + 2; i++) {
		if (i == argCount || i == argCount + envCount + 1) {
			// check array null termination
			if (flatArgs[i] != NULL) {
				error = B_BAD_VALUE;
				break;
			}
		} else {
			// check string
			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
			size_t maxLen = stringEnd - arg;
			if (arg < stringBase || arg >= stringEnd
					|| strnlen(arg, maxLen) == maxLen) {
				error = B_BAD_VALUE;
				break;
			}

			flatArgs[i] = arg;
		}
	}

	if (error == B_OK)
		_flatArgs = flatArgs;
	else
		free(flatArgs);

	return error;
}
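
/*	For reference, the flat arguments buffer validated above has this layout
	(pointer array first, then the string data the pointers reference):

		char*	argv[argCount]		the program arguments
		char*	NULL				argv terminator
		char*	envp[envCount]		the environment
		char*	NULL				envp terminator
		char	strings[]			all argv/envp strings, NUL-terminated

	which is why the string data has to start at
	(char*)(flatArgs + argCount + envCount + 2), and why each user pointer is
	relocated by rebasing its offset from userFlatArgs onto flatArgs.
*/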


static void
free_team_arg(struct team_arg* teamArg)
{
	if (teamArg != NULL) {
		free(teamArg->flat_args);
		free(teamArg->path);
		free(teamArg);
	}
}


static status_t
create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
	port_id port, uint32 token)
{
	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
	if (teamArg == NULL)
		return B_NO_MEMORY;

	teamArg->path = strdup(path);
	if (teamArg->path == NULL) {
		free(teamArg);
		return B_NO_MEMORY;
	}

	// copy the args over
	teamArg->flat_args = flatArgs;
	teamArg->flat_args_size = flatArgsSize;
	teamArg->arg_count = argCount;
	teamArg->env_count = envCount;
	teamArg->flags = 0;
	teamArg->umask = umask;
	teamArg->error_port = port;
	teamArg->error_token = token;

	// determine the flags from the environment
	const char* const* env = flatArgs + argCount + 1;
	for (int32 i = 0; i < envCount; i++) {
		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
			break;
		}
	}

	*_teamArg = teamArg;
	return B_OK;
}
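
/*	ASLR can thus be disabled per team by whoever launches it, e.g. from a
	shell (illustrative):

		$ DISABLE_ASLR=1 ./some_program

	Since create_team_arg() is used by both load_image_internal() and
	exec_team(), the flag is re-evaluated from the new environment on every
	image load.
*/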


static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
		thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size								| usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
	// TLS_SIZE							| TLS data
	// sizeof(user_space_program_args)	| argument structure for the runtime
	//									| loader
	// flat arguments size				| flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// set team args and update state
	team->Lock();
	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	// Clone commpage area
	area_id commPageArea = clone_commpage_area(team->id,
		&team->commpage_address);
	if (commPageArea < B_OK) {
		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
			strerror(commPageArea)));
		return commPageArea;
	}

	// Register commpage image
	image_id commPageImage = get_commpage_image();
	image_info imageInfo;
	err = get_image_info(commPageImage, &imageInfo);
	if (err != B_OK) {
		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
			strerror(err)));
		return err;
	}
	imageInfo.text = team->commpage_address;
	image_id image = register_image(team, &imageInfo, sizeof(image_info));
	if (image < 0) {
		TRACE(("team_create_thread_start: register_image() failed: %s\n",
			strerror(image)));
		return image;
	}

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is,
1614 	{
1615 		// find runtime_loader path
1616 		KPath runtimeLoaderPath;
1617 		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
1618 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1619 		if (err < B_OK) {
1620 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1621 				strerror(err)));
1622 			return err;
1623 		}
1624 		runtimeLoaderPath.UnlockBuffer();
1625 		err = runtimeLoaderPath.Append("runtime_loader");
1626 
1627 		if (err == B_OK) {
1628 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
1629 				&entry);
1630 		}
1631 	}
1632 
1633 	if (err < B_OK) {
1634 		// Luckily, we don't have to clean up the mess we created - that's
1635 		// done for us by the normal team deletion process
1636 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1637 			"%s\n", strerror(err)));
1638 		return err;
1639 	}
1640 
1641 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1642 
1643 	// enter userspace -- returns only in case of error
1644 	return thread_enter_userspace_new_team(thread, (addr_t)entry,
1645 		programArgs, team->commpage_address);
1646 }
1647 
1648 
1649 static status_t
1650 team_create_thread_start(void* args)
1651 {
1652 	team_create_thread_start_internal(args);
1653 	team_init_exit_info_on_error(thread_get_current_thread()->team);
1654 	thread_exit();
1655 		// does not return
1656 	return B_OK;
1657 }
1658 
1659 
1660 static thread_id
1661 load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1662 	int32 envCount, int32 priority, team_id parentID, uint32 flags,
1663 	port_id errorPort, uint32 errorToken)
1664 {
1665 	char** flatArgs = _flatArgs;
1666 	thread_id thread;
1667 	status_t status;
1668 	struct team_arg* teamArgs;
1669 	struct team_loading_info loadingInfo;
1670 	io_context* parentIOContext = NULL;
1671 	team_id teamID;
1672 
1673 	if (flatArgs == NULL || argCount == 0)
1674 		return B_BAD_VALUE;
1675 
1676 	const char* path = flatArgs[0];
1677 
1678 	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
1679 		"\n", path, flatArgs, argCount));
1680 
1681 	// cut the path from the main thread name
1682 	const char* threadName = strrchr(path, '/');
1683 	if (threadName != NULL)
1684 		threadName++;
1685 	else
1686 		threadName = path;
1687 
1688 	// create the main thread object
1689 	Thread* mainThread;
1690 	status = Thread::Create(threadName, mainThread);
1691 	if (status != B_OK)
1692 		return status;
1693 	BReference<Thread> mainThreadReference(mainThread, true);
1694 
1695 	// create team object
1696 	Team* team = Team::Create(mainThread->id, path, false);
1697 	if (team == NULL)
1698 		return B_NO_MEMORY;
1699 	BReference<Team> teamReference(team, true);
1700 
1701 	if (flags & B_WAIT_TILL_LOADED) {
1702 		loadingInfo.thread = thread_get_current_thread();
1703 		loadingInfo.result = B_ERROR;
1704 		loadingInfo.done = false;
1705 		team->loading_info = &loadingInfo;
1706 	}
1707 
1708 	// get the parent team
1709 	Team* parent = Team::Get(parentID);
1710 	if (parent == NULL)
1711 		return B_BAD_TEAM_ID;
1712 	BReference<Team> parentReference(parent, true);
1713 
1714 	parent->LockTeamAndProcessGroup();
1715 	team->Lock();
1716 
1717 	// inherit the parent's user/group
1718 	inherit_parent_user_and_group(team, parent);
1719 
1720  	InterruptsSpinLocker teamsLocker(sTeamHashLock);

	sTeamHash.Insert(team);
	bool teamLimitReached = sUsedTeams >= sMaxTeams;
	if (!teamLimitReached)
		sUsedTeams++;

	teamsLocker.Unlock();

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err1;
	}

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err2;

	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err5;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		InterruptsSpinLocker schedulerLocker(gSchedulerLock);

		// resume the team's main thread
		if (mainThread != NULL && mainThread->state == B_THREAD_SUSPENDED)
			scheduler_enqueue_in_run_queue(mainThread);

		// Now suspend ourselves until loading is finished. We will be woken
		// either by the thread, when it finished or aborted loading, or when
		// the team is going to die (e.g. is killed). In either case the one
		// setting `loadingInfo.done' is responsible for removing the info from
		// the team structure.
		while (!loadingInfo.done) {
			thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
			scheduler_reschedule();
		}

		schedulerLocker.Unlock();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(teamID);

	return thread;

err5:
	delete_team_user_data(team);
err4:
	team->address_space->Put();
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	teamsLocker.Lock();
	sTeamHash.Remove(team);
	if (!teamLimitReached)
		sUsedTeams--;
	teamsLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	return status;
}


/*!	Almost shuts down the current team and loads a new image into it.
	If successful, this function does not return and will take over ownership of
	the arguments provided.
	This function may only be called in a userland team (caused by one of the
	exec*() syscalls).
*/
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
		team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	// update ASLR
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
2004 			// never returns
2005 	} else
2006 		free_team_arg(teamArgs);
2007 
2008 	// Sorry, we have to kill ourselves; there is no way out anymore
2009 	// (without any areas left and all that).
2010 	exit_thread(status);
2011 
2012 	// We return a status here since the signal that is sent by the
2013 	// call above is not immediately handled.
2014 	return B_ERROR;
2015 }
2016 
2017 
2018 static thread_id
2019 fork_team(void)
2020 {
2021 	Thread* parentThread = thread_get_current_thread();
2022 	Team* parentTeam = parentThread->team;
2023 	Team* team;
2024 	arch_fork_arg* forkArgs;
2025 	struct area_info info;
2026 	thread_id threadID;
2027 	status_t status;
2028 	ssize_t areaCookie;
2029 	int32 imageCookie;
2030 
2031 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2032 
2033 	if (parentTeam == team_get_kernel_team())
2034 		return B_NOT_ALLOWED;
2035 
2036 	// create a new team
2037 	// TODO: this is very similar to load_image_internal() - maybe we can do
2038 	// something about it :)
2039 
2040 	// create the main thread object
2041 	Thread* thread;
2042 	status = Thread::Create(parentThread->name, thread);
2043 	if (status != B_OK)
2044 		return status;
2045 	BReference<Thread> threadReference(thread, true);
2046 
2047 	// create the team object
2048 	team = Team::Create(thread->id, NULL, false);
2049 	if (team == NULL)
2050 		return B_NO_MEMORY;
2051 
2052 	parentTeam->LockTeamAndProcessGroup();
2053 	team->Lock();
2054 
2055 	team->SetName(parentTeam->Name());
2056 	team->SetArgs(parentTeam->Args());
2057 
2058 	team->commpage_address = parentTeam->commpage_address;
2059 
2060 	// Inherit the parent's user/group.
2061 	inherit_parent_user_and_group(team, parentTeam);
2062 
2063 	// inherit signal handlers
2064 	team->InheritSignalActions(parentTeam);
2065 
2066 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2067 
2068 	sTeamHash.Insert(team);
2069 	bool teamLimitReached = sUsedTeams >= sMaxTeams;
2070 	if (!teamLimitReached)
2071 		sUsedTeams++;
2072 
2073 	teamsLocker.Unlock();
2074 
2075 	insert_team_into_parent(parentTeam, team);
2076 	insert_team_into_group(parentTeam->group, team);
2077 
2078 	team->Unlock();
2079 	parentTeam->UnlockTeamAndProcessGroup();
2080 
2081 	// notify team listeners
2082 	sNotificationService.Notify(TEAM_ADDED, team);
2083 
2084 	// inherit some team debug flags
2085 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2086 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2087 
2088 	if (teamLimitReached) {
2089 		status = B_NO_MORE_TEAMS;
2090 		goto err1;
2091 	}
2092 
2093 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2094 	if (forkArgs == NULL) {
2095 		status = B_NO_MEMORY;
2096 		goto err1;
2097 	}
2098 
2099 	// create a new io_context for this team
2100 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2101 	if (!team->io_context) {
2102 		status = B_NO_MEMORY;
2103 		goto err2;
2104 	}
2105 
2106 	// duplicate the realtime sem context
2107 	if (parentTeam->realtime_sem_context) {
2108 		team->realtime_sem_context = clone_realtime_sem_context(
2109 			parentTeam->realtime_sem_context);
2110 		if (team->realtime_sem_context == NULL) {
2111 			status = B_NO_MEMORY;
2112 			goto err2;
2113 		}
2114 	}
2115 
2116 	// create an address space for this team
2117 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2118 		&team->address_space);
2119 	if (status < B_OK)
2120 		goto err3;
2121 
2122 	// copy all areas of the team
2123 	// TODO: should be able to handle stack areas differently (i.e. don't
2124 	// have them copy-on-write)
2125 
2126 	areaCookie = 0;
2127 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2128 		if (info.area == parentTeam->user_data_area) {
2129 			// don't clone the user area; just create a new one
2130 			status = create_team_user_data(team, info.address);
2131 			if (status != B_OK)
2132 				break;
2133 
2134 			thread->user_thread = team_allocate_user_thread(team);
2135 		} else {
2136 			void* address;
2137 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2138 				&address, B_CLONE_ADDRESS, info.protection, info.area);
2139 			if (area < B_OK) {
2140 				status = area;
2141 				break;
2142 			}
2143 
2144 			if (info.area == parentThread->user_stack_area)
2145 				thread->user_stack_area = area;
2146 		}
2147 	}
2148 
2149 	if (status < B_OK)
2150 		goto err4;
2151 
2152 	if (thread->user_thread == NULL) {
2153 #if KDEBUG
2154 		panic("user data area not found, parent area is %" B_PRId32,
2155 			parentTeam->user_data_area);
2156 #endif
2157 		status = B_ERROR;
2158 		goto err4;
2159 	}
2160 
2161 	thread->user_stack_base = parentThread->user_stack_base;
2162 	thread->user_stack_size = parentThread->user_stack_size;
2163 	thread->user_local_storage = parentThread->user_local_storage;
2164 	thread->sig_block_mask = parentThread->sig_block_mask;
2165 	thread->signal_stack_base = parentThread->signal_stack_base;
2166 	thread->signal_stack_size = parentThread->signal_stack_size;
2167 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2168 
2169 	arch_store_fork_frame(forkArgs);
2170 
2171 	// copy image list
2172 	image_info imageInfo;
2173 	imageCookie = 0;
2174 	while (get_next_image_info(parentTeam->id, &imageCookie, &imageInfo)
2175 			== B_OK) {
2176 		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
2177 		if (image < 0)
2178 			goto err5;
2179 	}
2180 
2181 	// create the main thread
2182 	{
2183 		ThreadCreationAttributes threadCreationAttributes(NULL,
2184 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2185 		threadCreationAttributes.forkArgs = forkArgs;
2186 		threadID = thread_create_thread(threadCreationAttributes, false);
2187 		if (threadID < 0) {
2188 			status = threadID;
2189 			goto err5;
2190 		}
2191 	}
2192 
2193 	// notify the debugger
2194 	user_debug_team_created(team->id);
2195 
2196 	T(TeamForked(threadID));
2197 
2198 	resume_thread(threadID);
2199 	return threadID;
2200 
2201 err5:
2202 	remove_images(team);
2203 err4:
2204 	team->address_space->RemoveAndPut();
2205 err3:
2206 	delete_realtime_sem_context(team->realtime_sem_context);
2207 err2:
2208 	free(forkArgs);
2209 err1:
2210 	// Remove the team structure from the process group, the parent team, and
2211 	// the team hash table and delete the team structure.
2212 	parentTeam->LockTeamAndProcessGroup();
2213 	team->Lock();
2214 
2215 	remove_team_from_group(team);
2216 	remove_team_from_parent(team->parent, team);
2217 
2218 	team->Unlock();
2219 	parentTeam->UnlockTeamAndProcessGroup();
2220 
2221 	teamsLocker.Lock();
2222 	sTeamHash.Remove(team);
2223 	if (!teamLimitReached)
2224 		sUsedTeams--;
2225 	teamsLocker.Unlock();
2226 
2227 	sNotificationService.Notify(TEAM_REMOVED, team);
2228 
2229 	team->ReleaseReference();
2230 
2231 	return status;
2232 }
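// Note: the loop above clones the parent's areas via vm_copy_area()
// (typically copy-on-write), with one exception -- the user data area is
// recreated from scratch and a fresh user_thread slot is allocated from it,
// since it holds per-team kernel bookkeeping rather than application state.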
2233 
2234 
2235 /*!	Returns whether the specified team \a parent has any children belonging
2236 	to the process group with the specified ID \a groupID.
2237 	The caller must hold \a parent's lock.
2238 */
2239 static bool
2240 has_children_in_group(Team* parent, pid_t groupID)
2241 {
2242 	for (Team* child = parent->children; child != NULL;
2243 			child = child->siblings_next) {
2244 		TeamLocker childLocker(child);
2245 		if (child->group_id == groupID)
2246 			return true;
2247 	}
2248 
2249 	return false;
2250 }
2251 
2252 
2253 /*!	Returns the first job control entry from \a children, which matches \a id.
2254 	\a id can be:
2255 	- \code > 0 \endcode: Matching an entry with that team ID.
2256 	- \code == -1 \endcode: Matching any entry.
2257 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2258 	\c 0 is an invalid value for \a id.
2259 
2260 	The caller must hold the lock of the team that \a children belongs to.
2261 
2262 	\param children The job control entry list to check.
2263 	\param id The match criterion.
2264 	\return The first matching entry or \c NULL, if none matches.
2265 */
2266 static job_control_entry*
2267 get_job_control_entry(team_job_control_children& children, pid_t id)
2268 {
2269 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2270 		 job_control_entry* entry = it.Next();) {
2271 
2272 		if (id > 0) {
2273 			if (entry->thread == id)
2274 				return entry;
2275 		} else if (id == -1) {
2276 			return entry;
2277 		} else {
2278 			pid_t processGroup
2279 				= (entry->team ? entry->team->group_id : entry->group_id);
2280 			if (processGroup == -id)
2281 				return entry;
2282 		}
2283 	}
2284 
2285 	return NULL;
2286 }
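// For orientation: the \a id encoding above mirrors the POSIX waitpid()
// convention. A minimal sketch, with purely illustrative values:
//
//   get_job_control_entry(children, 123);
//       // entry of child team 123, if present
//   get_job_control_entry(children, -1);
//       // first entry of any child
//   get_job_control_entry(children, -42);
//       // first entry whose process group ID is 42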
2287 
2288 
2289 /*!	Returns the first job control entry from one of \a team's dead, continued,
2290 	or stopped children that matches \a id.
2291 	\a id can be:
2292 	- \code > 0 \endcode: Matching an entry with that team ID.
2293 	- \code == -1 \endcode: Matching any entry.
2294 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2295 	\c 0 is an invalid value for \a id.
2296 
2297 	The caller must hold \a team's lock.
2298 
2299 	\param team The team whose dead, stopped, and continued child lists shall be
2300 		checked.
2301 	\param id The match criterion.
2302 	\param flags Specifies which children shall be considered. Dead children
2303 		always are. Stopped children are considered when \a flags is ORed
2304 		bitwise with \c WUNTRACED, continued children when \a flags is ORed
2305 		bitwise with \c WCONTINUED.
2306 	\return The first matching entry or \c NULL, if none matches.
2307 */
2308 static job_control_entry*
2309 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2310 {
2311 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
2312 
2313 	if (entry == NULL && (flags & WCONTINUED) != 0)
2314 		entry = get_job_control_entry(team->continued_children, id);
2315 
2316 	if (entry == NULL && (flags & WUNTRACED) != 0)
2317 		entry = get_job_control_entry(team->stopped_children, id);
2318 
2319 	return entry;
2320 }
2321 
2322 
2323 job_control_entry::job_control_entry()
2324 	:
2325 	has_group_ref(false)
2326 {
2327 }
2328 
2329 
2330 job_control_entry::~job_control_entry()
2331 {
2332 	if (has_group_ref) {
2333 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2334 
2335 		ProcessGroup* group = sGroupHash.Lookup(group_id);
2336 		if (group == NULL) {
2337 			panic("job_control_entry::~job_control_entry(): unknown group "
2338 				"ID: %" B_PRId32, group_id);
2339 			return;
2340 		}
2341 
2342 		groupHashLocker.Unlock();
2343 
2344 		group->ReleaseReference();
2345 	}
2346 }
2347 
2348 
2349 /*!	Invoked when the owning team is dying, initializing the entry according to
2350 	the dead state.
2351 
2352 	The caller must hold the owning team's lock and the scheduler lock.
2353 */
2354 void
2355 job_control_entry::InitDeadState()
2356 {
2357 	if (team != NULL) {
2358 		ASSERT(team->exit.initialized);
2359 
2360 		group_id = team->group_id;
2361 		team->group->AcquireReference();
2362 		has_group_ref = true;
2363 
2364 		thread = team->id;
2365 		status = team->exit.status;
2366 		reason = team->exit.reason;
2367 		signal = team->exit.signal;
2368 		signaling_user = team->exit.signaling_user;
2369 
2370 		team = NULL;
2371 	}
2372 }
2373 
2374 
2375 job_control_entry&
2376 job_control_entry::operator=(const job_control_entry& other)
2377 {
2378 	state = other.state;
2379 	thread = other.thread;
2380 	signal = other.signal;
2381 	has_group_ref = false;
2382 	signaling_user = other.signaling_user;
2383 	team = other.team;
2384 	group_id = other.group_id;
2385 	status = other.status;
2386 	reason = other.reason;
2387 
2388 	return *this;
2389 }
2390 
2391 
2392 /*! This is the kernel backend for waitid().
2393 */
2394 static thread_id
2395 wait_for_child(pid_t child, uint32 flags, siginfo_t& _info)
2396 {
2397 	Thread* thread = thread_get_current_thread();
2398 	Team* team = thread->team;
2399 	struct job_control_entry foundEntry;
2400 	struct job_control_entry* freeDeathEntry = NULL;
2401 	status_t status = B_OK;
2402 
2403 	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRIu32 ")\n",
2404 		child, flags));
2405 
2406 	T(WaitForChild(child, flags));
2407 
2408 	pid_t originalChild = child;
2409 
2410 	bool ignoreFoundEntries = false;
2411 	bool ignoreFoundEntriesChecked = false;
2412 
2413 	while (true) {
2414 		// lock the team
2415 		TeamLocker teamLocker(team);
2416 
2417 		// A 0 child argument means to wait for all children in the process
2418 		// group of the calling team.
2419 		child = originalChild == 0 ? -team->group_id : originalChild;
2420 
2421 		// check whether any condition holds
2422 		job_control_entry* entry = get_job_control_entry(team, child, flags);
2423 
2424 		// If we don't have an entry yet, check whether there are any
2425 		// children complying with the process group specification at all.
2426 		if (entry == NULL) {
2429 			bool childrenExist = false;
2430 			if (child == -1) {
2431 				childrenExist = team->children != NULL;
2432 			} else if (child < -1) {
2433 				childrenExist = has_children_in_group(team, -child);
2434 			} else {
2435 				if (Team* childTeam = Team::Get(child)) {
2436 					BReference<Team> childTeamReference(childTeam, true);
2437 					TeamLocker childTeamLocker(childTeam);
2438 					childrenExist = childTeam->parent == team;
2439 				}
2440 			}
2441 
2442 			if (!childrenExist) {
2443 				// there is no child we could wait for
2444 				status = ECHILD;
2445 			} else {
2446 				// the children we're waiting for are still running
2447 				status = B_WOULD_BLOCK;
2448 			}
2449 		} else {
2450 			// got something
2451 			foundEntry = *entry;
2452 
2453 			// unless WNOWAIT has been specified, "consume" the wait state
2454 			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
2455 				if (entry->state == JOB_CONTROL_STATE_DEAD) {
2456 					// The child is dead. Reap its death entry.
2457 					freeDeathEntry = entry;
2458 					team->dead_children.entries.Remove(entry);
2459 					team->dead_children.count--;
2460 				} else {
2461 					// The child is well. Reset its job control state.
2462 					team_set_job_control_state(entry->team,
2463 						JOB_CONTROL_STATE_NONE, NULL, false);
2464 				}
2465 			}
2466 		}
2467 
2468 		// If we haven't got anything yet, prepare for waiting for the
2469 		// condition variable.
2470 		ConditionVariableEntry deadWaitEntry;
2471 
2472 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
2473 			team->dead_children.condition_variable.Add(&deadWaitEntry);
2474 
2475 		teamLocker.Unlock();
2476 
2477 		// we got our entry and can return to our caller
2478 		if (status == B_OK) {
2479 			if (ignoreFoundEntries) {
2480 				// ... unless we shall ignore found entries
2481 				delete freeDeathEntry;
2482 				freeDeathEntry = NULL;
2483 				continue;
2484 			}
2485 
2486 			break;
2487 		}
2488 
2489 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
2490 			T(WaitForChildDone(status));
2491 			return status;
2492 		}
2493 
2494 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
2495 		if (status == B_INTERRUPTED) {
2496 			T(WaitForChildDone(status));
2497 			return status;
2498 		}
2499 
2500 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
2501 		// all our children are dead and fail with ECHILD. We check the
2502 		// condition at this point.
2503 		if (!ignoreFoundEntriesChecked) {
2504 			teamLocker.Lock();
2505 
2506 			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
2507 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
2508 				|| handler.sa_handler == SIG_IGN) {
2509 				ignoreFoundEntries = true;
2510 			}
2511 
2512 			teamLocker.Unlock();
2513 
2514 			ignoreFoundEntriesChecked = true;
2515 		}
2516 	}
2517 
2518 	delete freeDeathEntry;
2519 
2520 	// By the time we get here, we have a valid death entry and have already
2521 	// been unregistered from the team or group. Fill in the returned info.
2522 	memset(&_info, 0, sizeof(_info));
2523 	_info.si_signo = SIGCHLD;
2524 	_info.si_pid = foundEntry.thread;
2525 	_info.si_uid = foundEntry.signaling_user;
2526 	// TODO: Fill in si_errno?
2527 
2528 	switch (foundEntry.state) {
2529 		case JOB_CONTROL_STATE_DEAD:
2530 			_info.si_code = foundEntry.reason;
2531 			_info.si_status = foundEntry.reason == CLD_EXITED
2532 				? foundEntry.status : foundEntry.signal;
2533 			break;
2534 		case JOB_CONTROL_STATE_STOPPED:
2535 			_info.si_code = CLD_STOPPED;
2536 			_info.si_status = foundEntry.signal;
2537 			break;
2538 		case JOB_CONTROL_STATE_CONTINUED:
2539 			_info.si_code = CLD_CONTINUED;
2540 			_info.si_status = 0;
2541 			break;
2542 		case JOB_CONTROL_STATE_NONE:
2543 			// can't happen
2544 			break;
2545 	}
2546 
2547 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
2548 	// status is available.
2549 	TeamLocker teamLocker(team);
2550 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2551 
2552 	if (is_team_signal_blocked(team, SIGCHLD)) {
2553 		if (get_job_control_entry(team, child, flags) == NULL)
2554 			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
2555 	}
2556 
2557 	schedulerLocker.Unlock();
2558 	teamLocker.Unlock();
2559 
2560 	// When the team is dead, the main thread continues to live in the kernel
2561 	// team for a very short time. To avoid surprises for the caller, we wait
2562 	// until the thread is really gone.
2563 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
2564 		wait_for_thread(foundEntry.thread, NULL);
2565 
2566 	T(WaitForChildDone(foundEntry));
2567 
2568 	return foundEntry.thread;
2569 }
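// For orientation, a minimal sketch of the assumed userland view: a classic
// POSIX reaping loop exercises exactly the paths implemented above, with
// WNOHANG mapping to the B_WOULD_BLOCK early return:
//
//   pid_t pid;
//   int status;
//   while ((pid = waitpid(-1, &status, WNOHANG)) > 0) {
//       // one dead/stopped/continued child is consumed per iteration
//   }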
2570 
2571 
2572 /*! Fills the team_info structure with information from the specified team.
2573 	Interrupts must be enabled. The team must not be locked.
2574 */
2575 static status_t
2576 fill_team_info(Team* team, team_info* info, size_t size)
2577 {
2578 	if (size != sizeof(team_info))
2579 		return B_BAD_VALUE;
2580 
2581 	// TODO: Set more information for team_info
2582 	memset(info, 0, size);
2583 
2584 	info->team = team->id;
2585 		// immutable
2586 	info->image_count = count_images(team);
2587 		// protected by sImageMutex
2588 
2589 	TeamLocker teamLocker(team);
2590 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2591 
2592 	info->thread_count = team->num_threads;
2593 	//info->area_count =
2594 	info->debugger_nub_thread = team->debug_info.nub_thread;
2595 	info->debugger_nub_port = team->debug_info.nub_port;
2596 	info->uid = team->effective_uid;
2597 	info->gid = team->effective_gid;
2598 
2599 	strlcpy(info->args, team->Args(), sizeof(info->args));
2600 	info->argc = 1;
2601 
2602 	return B_OK;
2603 }
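// A minimal sketch of reaching this backend through the public API (assumed
// usage, error handling elided):
//
//   team_info info;
//   if (get_team_info(B_CURRENT_TEAM, &info) == B_OK)
//       printf("team %" B_PRId32 ": %s\n", info.team, info.args);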
2604 
2605 
2606 /*!	Returns whether the process group contains stopped processes.
2607 	The caller must hold the process group's lock.
2608 */
2609 static bool
2610 process_group_has_stopped_processes(ProcessGroup* group)
2611 {
2612 	Team* team = group->teams;
2613 	while (team != NULL) {
2614 		// the parent team's lock guards the job control entry -- acquire it
2615 		team->LockTeamAndParent(false);
2616 
2617 		if (team->job_control_entry != NULL
2618 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2619 			team->UnlockTeamAndParent();
2620 			return true;
2621 		}
2622 
2623 		team->UnlockTeamAndParent();
2624 
2625 		team = team->group_next;
2626 	}
2627 
2628 	return false;
2629 }
2630 
2631 
2632 /*!	Iterates through all process groups queued in team_remove_team() and signals
2633 	those that are orphaned and have stopped processes.
2634 	The caller must not hold any team or process group locks.
2635 */
2636 static void
2637 orphaned_process_group_check()
2638 {
2639 	// process as long as there are groups in the list
2640 	while (true) {
2641 		// remove the head from the list
2642 		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
2643 
2644 		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
2645 		if (group == NULL)
2646 			return;
2647 
2648 		group->UnsetOrphanedCheck();
2649 		BReference<ProcessGroup> groupReference(group);
2650 
2651 		orphanedCheckLocker.Unlock();
2652 
2653 		AutoLocker<ProcessGroup> groupLocker(group);
2654 
2655 		// If the group is orphaned and contains stopped processes, we're
2656 		// supposed to send SIGHUP + SIGCONT.
2657 		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
2658 			Thread* currentThread = thread_get_current_thread();
2659 
2660 			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
2661 			send_signal_to_process_group_locked(group, signal, 0);
2662 
2663 			signal.SetNumber(SIGCONT);
2664 			send_signal_to_process_group_locked(group, signal, 0);
2665 		}
2666 	}
2667 }
2668 
2669 
2670 static status_t
2671 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2672 	uint32 flags)
2673 {
2674 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2675 		return B_BAD_VALUE;
2676 
2677 	// get the team
2678 	Team* team = Team::GetAndLock(id);
2679 	if (team == NULL)
2680 		return B_BAD_TEAM_ID;
2681 	BReference<Team> teamReference(team, true);
2682 	TeamLocker teamLocker(team, true);
2683 
2684 	if ((flags & B_CHECK_PERMISSION) != 0) {
2685 		uid_t uid = geteuid();
2686 		if (uid != 0 && uid != team->effective_uid)
2687 			return B_NOT_ALLOWED;
2688 	}
2689 
2690 	bigtime_t kernelTime = 0;
2691 	bigtime_t userTime = 0;
2692 
2693 	switch (who) {
2694 		case B_TEAM_USAGE_SELF:
2695 		{
2696 			Thread* thread = team->thread_list;
2697 
2698 			for (; thread != NULL; thread = thread->team_next) {
2699 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2700 				kernelTime += thread->kernel_time;
2701 				userTime += thread->user_time;
2702 			}
2703 
2704 			kernelTime += team->dead_threads_kernel_time;
2705 			userTime += team->dead_threads_user_time;
2706 			break;
2707 		}
2708 
2709 		case B_TEAM_USAGE_CHILDREN:
2710 		{
2711 			Team* child = team->children;
2712 			for (; child != NULL; child = child->siblings_next) {
2713 				TeamLocker childLocker(child);
2714 
2715 				Thread* thread = child->thread_list;
2716 
2717 				for (; thread != NULL; thread = thread->team_next) {
2718 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2719 					kernelTime += thread->kernel_time;
2720 					userTime += thread->user_time;
2721 				}
2722 
2723 				kernelTime += child->dead_threads_kernel_time;
2724 				userTime += child->dead_threads_user_time;
2725 			}
2726 
2727 			kernelTime += team->dead_children.kernel_time;
2728 			userTime += team->dead_children.user_time;
2729 			break;
2730 		}
2731 	}
2732 
2733 	info->kernel_time = kernelTime;
2734 	info->user_time = userTime;
2735 
2736 	return B_OK;
2737 }
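// A minimal sketch of the corresponding public API call (assumed usage); it
// reports the times summed by the switch above:
//
//   team_usage_info usage;
//   if (get_team_usage_info(B_CURRENT_TEAM, B_TEAM_USAGE_SELF, &usage)
//           == B_OK) {
//       printf("kernel: %lld us, user: %lld us\n",
//           (long long)usage.kernel_time, (long long)usage.user_time);
//   }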
2738 
2739 
2740 //	#pragma mark - Private kernel API
2741 
2742 
2743 status_t
2744 team_init(kernel_args* args)
2745 {
2746 	// create the team hash table
2747 	new(&sTeamHash) TeamTable;
2748 	if (sTeamHash.Init(64) != B_OK)
2749 		panic("Failed to init team hash table!");
2750 
2751 	new(&sGroupHash) ProcessGroupHashTable;
2752 	if (sGroupHash.Init() != B_OK)
2753 		panic("Failed to init process group hash table!");
2754 
2755 	// create initial session and process groups
2756 
2757 	ProcessSession* session = new(std::nothrow) ProcessSession(1);
2758 	if (session == NULL)
2759 		panic("Could not create initial session.\n");
2760 	BReference<ProcessSession> sessionReference(session, true);
2761 
2762 	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
2763 	if (group == NULL)
2764 		panic("Could not create initial process group.\n");
2765 	BReference<ProcessGroup> groupReference(group, true);
2766 
2767 	group->Publish(session);
2768 
2769 	// create the kernel team
2770 	sKernelTeam = Team::Create(1, "kernel_team", true);
2771 	if (sKernelTeam == NULL)
2772 		panic("could not create kernel team!\n");
2773 	sKernelTeam->SetArgs(sKernelTeam->Name());
2774 	sKernelTeam->state = TEAM_STATE_NORMAL;
2775 
2776 	sKernelTeam->saved_set_uid = 0;
2777 	sKernelTeam->real_uid = 0;
2778 	sKernelTeam->effective_uid = 0;
2779 	sKernelTeam->saved_set_gid = 0;
2780 	sKernelTeam->real_gid = 0;
2781 	sKernelTeam->effective_gid = 0;
2782 	sKernelTeam->supplementary_groups = NULL;
2783 	sKernelTeam->supplementary_group_count = 0;
2784 
2785 	insert_team_into_group(group, sKernelTeam);
2786 
2787 	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
2788 	if (sKernelTeam->io_context == NULL)
2789 		panic("could not create io_context for kernel team!\n");
2790 
2791 	// stick it in the team hash
2792 	sTeamHash.Insert(sKernelTeam);
2793 
2794 	add_debugger_command_etc("team", &dump_team_info,
2795 		"Dump info about a particular team",
2796 		"[ <id> | <address> | <name> ]\n"
2797 		"Prints information about the specified team. If no argument is given\n"
2798 		"the current team is selected.\n"
2799 		"  <id>       - The ID of the team.\n"
2800 		"  <address>  - The address of the team structure.\n"
2801 		"  <name>     - The team's name.\n", 0);
2802 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2803 		"\n"
2804 		"Prints a list of all existing teams.\n", 0);
2805 
2806 	new(&sNotificationService) TeamNotificationService();
2807 
2808 	sNotificationService.Register();
2809 
2810 	return B_OK;
2811 }
2812 
2813 
2814 int32
2815 team_max_teams(void)
2816 {
2817 	return sMaxTeams;
2818 }
2819 
2820 
2821 int32
2822 team_used_teams(void)
2823 {
2824 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2825 	return sUsedTeams;
2826 }
2827 
2828 
2829 /*! Returns a death entry of a child team specified by ID (if any).
2830 	The caller must hold the team's lock.
2831 
2832 	\param team The team whose dead children list to check.
2833 	\param child The ID of the child whose death entry to look up. Must be > 0.
2834 	\param _deleteEntry Return variable indicating whether the caller needs to
2835 		delete the returned entry.
2836 	\return The death entry of the matching team, or \c NULL, if no death entry
2837 		for the team was found.
2838 */
2839 job_control_entry*
2840 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2841 {
2842 	if (child <= 0)
2843 		return NULL;
2844 
2845 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2846 		child);
2847 	if (entry) {
2848 		// remove the entry only if the caller is the parent of the found team
2849 		if (team_get_current_team_id() == entry->thread) {
2850 			team->dead_children.entries.Remove(entry);
2851 			team->dead_children.count--;
2852 			*_deleteEntry = true;
2853 		} else {
2854 			*_deleteEntry = false;
2855 		}
2856 	}
2857 
2858 	return entry;
2859 }
2860 
2861 
2862 /*! Quick check to see if we have a valid team ID. */
2863 bool
2864 team_is_valid(team_id id)
2865 {
2866 	if (id <= 0)
2867 		return false;
2868 
2869 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2870 
2871 	return team_get_team_struct_locked(id) != NULL;
2872 }
2873 
2874 
2875 Team*
2876 team_get_team_struct_locked(team_id id)
2877 {
2878 	return sTeamHash.Lookup(id);
2879 }
2880 
2881 
2882 void
2883 team_set_controlling_tty(int32 ttyIndex)
2884 {
2885 	// lock the team, so its session won't change while we're playing with it
2886 	Team* team = thread_get_current_thread()->team;
2887 	TeamLocker teamLocker(team);
2888 
2889 	// get and lock the session
2890 	ProcessSession* session = team->group->Session();
2891 	AutoLocker<ProcessSession> sessionLocker(session);
2892 
2893 	// set the session's fields
2894 	session->controlling_tty = ttyIndex;
2895 	session->foreground_group = -1;
2896 }
2897 
2898 
2899 int32
2900 team_get_controlling_tty()
2901 {
2902 	// lock the team, so its session won't change while we're playing with it
2903 	Team* team = thread_get_current_thread()->team;
2904 	TeamLocker teamLocker(team);
2905 
2906 	// get and lock the session
2907 	ProcessSession* session = team->group->Session();
2908 	AutoLocker<ProcessSession> sessionLocker(session);
2909 
2910 	// get the session's field
2911 	return session->controlling_tty;
2912 }
2913 
2914 
2915 status_t
2916 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2917 {
2918 	// lock the team, so its session won't change while we're playing with it
2919 	Thread* thread = thread_get_current_thread();
2920 	Team* team = thread->team;
2921 	TeamLocker teamLocker(team);
2922 
2923 	// get and lock the session
2924 	ProcessSession* session = team->group->Session();
2925 	AutoLocker<ProcessSession> sessionLocker(session);
2926 
2927 	// check given TTY -- must be the controlling tty of the calling process
2928 	if (session->controlling_tty != ttyIndex)
2929 		return ENOTTY;
2930 
2931 	// check given process group -- must belong to our session
2932 	{
2933 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2934 		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
2935 		if (group == NULL || group->Session() != session)
2936 			return B_BAD_VALUE;
2937 	}
2938 
2939 	// If we are a background group, we can do this unharmed only if we
2940 	// ignore or block SIGTTOU. Otherwise the group is sent a SIGTTOU.
2941 	if (session->foreground_group != -1
2942 		&& session->foreground_group != team->group_id
2943 		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN) {
2944 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2945 
2946 		if (!is_team_signal_blocked(team, SIGTTOU)) {
2947 			pid_t groupID = team->group_id;
2948 
2949 			schedulerLocker.Unlock();
2950 			sessionLocker.Unlock();
2951 			teamLocker.Unlock();
2952 
2953 			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
2954 			send_signal_to_process_group(groupID, signal, 0);
2955 			return B_INTERRUPTED;
2956 		}
2957 	}
2958 
2959 	session->foreground_group = processGroupID;
2960 
2961 	return B_OK;
2962 }
2963 
2964 
2965 /*!	Removes the specified team from the global team hash, from its process
2966 	group, and from its parent.
2967 	It also moves all of its children to the kernel team.
2968 
2969 	The caller must hold the following locks:
2970 	- \a team's process group's lock,
2971 	- the kernel team's lock,
2972 	- \a team's parent team's lock (might be the kernel team), and
2973 	- \a team's lock.
2974 */
2975 void
2976 team_remove_team(Team* team, pid_t& _signalGroup)
2977 {
2978 	Team* parent = team->parent;
2979 
2980 	// remember how long this team lasted
2981 	parent->dead_children.kernel_time += team->dead_threads_kernel_time
2982 		+ team->dead_children.kernel_time;
2983 	parent->dead_children.user_time += team->dead_threads_user_time
2984 		+ team->dead_children.user_time;
2985 
2986 	// remove the team from the hash table
2987 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2988 	sTeamHash.Remove(team);
2989 	sUsedTeams--;
2990 	teamsLocker.Unlock();
2991 
2992 	// The team can no longer be accessed by ID. Navigation to it is still
2993 	// possible from its process group and its parent and children, but that
2994 	// will be rectified shortly.
2995 	team->state = TEAM_STATE_DEATH;
2996 
2997 	// If we're a controlling process (i.e. a session leader with controlling
2998 	// terminal), there's a bit of signaling we have to do. We can't do any of
2999 	// the signaling here, due to the locks we're holding, but we need to
3000 	// determine whom to signal.
3001 	_signalGroup = -1;
3002 	bool isSessionLeader = false;
3003 	if (team->session_id == team->id
3004 		&& team->group->Session()->controlling_tty >= 0) {
3005 		isSessionLeader = true;
3006 
3007 		ProcessSession* session = team->group->Session();
3008 
3009 		AutoLocker<ProcessSession> sessionLocker(session);
3010 
3011 		session->controlling_tty = -1;
3012 		_signalGroup = session->foreground_group;
3013 	}
3014 
3015 	// remove us from our process group
3016 	remove_team_from_group(team);
3017 
3018 	// move the team's children to the kernel team
3019 	while (Team* child = team->children) {
3020 		// remove the child from the current team and add it to the kernel team
3021 		TeamLocker childLocker(child);
3022 
3023 		remove_team_from_parent(team, child);
3024 		insert_team_into_parent(sKernelTeam, child);
3025 
3026 		// move job control entries too
3027 		sKernelTeam->stopped_children.entries.MoveFrom(
3028 			&team->stopped_children.entries);
3029 		sKernelTeam->continued_children.entries.MoveFrom(
3030 			&team->continued_children.entries);
3031 
3032 		// If the team was a session leader with controlling terminal,
3033 		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
3034 		// groups with stopped processes. Due to locking complications we can't
3035 		// do that here, so we only check whether we were a reason for the
3036 		// child's process group not being an orphan and, if so, schedule a
3037 		// later check (cf. orphaned_process_group_check()).
3038 		if (isSessionLeader) {
3039 			ProcessGroup* childGroup = child->group;
3040 			if (childGroup->Session()->id == team->session_id
3041 				&& childGroup->id != team->group_id) {
3042 				childGroup->ScheduleOrphanedCheck();
3043 			}
3044 		}
3045 
3046 		// Note, we don't move the dead children entries. Those will be deleted
3047 		// when the team structure is deleted.
3048 	}
3049 
3050 	// remove us from our parent
3051 	remove_team_from_parent(parent, team);
3052 }
3053 
3054 
3055 /*!	Kills all threads but the main thread of the team and shuts down user
3056 	debugging for it.
3057 	To be called on exit of the team's main thread. No locks may be held.
3058 
3059 	\param team The team in question.
3060 	\return The port of the debugger for the team, -1 if none. To be passed to
3061 		team_delete_team().
3062 */
3063 port_id
3064 team_shutdown_team(Team* team)
3065 {
3066 	ASSERT(thread_get_current_thread() == team->main_thread);
3067 
3068 	TeamLocker teamLocker(team);
3069 
3070 	// Make sure debugging changes won't happen anymore.
3071 	port_id debuggerPort = -1;
3072 	while (true) {
3073 		// If a debugger change is in progress for the team, we'll have to
3074 		// wait until it is done.
3075 		ConditionVariableEntry waitForDebuggerEntry;
3076 		bool waitForDebugger = false;
3077 
3078 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
3079 
3080 		if (team->debug_info.debugger_changed_condition != NULL) {
3081 			team->debug_info.debugger_changed_condition->Add(
3082 				&waitForDebuggerEntry);
3083 			waitForDebugger = true;
3084 		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
3085 			// The team is being debugged. That will stop with the termination
3086 			// of the nub thread. Since we set the team state to death, no one
3087 			// can install a debugger anymore. We fetch the debugger's port to
3088 			// send it a message at the bitter end.
3089 			debuggerPort = team->debug_info.debugger_port;
3090 		}
3091 
3092 		debugInfoLocker.Unlock();
3093 
3094 		if (!waitForDebugger)
3095 			break;
3096 
3097 		// wait for the debugger change to be finished
3098 		teamLocker.Unlock();
3099 
3100 		waitForDebuggerEntry.Wait();
3101 
3102 		teamLocker.Lock();
3103 	}
3104 
3105 	// Mark the team as shutting down. That will prevent new threads from being
3106 	// created and debugger changes from taking place.
3107 	team->state = TEAM_STATE_SHUTDOWN;
3108 
3109 	// delete all timers
3110 	team->DeleteUserTimers(false);
3111 
3112 	// deactivate CPU time user timers for the team
3113 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3114 
3115 	if (team->HasActiveCPUTimeUserTimers())
3116 		team->DeactivateCPUTimeUserTimers();
3117 
3118 	schedulerLocker.Unlock();
3119 
3120 	// kill all threads but the main thread
3121 	team_death_entry deathEntry;
3122 	deathEntry.condition.Init(team, "team death");
3123 
3124 	while (true) {
3125 		team->death_entry = &deathEntry;
3126 		deathEntry.remaining_threads = 0;
3127 
3128 		Thread* thread = team->thread_list;
3129 		while (thread != NULL) {
3130 			if (thread != team->main_thread) {
3131 				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
3132 				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
3133 				deathEntry.remaining_threads++;
3134 			}
3135 
3136 			thread = thread->team_next;
3137 		}
3138 
3139 		if (deathEntry.remaining_threads == 0)
3140 			break;
3141 
3142 		// there are threads to wait for
3143 		ConditionVariableEntry entry;
3144 		deathEntry.condition.Add(&entry);
3145 
3146 		teamLocker.Unlock();
3147 
3148 		entry.Wait();
3149 
3150 		teamLocker.Lock();
3151 	}
3152 
3153 	team->death_entry = NULL;
3154 
3155 	return debuggerPort;
3156 }
3157 
3158 
3159 /*!	Called on team exit to notify threads waiting on the team and free most
3160 	resources associated with it.
3161 	The caller shouldn't hold any locks.
3162 */
3163 void
3164 team_delete_team(Team* team, port_id debuggerPort)
3165 {
3166 	// Not quite in our job description, but work that has been left by
3167 	// team_remove_team() and that can be done now that we're not holding any
3168 	// locks.
3169 	orphaned_process_group_check();
3170 
3171 	team_id teamID = team->id;
3172 
3173 	ASSERT(team->num_threads == 0);
3174 
3175 	// If someone is waiting for this team to be loaded, but it dies
3176 	// unexpectedly before being done, we need to notify the waiting
3177 	// thread now.
3178 
3179 	TeamLocker teamLocker(team);
3180 
3181 	if (team->loading_info) {
3182 		// there's indeed someone waiting
3183 		struct team_loading_info* loadingInfo = team->loading_info;
3184 		team->loading_info = NULL;
3185 
3186 		loadingInfo->result = B_ERROR;
3187 		loadingInfo->done = true;
3188 
3189 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3190 
3191 		// wake up the waiting thread
3192 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
3193 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
3194 	}
3195 
3196 	// notify team watchers
3197 
3198 	{
3199 		// we're not reachable from anyone anymore at this point, so we
3200 		// can safely access the list without any locking
3201 		struct team_watcher* watcher;
3202 		while ((watcher = (struct team_watcher*)list_remove_head_item(
3203 				&team->watcher_list)) != NULL) {
3204 			watcher->hook(teamID, watcher->data);
3205 			free(watcher);
3206 		}
3207 	}
3208 
3209 	teamLocker.Unlock();
3210 
3211 	sNotificationService.Notify(TEAM_REMOVED, team);
3212 
3213 	// free team resources
3214 
3215 	delete_realtime_sem_context(team->realtime_sem_context);
3216 	xsi_sem_undo(team);
3217 	remove_images(team);
3218 	team->address_space->RemoveAndPut();
3219 
3220 	team->ReleaseReference();
3221 
3222 	// notify the debugger that the team is gone
3223 	user_debug_team_deleted(teamID, debuggerPort);
3224 }
3225 
3226 
3227 Team*
3228 team_get_kernel_team(void)
3229 {
3230 	return sKernelTeam;
3231 }
3232 
3233 
3234 team_id
3235 team_get_kernel_team_id(void)
3236 {
3237 	if (!sKernelTeam)
3238 		return 0;
3239 
3240 	return sKernelTeam->id;
3241 }
3242 
3243 
3244 team_id
3245 team_get_current_team_id(void)
3246 {
3247 	return thread_get_current_thread()->team->id;
3248 }
3249 
3250 
3251 status_t
3252 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3253 {
3254 	if (id == sKernelTeam->id) {
3255 		// we're the kernel team, so we don't have to go through all
3256 		// the hassle (locking and hash lookup)
3257 		*_addressSpace = VMAddressSpace::GetKernel();
3258 		return B_OK;
3259 	}
3260 
3261 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3262 
3263 	Team* team = team_get_team_struct_locked(id);
3264 	if (team == NULL)
3265 		return B_BAD_VALUE;
3266 
3267 	team->address_space->Get();
3268 	*_addressSpace = team->address_space;
3269 	return B_OK;
3270 }
3271 
3272 
3273 /*!	Sets the team's job control state.
3274 	The caller must hold the parent team's lock. Interrupts are allowed to be
3275 	enabled or disabled. In the latter case the scheduler lock may be held as
3276 	well.
3277 	\param team The team whose job control state shall be set.
3278 	\param newState The new state to be set.
3279 	\param signal The signal the new state was caused by. Can be \c NULL, if
3280 		none. Then the caller is responsible for filling in the following
3281 		fields of the entry before releasing the parent team's lock, unless the
3282 		new state is \c JOB_CONTROL_STATE_NONE:
3283 		- \c signal: The number of the signal causing the state change.
3284 		- \c signaling_user: The real UID of the user sending the signal.
3285 	\param schedulerLocked Indicates whether the scheduler lock is being held, too.
3286 */
3287 void
3288 team_set_job_control_state(Team* team, job_control_state newState,
3289 	Signal* signal, bool schedulerLocked)
3290 {
3291 	if (team == NULL || team->job_control_entry == NULL)
3292 		return;
3293 
3294 	// don't touch anything if the state stays the same or the team is
3295 	// already dead
3296 	job_control_entry* entry = team->job_control_entry;
3297 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
3298 		return;
3299 
3300 	T(SetJobControlState(team->id, newState, signal));
3301 
3302 	// remove from the old list
3303 	switch (entry->state) {
3304 		case JOB_CONTROL_STATE_NONE:
3305 			// entry is in no list ATM
3306 			break;
3307 		case JOB_CONTROL_STATE_DEAD:
3308 			// can't get here
3309 			break;
3310 		case JOB_CONTROL_STATE_STOPPED:
3311 			team->parent->stopped_children.entries.Remove(entry);
3312 			break;
3313 		case JOB_CONTROL_STATE_CONTINUED:
3314 			team->parent->continued_children.entries.Remove(entry);
3315 			break;
3316 	}
3317 
3318 	entry->state = newState;
3319 
3320 	if (signal != NULL) {
3321 		entry->signal = signal->Number();
3322 		entry->signaling_user = signal->SendingUser();
3323 	}
3324 
3325 	// add to new list
3326 	team_job_control_children* childList = NULL;
3327 	switch (entry->state) {
3328 		case JOB_CONTROL_STATE_NONE:
3329 			// entry doesn't get into any list
3330 			break;
3331 		case JOB_CONTROL_STATE_DEAD:
3332 			childList = &team->parent->dead_children;
3333 			team->parent->dead_children.count++;
3334 			break;
3335 		case JOB_CONTROL_STATE_STOPPED:
3336 			childList = &team->parent->stopped_children;
3337 			break;
3338 		case JOB_CONTROL_STATE_CONTINUED:
3339 			childList = &team->parent->continued_children;
3340 			break;
3341 	}
3342 
3343 	if (childList != NULL) {
3344 		childList->entries.Add(entry);
3345 		team->parent->dead_children.condition_variable.NotifyAll(
3346 			schedulerLocked);
3347 	}
3348 }
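// Summary of the transitions handled above -- each state corresponds to one
// of the parent's child lists:
//
//   JOB_CONTROL_STATE_NONE       -> no list
//   JOB_CONTROL_STATE_STOPPED    -> parent->stopped_children
//   JOB_CONTROL_STATE_CONTINUED  -> parent->continued_children
//   JOB_CONTROL_STATE_DEAD       -> parent->dead_children (count is bumped)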
3349 
3350 
3351 /*!	Inits the given team's exit information, if not yet initialized, to some
3352 	generic "killed" status.
3353 	The caller must not hold the team's lock. Interrupts must be enabled.
3354 
3355 	\param team The team whose exit info shall be initialized.
3356 */
3357 void
3358 team_init_exit_info_on_error(Team* team)
3359 {
3360 	TeamLocker teamLocker(team);
3361 
3362 	if (!team->exit.initialized) {
3363 		team->exit.reason = CLD_KILLED;
3364 		team->exit.signal = SIGKILL;
3365 		team->exit.signaling_user = geteuid();
3366 		team->exit.status = 0;
3367 		team->exit.initialized = true;
3368 	}
3369 }
3370 
3371 
3372 /*! Adds a hook to the team that is called as soon as this team goes away.
3373 	This call might be made public in the future.
3374 */
3375 status_t
3376 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3377 {
3378 	if (hook == NULL || teamID < B_OK)
3379 		return B_BAD_VALUE;
3380 
3381 	// create the watcher object
3382 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3383 	if (watcher == NULL)
3384 		return B_NO_MEMORY;
3385 
3386 	watcher->hook = hook;
3387 	watcher->data = data;
3388 
3389 	// add the watcher if the team isn't already dying
3390 	// get the team
3391 	Team* team = Team::GetAndLock(teamID);
3392 	if (team == NULL) {
3393 		free(watcher);
3394 		return B_BAD_TEAM_ID;
3395 	}
3396 
3397 	list_add_item(&team->watcher_list, watcher);
3398 
3399 	team->UnlockAndReleaseReference();
3400 
3401 	return B_OK;
3402 }
3403 
3404 
3405 status_t
3406 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3407 {
3408 	if (hook == NULL || teamID < 0)
3409 		return B_BAD_VALUE;
3410 
3411 	// get team and remove watcher (if present)
3412 	Team* team = Team::GetAndLock(teamID);
3413 	if (team == NULL)
3414 		return B_BAD_TEAM_ID;
3415 
3416 	// search for watcher
3417 	team_watcher* watcher = NULL;
3418 	while ((watcher = (team_watcher*)list_get_next_item(
3419 			&team->watcher_list, watcher)) != NULL) {
3420 		if (watcher->hook == hook && watcher->data == data) {
3421 			// got it!
3422 			list_remove_item(&team->watcher_list, watcher);
3423 			break;
3424 		}
3425 	}
3426 
3427 	team->UnlockAndReleaseReference();
3428 
3429 	if (watcher == NULL)
3430 		return B_ENTRY_NOT_FOUND;
3431 
3432 	free(watcher);
3433 	return B_OK;
3434 }
3435 
3436 
3437 /*!	Allocates a user_thread structure from the team.
3438 	The team lock must be held, unless the function is called for the team's
3439 	main thread. Interrupts must be enabled.
3440 */
3441 struct user_thread*
3442 team_allocate_user_thread(Team* team)
3443 {
3444 	if (team->user_data == 0)
3445 		return NULL;
3446 
3447 	// take an entry from the free list, if any
3448 	if (struct free_user_thread* entry = team->free_user_threads) {
3449 		user_thread* thread = entry->thread;
3450 		team->free_user_threads = entry->next;
3451 		free(entry);
3452 		return thread;
3453 	}
3454 
3455 	while (true) {
3456 		// enough space left?
3457 		size_t needed = ROUNDUP(sizeof(user_thread), 128);
3458 		if (team->user_data_size - team->used_user_data < needed) {
3459 			// try to resize the area
3460 			if (resize_area(team->user_data_area,
3461 					team->user_data_size + B_PAGE_SIZE) != B_OK) {
3462 				return NULL;
3463 			}
3464 
3465 			// resized user area successfully -- try to allocate the user_thread
3466 			// again
3467 			team->user_data_size += B_PAGE_SIZE;
3468 			continue;
3469 		}
3470 
3471 		// allocate the user_thread
3472 		user_thread* thread
3473 			= (user_thread*)(team->user_data + team->used_user_data);
3474 		team->used_user_data += needed;
3475 
3476 		return thread;
3477 	}
3478 }
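// Back-of-the-envelope example, assuming B_PAGE_SIZE is 4096 and that
// user_thread fits into one 128-byte slot (both are architecture-dependent):
// ROUNDUP(sizeof(user_thread), 128) = 128, so each page gained by
// resize_area() above yields 4096 / 128 = 32 additional user_thread slots.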
3479 
3480 
3481 /*!	Frees the given user_thread structure.
3482 	The team's lock must not be held. Interrupts must be enabled.
3483 	\param team The team the user thread was allocated from.
3484 	\param userThread The user thread to free.
3485 */
3486 void
3487 team_free_user_thread(Team* team, struct user_thread* userThread)
3488 {
3489 	if (userThread == NULL)
3490 		return;
3491 
3492 	// create a free list entry
3493 	free_user_thread* entry
3494 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3495 	if (entry == NULL) {
3496 		// we have to leak the user thread :-/
3497 		return;
3498 	}
3499 
3500 	// add to free list
3501 	TeamLocker teamLocker(team);
3502 
3503 	entry->thread = userThread;
3504 	entry->next = team->free_user_threads;
3505 	team->free_user_threads = entry;
3506 }
3507 
3508 
3509 //	#pragma mark - Associated data interface
3510 
3511 
3512 AssociatedData::AssociatedData()
3513 	:
3514 	fOwner(NULL)
3515 {
3516 }
3517 
3518 
3519 AssociatedData::~AssociatedData()
3520 {
3521 }
3522 
3523 
3524 void
3525 AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
3526 {
3527 }
3528 
3529 
3530 AssociatedDataOwner::AssociatedDataOwner()
3531 {
3532 	mutex_init(&fLock, "associated data owner");
3533 }
3534 
3535 
3536 AssociatedDataOwner::~AssociatedDataOwner()
3537 {
3538 	mutex_destroy(&fLock);
3539 }
3540 
3541 
3542 bool
3543 AssociatedDataOwner::AddData(AssociatedData* data)
3544 {
3545 	MutexLocker locker(fLock);
3546 
3547 	if (data->Owner() != NULL)
3548 		return false;
3549 
3550 	data->AcquireReference();
3551 	fList.Add(data);
3552 	data->SetOwner(this);
3553 
3554 	return true;
3555 }
3556 
3557 
3558 bool
3559 AssociatedDataOwner::RemoveData(AssociatedData* data)
3560 {
3561 	MutexLocker locker(fLock);
3562 
3563 	if (data->Owner() != this)
3564 		return false;
3565 
3566 	data->SetOwner(NULL);
3567 	fList.Remove(data);
3568 
3569 	locker.Unlock();
3570 
3571 	data->ReleaseReference();
3572 
3573 	return true;
3574 }
3575 
3576 
3577 void
3578 AssociatedDataOwner::PrepareForDeletion()
3579 {
3580 	MutexLocker locker(fLock);
3581 
3582 	// move all data to a temporary list and unset the owner
3583 	DataList list;
3584 	list.MoveFrom(&fList);
3585 
3586 	for (DataList::Iterator it = list.GetIterator();
3587 		AssociatedData* data = it.Next();) {
3588 		data->SetOwner(NULL);
3589 	}
3590 
3591 	locker.Unlock();
3592 
3593 	// call the notification hooks and release our references
3594 	while (AssociatedData* data = list.RemoveHead()) {
3595 		data->OwnerDeleted(this);
3596 		data->ReleaseReference();
3597 	}
3598 }
3599 
3600 
3601 /*!	Associates data with the current team.
3602 	When the team is deleted, the data object is notified.
3603 	The team acquires a reference to the object.
3604 
3605 	\param data The data object.
3606 	\return \c true on success, \c false otherwise. Fails only when the supplied
3607 		data object is already associated with another owner.
3608 */
3609 bool
3610 team_associate_data(AssociatedData* data)
3611 {
3612 	return thread_get_current_thread()->team->AddData(data);
3613 }
3614 
3615 
3616 /*!	Dissociates data from the current team.
3617 	Balances an earlier call to team_associate_data().
3618 
3619 	\param data The data object.
3620 	\return \c true on success, \c false otherwise. Fails only when the data
3621 		object is not associated with the current team.
3622 */
3623 bool
3624 team_dissociate_data(AssociatedData* data)
3625 {
3626 	return thread_get_current_thread()->team->RemoveData(data);
3627 }
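// A minimal sketch of a hypothetical AssociatedData subclass showing how the
// interface above is meant to be used (MyTeamData is an invented name):
//
//   struct MyTeamData : AssociatedData {
//       virtual void OwnerDeleted(AssociatedDataOwner* owner)
//       {
//           // the owning team is going away; drop back references here
//       }
//   };
//
//   MyTeamData* data = new(std::nothrow) MyTeamData;
//   if (data != NULL && !team_associate_data(data))
//       data->ReleaseReference();
//           // already associated with another owner -- give up our reference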
3628 
3629 
3630 //	#pragma mark - Public kernel API
3631 
3632 
3633 thread_id
3634 load_image(int32 argCount, const char** args, const char** env)
3635 {
3636 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
3637 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
3638 }
3639 
3640 
3641 thread_id
3642 load_image_etc(int32 argCount, const char* const* args,
3643 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3644 {
3645 	// we need to flatten the args and environment
3646 
3647 	if (args == NULL)
3648 		return B_BAD_VALUE;
3649 
3650 	// determine total needed size
3651 	int32 argSize = 0;
3652 	for (int32 i = 0; i < argCount; i++)
3653 		argSize += strlen(args[i]) + 1;
3654 
3655 	int32 envCount = 0;
3656 	int32 envSize = 0;
3657 	while (env != NULL && env[envCount] != NULL)
3658 		envSize += strlen(env[envCount++]) + 1;
3659 
3660 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3661 	if (size > MAX_PROCESS_ARGS_SIZE)
3662 		return B_TOO_MANY_ARGS;
3663 
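	// The flat buffer allocated below is laid out as follows (derived from
	// the copy loops underneath):
	//
	//   [argv[0] .. argv[argCount - 1]] [NULL]
	//   [env[0] .. env[envCount - 1]] [NULL]
	//   [argument strings][environment strings]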
3664 	// allocate space
3665 	char** flatArgs = (char**)malloc(size);
3666 	if (flatArgs == NULL)
3667 		return B_NO_MEMORY;
3668 
3669 	char** slot = flatArgs;
3670 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3671 
3672 	// copy arguments and environment
3673 	for (int32 i = 0; i < argCount; i++) {
3674 		int32 argSize = strlen(args[i]) + 1;
3675 		memcpy(stringSpace, args[i], argSize);
3676 		*slot++ = stringSpace;
3677 		stringSpace += argSize;
3678 	}
3679 
3680 	*slot++ = NULL;
3681 
3682 	for (int32 i = 0; i < envCount; i++) {
3683 		int32 envSize = strlen(env[i]) + 1;
3684 		memcpy(stringSpace, env[i], envSize);
3685 		*slot++ = stringSpace;
3686 		stringSpace += envSize;
3687 	}
3688 
3689 	*slot++ = NULL;
3690 
3691 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3692 		priority, parentID, flags, -1, 0);
3693 
3694 	free(flatArgs);
3695 		// load_image_internal() unset our variable if it took over ownership
3696 
3697 	return thread;
3698 }
3699 
3700 
3701 status_t
3702 wait_for_team(team_id id, status_t* _returnCode)
3703 {
3704 	// check whether the team exists
3705 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3706 
3707 	Team* team = team_get_team_struct_locked(id);
3708 	if (team == NULL)
3709 		return B_BAD_TEAM_ID;
3710 
3711 	id = team->id;
3712 
3713 	teamsLocker.Unlock();
3714 
3715 	// wait for the main thread (it has the same ID as the team)
3716 	return wait_for_thread(id, _returnCode);
3717 }
3718 
3719 
3720 status_t
3721 kill_team(team_id id)
3722 {
3723 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3724 
3725 	Team* team = team_get_team_struct_locked(id);
3726 	if (team == NULL)
3727 		return B_BAD_TEAM_ID;
3728 
3729 	id = team->id;
3730 
3731 	teamsLocker.Unlock();
3732 
3733 	if (team == sKernelTeam)
3734 		return B_NOT_ALLOWED;
3735 
3736 	// Just kill the team's main thread (it has the same ID as the team). The
3737 	// cleanup code there will take care of the team.
3738 	return kill_thread(id);
3739 }
3740 
3741 
3742 status_t
3743 _get_team_info(team_id id, team_info* info, size_t size)
3744 {
3745 	// get the team
3746 	Team* team = Team::Get(id);
3747 	if (team == NULL)
3748 		return B_BAD_TEAM_ID;
3749 	BReference<Team> teamReference(team, true);
3750 
3751 	// fill in the info
3752 	return fill_team_info(team, info, size);
3753 }
3754 
3755 
3756 status_t
3757 _get_next_team_info(int32* cookie, team_info* info, size_t size)
3758 {
3759 	int32 slot = *cookie;
3760 	if (slot < 1)
3761 		slot = 1;
3762 
3763 	InterruptsSpinLocker locker(sTeamHashLock);
3764 
3765 	team_id lastTeamID = peek_next_thread_id();
3766 		// TODO: This is broken, since the id can wrap around!
3767 
3768 	// get next valid team
3769 	Team* team = NULL;
3770 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3771 		slot++;
3772 
3773 	if (team == NULL)
3774 		return B_BAD_TEAM_ID;
3775 
3776 	// get a reference to the team and unlock
3777 	BReference<Team> teamReference(team);
3778 	locker.Unlock();
3779 
3780 	// fill in the info
3781 	*cookie = ++slot;
3782 	return fill_team_info(team, info, size);
3783 }
3784 
3785 
3786 status_t
3787 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3788 {
3789 	if (size != sizeof(team_usage_info))
3790 		return B_BAD_VALUE;
3791 
3792 	return common_get_team_usage_info(id, who, info, 0);
3793 }
3794 
3795 
3796 pid_t
3797 getpid(void)
3798 {
3799 	return thread_get_current_thread()->team->id;
3800 }
3801 
3802 
3803 pid_t
3804 getppid(void)
3805 {
3806 	Team* team = thread_get_current_thread()->team;
3807 
3808 	TeamLocker teamLocker(team);
3809 
3810 	return team->parent->id;
3811 }
3812 
3813 
3814 pid_t
3815 getpgid(pid_t id)
3816 {
3817 	if (id < 0) {
3818 		errno = EINVAL;
3819 		return -1;
3820 	}
3821 
3822 	if (id == 0) {
3823 		// get process group of the calling process
3824 		Team* team = thread_get_current_thread()->team;
3825 		TeamLocker teamLocker(team);
3826 		return team->group_id;
3827 	}
3828 
3829 	// get the team
3830 	Team* team = Team::GetAndLock(id);
3831 	if (team == NULL) {
3832 		errno = ESRCH;
3833 		return -1;
3834 	}
3835 
3836 	// get the team's process group ID
3837 	pid_t groupID = team->group_id;
3838 
3839 	team->UnlockAndReleaseReference();
3840 
3841 	return groupID;
3842 }
3843 
3844 
3845 pid_t
3846 getsid(pid_t id)
3847 {
3848 	if (id < 0) {
3849 		errno = EINVAL;
3850 		return -1;
3851 	}
3852 
3853 	if (id == 0) {
3854 		// get session of the calling process
3855 		Team* team = thread_get_current_thread()->team;
3856 		TeamLocker teamLocker(team);
3857 		return team->session_id;
3858 	}
3859 
3860 	// get the team
3861 	Team* team = Team::GetAndLock(id);
3862 	if (team == NULL) {
3863 		errno = ESRCH;
3864 		return -1;
3865 	}
3866 
3867 	// get the team's session ID
3868 	pid_t sessionID = team->session_id;
3869 
3870 	team->UnlockAndReleaseReference();
3871 
3872 	return sessionID;
3873 }
3874 
3875 
3876 //	#pragma mark - User syscalls
3877 
3878 
3879 status_t
3880 _user_exec(const char* userPath, const char* const* userFlatArgs,
3881 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
3882 {
3883 	// NOTE: Since this function normally doesn't return, don't use automatic
3884 	// variables that need destruction in the function scope.
3885 	char path[B_PATH_NAME_LENGTH];
3886 
3887 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3888 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3889 		return B_BAD_ADDRESS;
3890 
3891 	// copy and relocate the flat arguments
3892 	char** flatArgs;
3893 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3894 		argCount, envCount, flatArgs);
3895 
3896 	if (error == B_OK) {
3897 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3898 			envCount, umask);
3899 			// this one only returns in case of error
3900 	}
3901 
3902 	free(flatArgs);
3903 	return error;
3904 }
3905 
3906 
3907 thread_id
3908 _user_fork(void)
3909 {
3910 	return fork_team();
3911 }
3912 
3913 
3914 pid_t
3915 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo)
3916 {
3917 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
3918 		return B_BAD_ADDRESS;
3919 
3920 	siginfo_t info;
3921 	pid_t foundChild = wait_for_child(child, flags, info);
3922 	if (foundChild < 0)
3923 		return syscall_restart_handle_post(foundChild);
3924 
3925 	// copy info back to userland
3926 	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
3927 		return B_BAD_ADDRESS;
3928 
3929 	return foundChild;
3930 }
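
/*	Userland normally reaches this syscall through the standard POSIX wrappers
	in <sys/wait.h>; a typical caller:

		int status;
		pid_t pid = waitpid(-1, &status, 0);	// wait for any child
		if (pid > 0 && WIFEXITED(status))
			printf("child %d exited with %d\n", (int)pid, WEXITSTATUS(status));
*/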
3931 
3932 
3933 pid_t
3934 _user_process_info(pid_t process, int32 which)
3935 {
3936 	// we only allow to return the parent of the current process
3937 	// we only allow returning the parent of the current process
3938 		&& process != 0 && process != thread_get_current_thread()->team->id)
3939 		return B_BAD_VALUE;
3940 
3941 	pid_t result;
3942 	switch (which) {
3943 		case SESSION_ID:
3944 			result = getsid(process);
3945 			break;
3946 		case GROUP_ID:
3947 			result = getpgid(process);
3948 			break;
3949 		case PARENT_ID:
3950 			result = getppid();
3951 			break;
3952 		default:
3953 			return B_BAD_VALUE;
3954 	}
3955 
3956 	return result >= 0 ? result : errno;
3957 }
3958 
3959 
3960 pid_t
3961 _user_setpgid(pid_t processID, pid_t groupID)
3962 {
3963 	// setpgid() can be called either by the parent of the target process or
3964 	// by the process itself to do one of two things:
3965 	// * Create a new process group with the target process' ID and the target
3966 	//   process as group leader.
3967 	// * Set the target process' process group to an already existing one in the
3968 	//   same session.
3969 
3970 	if (groupID < 0)
3971 		return B_BAD_VALUE;
3972 
3973 	Team* currentTeam = thread_get_current_thread()->team;
3974 	if (processID == 0)
3975 		processID = currentTeam->id;
3976 
3977 	// if the group ID is not specified, use the target process' ID
3978 	if (groupID == 0)
3979 		groupID = processID;
3980 
3981 	// We loop when running into the following race condition: We create a new
3982 	// process group, because there isn't one with that ID yet, but later when
3983 	// trying to publish it, we find that someone else created and published
3984 	// a group with that ID in the meantime. In that case we just restart the
3985 	// whole action.
3986 	while (true) {
3987 		// Look up the process group by ID. If it doesn't exist yet and we are
3988 		// allowed to create a new one, do that.
3989 		ProcessGroup* group = ProcessGroup::Get(groupID);
3990 		bool newGroup = false;
3991 		if (group == NULL) {
3992 			if (groupID != processID)
3993 				return B_NOT_ALLOWED;
3994 
3995 			group = new(std::nothrow) ProcessGroup(groupID);
3996 			if (group == NULL)
3997 				return B_NO_MEMORY;
3998 
3999 			newGroup = true;
4000 		}
4001 		BReference<ProcessGroup> groupReference(group, true);
4002 
4003 		// get the target team
4004 		Team* team = Team::Get(processID);
4005 		if (team == NULL)
4006 			return ESRCH;
4007 		BReference<Team> teamReference(team, true);
4008 
4009 		// lock the new process group and the team's current process group
4010 		while (true) {
4011 			// lock the team's current process group
4012 			team->LockProcessGroup();
4013 
4014 			ProcessGroup* oldGroup = team->group;
4015 			if (oldGroup == group) {
4016 				// it's the same as the target group, so just bail out
4017 				oldGroup->Unlock();
4018 				return group->id;
4019 			}
4020 
4021 			oldGroup->AcquireReference();
4022 
4023 			// lock the target process group, if locking order allows it
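			// (The canonical order is by ascending group ID; a newly created,
			// not yet published group is invisible to other threads and can
			// therefore be locked at any time.)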
4024 			if (newGroup || group->id > oldGroup->id) {
4025 				group->Lock();
4026 				break;
4027 			}
4028 
4029 			// otherwise, try to lock the target group without blocking
4030 			if (group->TryLock())
4031 				break;
4032 
4033 			// no dice -- unlock the team's current process group and relock in
4034 			// the correct order
4035 			oldGroup->Unlock();
4036 
4037 			group->Lock();
4038 			oldGroup->Lock();
4039 
4040 			// check whether things are still the same
4041 			TeamLocker teamLocker(team);
4042 			if (team->group == oldGroup)
4043 				break;
4044 
4045 			// something changed -- unlock everything and retry
4046 			teamLocker.Unlock();
4047 			oldGroup->Unlock();
4048 			group->Unlock();
4049 			oldGroup->ReleaseReference();
4050 		}
4051 
4052 		// we now have references to and locks on both the new and old process groups
4053 		BReference<ProcessGroup> oldGroupReference(team->group, true);
4054 		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
4055 		AutoLocker<ProcessGroup> groupLocker(group, true);
4056 
4057 		// also lock the target team and its parent
4058 		team->LockTeamAndParent(false);
4059 		TeamLocker parentLocker(team->parent, true);
4060 		TeamLocker teamLocker(team, true);
4061 
4062 		// perform the checks
4063 		if (team == currentTeam) {
4064 			// we set our own group
4065 
4066 			// we must not change our process group ID if we're a session leader
4067 			if (is_session_leader(currentTeam))
4068 				return B_NOT_ALLOWED;
4069 		} else {
4070 			// Calling team != target team. The target team must be a child of
4071 			// the calling team and in the same session. (If that's the case it
4072 			// isn't a session leader either.)
4073 			if (team->parent != currentTeam
4074 				|| team->session_id != currentTeam->session_id) {
4075 				return B_NOT_ALLOWED;
4076 			}
4077 
4078 			// The call is also supposed to fail on a child that has already
4079 			// executed exec*() [EACCES].
4080 			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
4081 				return EACCES;
4082 		}
4083 
4084 		// If we created a new process group, publish it now.
4085 		if (newGroup) {
4086 			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
4087 			if (sGroupHash.Lookup(groupID)) {
4088 				// A group with this ID has appeared since we first checked.
4089 				// Back to square one.
4090 				continue;
4091 			}
4092 
4093 			group->PublishLocked(team->group->Session());
4094 		} else if (group->Session()->id != team->session_id) {
4095 			// The existing target process group belongs to a different session.
4096 			// That's not allowed.
4097 			return B_NOT_ALLOWED;
4098 		}
4099 
4100 		// Everything is ready -- set the group.
4101 		remove_team_from_group(team);
4102 		insert_team_into_group(group, team);
4103 
4104 		// Changing the process group might have changed the situation for a
4105 		// parent waiting in wait_for_child(). Hence we notify it.
4106 		team->parent->dead_children.condition_variable.NotifyAll(false);
4107 
4108 		return group->id;
4109 	}
4110 }
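
/*	A classic userland sketch of the job-control usage this syscall enables.
	Parent and child both make the call, so the new group exists no matter
	which process runs first; the redundant second call is harmless (the
	"/bin/app" path is just a placeholder):

		pid_t child = fork();
		if (child == 0) {
			setpgid(0, 0);			// child: move itself into a new group
			execl("/bin/app", "app", (char*)NULL);
			_exit(1);
		}
		setpgid(child, child);		// parent: same transition, closes the race
*/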
4111 
4112 
4113 pid_t
4114 _user_setsid(void)
4115 {
4116 	Team* team = thread_get_current_thread()->team;
4117 
4118 	// create a new process group and session
4119 	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
4120 	if (group == NULL)
4121 		return B_NO_MEMORY;
4122 	BReference<ProcessGroup> groupReference(group, true);
4123 	AutoLocker<ProcessGroup> groupLocker(group);
4124 
4125 	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
4126 	if (session == NULL)
4127 		return B_NO_MEMORY;
4128 	BReference<ProcessSession> sessionReference(session, true);
4129 
4130 	// lock the team's current process group, parent, and the team itself
4131 	team->LockTeamParentAndProcessGroup();
4132 	BReference<ProcessGroup> oldGroupReference(team->group);
4133 	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
4134 	TeamLocker parentLocker(team->parent, true);
4135 	TeamLocker teamLocker(team, true);
4136 
4137 	// the team must not already be a process group leader
4138 	if (is_process_group_leader(team))
4139 		return B_NOT_ALLOWED;
4140 
4141 	// remove the team from the old and add it to the new process group
4142 	remove_team_from_group(team);
4143 	group->Publish(session);
4144 	insert_team_into_group(group, team);
4145 
4146 	// Changing the process group might have changed the situation for a
4147 	// parent waiting in wait_for_child(). Hence we notify it.
4148 	team->parent->dead_children.condition_variable.NotifyAll(false);
4149 
4150 	return group->id;
4151 }
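
/*	The usual POSIX daemonization sequence built on setsid(); the fork()
	guarantees the child is not a process group leader, which is exactly the
	precondition checked above:

		pid_t pid = fork();
		if (pid < 0)
			exit(1);
		if (pid > 0)
			exit(0);				// parent exits
		if (setsid() < 0)			// child becomes session and group leader
			exit(1);
*/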
4152 
4153 
4154 status_t
4155 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4156 {
4157 	status_t returnCode;
4158 	status_t status;
4159 
4160 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4161 		return B_BAD_ADDRESS;
4162 
4163 	status = wait_for_team(id, &returnCode);
4164 	if (status >= B_OK && _userReturnCode != NULL) {
4165 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4166 				!= B_OK)
4167 			return B_BAD_ADDRESS;
4168 		return B_OK;
4169 	}
4170 
4171 	return syscall_restart_handle_post(status);
4172 }
4173 
4174 
4175 thread_id
4176 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
4177 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
4178 	port_id errorPort, uint32 errorToken)
4179 {
4180 	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));
4181 
4182 	if (argCount < 1)
4183 		return B_BAD_VALUE;
4184 
4185 	// copy and relocate the flat arguments
4186 	char** flatArgs;
4187 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
4188 		argCount, envCount, flatArgs);
4189 	if (error != B_OK)
4190 		return error;
4191 
4192 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
4193 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
4194 		errorToken);
4195 
4196 	free(flatArgs);
4197 		// load_image_internal() has unset our variable if it took over ownership
4198 
4199 	return thread;
4200 }
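
/*	A userland sketch using the public load_image() API from OS.h, which ends
	up here; the new team's main thread is returned in a suspended state:

		extern char** environ;
		const char* args[] = { "/bin/ls", "-l", NULL };
		thread_id thread = load_image(2, args, (const char**)environ);
		if (thread >= 0) {
			status_t returnCode;
			resume_thread(thread);
			wait_for_thread(thread, &returnCode);
		}
*/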
4201 
4202 
4203 void
4204 _user_exit_team(status_t returnValue)
4205 {
4206 	Thread* thread = thread_get_current_thread();
4207 	Team* team = thread->team;
4208 
4209 	// set this thread's exit status
4210 	thread->exit.status = returnValue;
4211 
4212 	// set the team exit status
4213 	TeamLocker teamLocker(team);
4214 
4215 	if (!team->exit.initialized) {
4216 		team->exit.reason = CLD_EXITED;
4217 		team->exit.signal = 0;
4218 		team->exit.signaling_user = 0;
4219 		team->exit.status = returnValue;
4220 		team->exit.initialized = true;
4221 	}
4222 
4223 	teamLocker.Unlock();
4224 
4225 	// Stop the thread, if the team is being debugged and that has been
4226 	// requested.
4227 	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
4228 		user_debug_stop_thread();
4229 
4230 	// Send this thread a SIGKILL. This makes sure the thread will not return to
4231 	// userland. The signal handling code forwards the signal to the main
4232 	// thread (if that's not already this one), which will take the team down.
4233 	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
4234 	send_signal_to_thread(thread, signal, 0);
4235 }
4236 
4237 
4238 status_t
4239 _user_kill_team(team_id team)
4240 {
4241 	return kill_team(team);
4242 }
4243 
4244 
4245 status_t
4246 _user_get_team_info(team_id id, team_info* userInfo)
4247 {
4248 	status_t status;
4249 	team_info info;
4250 
4251 	if (!IS_USER_ADDRESS(userInfo))
4252 		return B_BAD_ADDRESS;
4253 
4254 	status = _get_team_info(id, &info, sizeof(team_info));
4255 	if (status == B_OK) {
4256 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4257 			return B_BAD_ADDRESS;
4258 	}
4259 
4260 	return status;
4261 }
4262 
4263 
4264 status_t
4265 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
4266 {
4267 	status_t status;
4268 	team_info info;
4269 	int32 cookie;
4270 
4271 	if (!IS_USER_ADDRESS(userCookie)
4272 		|| !IS_USER_ADDRESS(userInfo)
4273 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4274 		return B_BAD_ADDRESS;
4275 
4276 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
4277 	if (status != B_OK)
4278 		return status;
4279 
4280 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4281 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4282 		return B_BAD_ADDRESS;
4283 
4284 	return status;
4285 }
4286 
4287 
4288 team_id
4289 _user_get_current_team(void)
4290 {
4291 	return team_get_current_team_id();
4292 }
4293 
4294 
4295 status_t
4296 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4297 	size_t size)
4298 {
4299 	if (size != sizeof(team_usage_info))
4300 		return B_BAD_VALUE;
4301 
4302 	team_usage_info info;
4303 	status_t status = common_get_team_usage_info(team, who, &info,
4304 		B_CHECK_PERMISSION);
4305 
4306 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4307 		|| user_memcpy(userInfo, &info, size) != B_OK) {
4308 		return B_BAD_ADDRESS;
4309 	}
4310 
4311 	return status;
4312 }
4313 
4314 
4315 status_t
4316 _user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
4317 	size_t size, size_t* _sizeNeeded)
4318 {
4319 	// check parameters
4320 	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
4321 		|| (buffer == NULL && size > 0)
4322 		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
4323 		return B_BAD_ADDRESS;
4324 	}
4325 
4326 	KMessage info;
4327 
4328 	if ((flags & B_TEAM_INFO_BASIC) != 0) {
4329 		// allocate memory for a copy of the needed team data
4330 		struct ExtendedTeamData {
4331 			team_id	id;
4332 			pid_t	group_id;
4333 			pid_t	session_id;
4334 			uid_t	real_uid;
4335 			gid_t	real_gid;
4336 			uid_t	effective_uid;
4337 			gid_t	effective_gid;
4338 			char	name[B_OS_NAME_LENGTH];
4339 		};
4340 
4341 		ExtendedTeamData* teamClone
4342 			= (ExtendedTeamData*)malloc(sizeof(ExtendedTeamData));
4343 			// It would be nicer to use new, but then we'd have to use
4344 			// ObjectDeleter and declare the structure outside of the function
4345 			// due to template parameter restrictions.
4346 		if (teamClone == NULL)
4347 			return B_NO_MEMORY;
4348 		MemoryDeleter teamCloneDeleter(teamClone);
4349 
4350 		io_context* ioContext;
4351 		{
4352 			// get the team structure
4353 			Team* team = Team::GetAndLock(teamID);
4354 			if (team == NULL)
4355 				return B_BAD_TEAM_ID;
4356 			BReference<Team> teamReference(team, true);
4357 			TeamLocker teamLocker(team, true);
4358 
4359 			// copy the data
4360 			teamClone->id = team->id;
4361 			strlcpy(teamClone->name, team->Name(), sizeof(teamClone->name));
4362 			teamClone->group_id = team->group_id;
4363 			teamClone->session_id = team->session_id;
4364 			teamClone->real_uid = team->real_uid;
4365 			teamClone->real_gid = team->real_gid;
4366 			teamClone->effective_uid = team->effective_uid;
4367 			teamClone->effective_gid = team->effective_gid;
4368 
4369 			// also fetch a reference to the I/O context
4370 			ioContext = team->io_context;
4371 			vfs_get_io_context(ioContext);
4372 		}
4373 		CObjectDeleter<io_context> ioContextPutter(ioContext,
4374 			&vfs_put_io_context);
4375 
4376 		// add the basic data to the info message
4377 		if (info.AddInt32("id", teamClone->id) != B_OK
4378 			|| info.AddString("name", teamClone->name) != B_OK
4379 			|| info.AddInt32("process group", teamClone->group_id) != B_OK
4380 			|| info.AddInt32("session", teamClone->session_id) != B_OK
4381 			|| info.AddInt32("uid", teamClone->real_uid) != B_OK
4382 			|| info.AddInt32("gid", teamClone->real_gid) != B_OK
4383 			|| info.AddInt32("euid", teamClone->effective_uid) != B_OK
4384 			|| info.AddInt32("egid", teamClone->effective_gid) != B_OK) {
4385 			return B_NO_MEMORY;
4386 		}
4387 
4388 		// get the current working directory from the I/O context
4389 		dev_t cwdDevice;
4390 		ino_t cwdDirectory;
4391 		{
4392 			MutexLocker ioContextLocker(ioContext->io_mutex);
4393 			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
4394 		}
4395 
4396 		if (info.AddInt32("cwd device", cwdDevice) != B_OK
4397 			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
4398 			return B_NO_MEMORY;
4399 		}
4400 	}
4401 
4402 	// TODO: Support the other flags!
4403 
4404 	// copy the needed size and, if it fits, the message back to userland
4405 	size_t sizeNeeded = info.ContentSize();
4406 	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
4407 		return B_BAD_ADDRESS;
4408 
4409 	if (sizeNeeded > size)
4410 		return B_BUFFER_OVERFLOW;
4411 
4412 	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
4413 		return B_BAD_ADDRESS;
4414 
4415 	return B_OK;
4416 }
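
/*	A sketch of the two-call pattern this syscall supports, assuming the
	private _kern_get_extended_team_info() stub that userland calls: query the
	needed size first, then fetch the flattened KMessage:

		size_t sizeNeeded;
		status_t status = _kern_get_extended_team_info(teamID,
			B_TEAM_INFO_BASIC, NULL, 0, &sizeNeeded);
		if (status == B_BUFFER_OVERFLOW) {
			void* buffer = malloc(sizeNeeded);
			if (buffer != NULL) {
				status = _kern_get_extended_team_info(teamID,
					B_TEAM_INFO_BASIC, buffer, sizeNeeded, &sizeNeeded);
				// on B_OK the buffer holds the flattened KMessage
				free(buffer);
			}
		}
*/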
4417