xref: /haiku/src/system/kernel/team.cpp (revision fc5d11e9ab9ea9a62402217b2a9e46f23aa74ea8)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 /*!	Team functions */
13 
14 
15 #include <team.h>
16 
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22 
23 #include <OS.h>
24 
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27 
28 #include <extended_system_info_defs.h>
29 
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <safemode.h>
47 #include <sem.h>
48 #include <syscall_process_info.h>
49 #include <syscall_load_image.h>
50 #include <syscall_restart.h>
51 #include <syscalls.h>
52 #include <tls.h>
53 #include <tracing.h>
54 #include <user_runtime.h>
55 #include <user_thread.h>
56 #include <usergroup.h>
57 #include <vfs.h>
58 #include <vm/vm.h>
59 #include <vm/VMAddressSpace.h>
60 #include <util/AutoLock.h>
61 #include <util/ThreadAutoLock.h>
62 
63 #include "TeamThreadTables.h"
64 
65 
// Define TRACE_TEAM to enable verbose debug output (via dprintf) for this
// file; otherwise TRACE() compiles away to an empty statement.
//#define TRACE_TEAM
#ifdef TRACE_TEAM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
72 
73 
// Hash key wrapper around a team ID. (Not referenced in the portion of the
// file visible here.)
struct team_key {
	team_id id;
};
77 
// Bundle of arguments passed through team creation/exec. It is built and
// consumed by code outside this chunk, so the per-field notes below are
// partly presumptive -- confirm against the load/exec implementation.
struct team_arg {
	char	*path;				// path to the executable
	char	**flat_args;		// presumably arguments + environment in one
								// flat block of flat_args_size bytes
	size_t	flat_args_size;
	uint32	arg_count;
	uint32	env_count;
	mode_t	umask;
	uint32	flags;				// TEAM_ARGS_FLAG_* bits
	port_id	error_port;			// presumably for reporting early load errors
	uint32	error_token;
};
89 
90 #define TEAM_ARGS_FLAG_NO_ASLR	0x01
91 
92 
93 namespace {
94 
95 
// Notification service broadcasting team lifecycle events; registered under
// the name "teams" (see the implementation further below).
class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};
102 
103 
104 // #pragma mark - TeamTable
105 
106 
107 typedef BKernel::TeamThreadTable<Team> TeamTable;
108 
109 
110 // #pragma mark - ProcessGroupHashDefinition
111 
112 
113 struct ProcessGroupHashDefinition {
114 	typedef pid_t			KeyType;
115 	typedef	ProcessGroup	ValueType;
116 
117 	size_t HashKey(pid_t key) const
118 	{
119 		return key;
120 	}
121 
122 	size_t Hash(ProcessGroup* value) const
123 	{
124 		return HashKey(value->id);
125 	}
126 
127 	bool Compare(pid_t key, ProcessGroup* value) const
128 	{
129 		return value->id == key;
130 	}
131 
132 	ProcessGroup*& GetLink(ProcessGroup* value) const
133 	{
134 		return value->next;
135 	}
136 };
137 
138 typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
139 
140 
141 }	// unnamed namespace
142 
143 
144 // #pragma mark -
145 
146 
// the team_id -> Team hash table and the rw-spinlock protecting it
static TeamTable sTeamHash;
static rw_spinlock sTeamHashLock = B_RW_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the spinlock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

// the kernel team -- set during initialization (outside this chunk)
static Team* sKernelTeam = NULL;
// presumably set from safemode settings elsewhere -- TODO confirm
static bool sDisableUserAddOns = false;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;
	// starts at 1 -- presumably accounting for the kernel team

static TeamNotificationService sNotificationService;

// initial size and total reserved size of a team's user data area
static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
173 
174 
175 // #pragma mark - TeamListIterator
176 
177 
/*!	Registers this iterator's entry with the global team table.
	A write lock is required, since the table's iterator bookkeeping is
	modified.
*/
TeamListIterator::TeamListIterator()
{
	// queue the entry
	InterruptsWriteSpinLocker locker(sTeamHashLock);
	sTeamHash.InsertIteratorEntry(&fEntry);
}
184 
185 
/*!	Deregisters the iterator's entry from the global team table.
*/
TeamListIterator::~TeamListIterator()
{
	// remove the entry
	InterruptsWriteSpinLocker locker(sTeamHashLock);
	sTeamHash.RemoveIteratorEntry(&fEntry);
}
192 
193 
/*!	Returns the next team, or \c NULL when the iteration is done.
	A reference to the returned team is acquired for the caller.
	A write lock is taken, since advancing the iterator entry requires
	write access to the table.
*/
Team*
TeamListIterator::Next()
{
	// get the next team -- if there is one, get reference for it
	InterruptsWriteSpinLocker locker(sTeamHashLock);
	Team* team = sTeamHash.NextElement(&fEntry);
	if (team != NULL)
		team->AcquireReference();

	return team;
}
205 
206 
207 // #pragma mark - Tracing
208 
209 
210 #if TEAM_TRACING
211 namespace TeamTracing {
212 
// Trace entry recording a fork(), together with the ID of the new team's
// main thread.
class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
	}

private:
	thread_id			fForkedThread;
};
230 
231 
232 class ExecTeam : public AbstractTraceEntry {
233 public:
234 	ExecTeam(const char* path, int32 argCount, const char* const* args,
235 			int32 envCount, const char* const* env)
236 		:
237 		fArgCount(argCount),
238 		fArgs(NULL)
239 	{
240 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
241 			false);
242 
243 		// determine the buffer size we need for the args
244 		size_t argBufferSize = 0;
245 		for (int32 i = 0; i < argCount; i++)
246 			argBufferSize += strlen(args[i]) + 1;
247 
248 		// allocate a buffer
249 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
250 		if (fArgs) {
251 			char* buffer = fArgs;
252 			for (int32 i = 0; i < argCount; i++) {
253 				size_t argSize = strlen(args[i]) + 1;
254 				memcpy(buffer, args[i], argSize);
255 				buffer += argSize;
256 			}
257 		}
258 
259 		// ignore env for the time being
260 		(void)envCount;
261 		(void)env;
262 
263 		Initialized();
264 	}
265 
266 	virtual void AddDump(TraceOutput& out)
267 	{
268 		out.Print("team exec, \"%p\", args:", fPath);
269 
270 		if (fArgs != NULL) {
271 			char* args = fArgs;
272 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
273 				out.Print(" \"%s\"", args);
274 				args += strlen(args) + 1;
275 			}
276 		} else
277 			out.Print(" <too long>");
278 	}
279 
280 private:
281 	char*	fPath;
282 	int32	fArgCount;
283 	char*	fArgs;
284 };
285 
286 
287 static const char*
288 job_control_state_name(job_control_state state)
289 {
290 	switch (state) {
291 		case JOB_CONTROL_STATE_NONE:
292 			return "none";
293 		case JOB_CONTROL_STATE_STOPPED:
294 			return "stopped";
295 		case JOB_CONTROL_STATE_CONTINUED:
296 			return "continued";
297 		case JOB_CONTROL_STATE_DEAD:
298 			return "dead";
299 		default:
300 			return "invalid";
301 	}
302 }
303 
304 
// Trace entry recording a job control state change of a team, together with
// the number of the signal that caused it (0: no signal involved).
class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %" B_PRId32 ", "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;
};
328 
329 
// Trace entry recording the parameters of a wait-for-child request.
class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %" B_PRId32 ", "
			"flags: %#" B_PRIx32, fChild, fFlags);
	}

private:
	pid_t	fChild;
	uint32	fFlags;
};
350 
351 
// Trace entry recording the result of a wait-for-child request. On success
// the collected job control info is stored; on failure only the (negative)
// error code is recorded in fTeam -- the remaining members then stay
// uninitialized and are not printed.
class WaitForChildDone : public AbstractTraceEntry {
public:
	WaitForChildDone(const job_control_entry& entry)
		:
		fState(entry.state),
		fTeam(entry.thread),
		fStatus(entry.status),
		fReason(entry.reason),
		fSignal(entry.signal)
	{
		Initialized();
	}

	WaitForChildDone(status_t error)
		:
		fTeam(error)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		// fTeam doubles as the error code when negative
		if (fTeam >= 0) {
			out.Print("team wait for child done, team: %" B_PRId32 ", "
				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
				fTeam, job_control_state_name(fState), fStatus, fReason,
				fSignal);
		} else {
			out.Print("team wait for child failed, error: "
				"%#" B_PRIx32 ", ", fTeam);
		}
	}

private:
	job_control_state	fState;
	team_id				fTeam;
	status_t			fStatus;
	uint16				fReason;
	uint16				fSignal;
};
392 
393 }	// namespace TeamTracing
394 
395 #	define T(x) new(std::nothrow) TeamTracing::x;
396 #else
397 #	define T(x) ;
398 #endif
399 
400 
401 //	#pragma mark - TeamNotificationService
402 
403 
// Registers the service under the name "teams".
TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}
408 
409 
/*!	Sends a TEAM_MONITOR notification for \a team.
	The message carries the event code, the team's ID, and a pointer to the
	Team object itself; it is built in a stack buffer, so no allocation is
	needed.
*/
void
TeamNotificationService::Notify(uint32 eventCode, Team* team)
{
	char eventBuffer[128];
	KMessage event;
	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
	event.AddInt32("event", eventCode);
	event.AddInt32("team", team->id);
	event.AddPointer("teamStruct", team);

	DefaultNotificationService::Notify(event, eventCode);
}
422 
423 
424 //	#pragma mark - Team
425 
426 
/*!	Constructs the team object for the given ID.

	All members are initialized to their "empty" defaults. The two
	allocations done here (job_control_entry and fQueuedSignalsCounter) can
	fail; that is checked by Team::Create(), not here.

	\param id The team's ID; also stored in the job control entry and used
		in the lock name of userland teams.
	\param kernel \c true when constructing the kernel team: its lock gets a
		fixed name and its queued-signals counter is unlimited (-1).
*/
Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;

	hash_next = siblings_next = parent = children = group_next = NULL;
	serial_number = -1;

	group_id = session_id = -1;
	group = NULL;

	num_threads = 0;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	io_context = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	death_entry = NULL;
	list_init(&dead_threads);

	dead_children.condition_variable.Init(&dead_children, "team children");
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// may fail -- Team::Create() checks for NULL
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	address_space = NULL;
	main_thread = NULL;
	thread_list = NULL;
	loading_info = NULL;

	list_init(&image_list);
	list_init(&watcher_list);
	list_init(&sem_list);
	list_init_etc(&port_list, port_team_link_offset());

	user_data = 0;
	user_data_area = -1;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	commpage_address = NULL;

	clear_team_debug_info(&debug_info, true);

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;
	B_INITIALIZE_SPINLOCK(&time_lock);

	saved_set_uid = real_uid = effective_uid = -1;
	saved_set_gid = real_gid = effective_gid = -1;

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	B_INITIALIZE_SPINLOCK(&signal_lock);

	// init mutex
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	fName[0] = '\0';
	fArgs[0] = '\0';

	// the kernel team gets an unlimited number of queued signals (-1);
	// may fail -- Team::Create() checks for NULL
	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));
	fUserDefinedTimerCount = 0;

	fCoreDumpCondition = NULL;
}
512 
513 
/*!	Frees everything still owned by the team: the I/O context, owned ports
	and semaphores, user timers, pending signals, the dead-thread and
	dead-children bookkeeping, unused user_thread structures, and finally
	the team's lock.
*/
Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	if (io_context != NULL)
		vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	// free the remaining death entries of already dead threads
	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	// delete not yet collected job control entries of dead children
	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	// free the list of unused userland user_thread structures
	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}
549 
550 
551 /*static*/ Team*
552 Team::Create(team_id id, const char* name, bool kernel)
553 {
554 	// create the team object
555 	Team* team = new(std::nothrow) Team(id, kernel);
556 	if (team == NULL)
557 		return NULL;
558 	ObjectDeleter<Team> teamDeleter(team);
559 
560 	if (name != NULL)
561 		team->SetName(name);
562 
563 	// check initialization
564 	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
565 		return NULL;
566 
567 	// finish initialization (arch specifics)
568 	if (arch_team_init_team_struct(team, kernel) != B_OK)
569 		return NULL;
570 
571 	if (!kernel) {
572 		status_t error = user_timer_create_team_timers(team);
573 		if (error != B_OK)
574 			return NULL;
575 	}
576 
577 	// everything went fine
578 	return teamDeleter.Detach();
579 }
580 
581 
582 /*!	\brief Returns the team with the given ID.
583 	Returns a reference to the team.
584 	Team and thread spinlock must not be held.
585 */
586 /*static*/ Team*
587 Team::Get(team_id id)
588 {
589 	if (id == B_CURRENT_TEAM) {
590 		Team* team = thread_get_current_thread()->team;
591 		team->AcquireReference();
592 		return team;
593 	}
594 
595 	InterruptsReadSpinLocker locker(sTeamHashLock);
596 	Team* team = sTeamHash.Lookup(id);
597 	if (team != NULL)
598 		team->AcquireReference();
599 	return team;
600 }
601 
602 
603 /*!	\brief Returns the team with the given ID in a locked state.
604 	Returns a reference to the team.
605 	Team and thread spinlock must not be held.
606 */
607 /*static*/ Team*
608 Team::GetAndLock(team_id id)
609 {
610 	// get the team
611 	Team* team = Get(id);
612 	if (team == NULL)
613 		return NULL;
614 
615 	// lock it
616 	team->Lock();
617 
618 	// only return the team, when it isn't already dying
619 	if (team->state >= TEAM_STATE_SHUTDOWN) {
620 		team->Unlock();
621 		team->ReleaseReference();
622 		return NULL;
623 	}
624 
625 	return team;
626 }
627 
628 
629 /*!	Locks the team and its parent team (if any).
630 	The caller must hold a reference to the team or otherwise make sure that
631 	it won't be deleted.
632 	If the team doesn't have a parent, only the team itself is locked. If the
633 	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
634 	only the team itself is locked.
635 
636 	\param dontLockParentIfKernel If \c true, the team's parent team is only
637 		locked, if it is not the kernel team.
638 */
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent. This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);

		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}
671 
672 
673 /*!	Unlocks the team and its parent team (if any).
674 */
void
Team::UnlockTeamAndParent()
{
	// counterpart to LockTeamAndParent()
	if (parent != NULL)
		parent->Unlock();

	Unlock();
}
683 
684 
685 /*!	Locks the team, its parent team (if any), and the team's process group.
686 	The caller must hold a reference to the team or otherwise make sure that
687 	it won't be deleted.
688 	If the team doesn't have a parent, only the team itself is locked.
689 */
void
Team::LockTeamParentAndProcessGroup()
{
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's lock.
	// If we have a parent, try to lock it (the common, cheap case).
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest of
	// the job. The group's lock stays held throughout.
	Unlock();
	LockTeamAndParent(false);
}
705 
706 
707 /*!	Unlocks the team, its parent team (if any), and the team's process group.
708 */
void
Team::UnlockTeamParentAndProcessGroup()
{
	// counterpart to LockTeamParentAndProcessGroup()
	group->Unlock();

	if (parent != NULL)
		parent->Unlock();

	Unlock();
}
719 
720 
/*!	Locks the team and its process group.
	The caller must hold a reference to the team or otherwise make sure
	that it won't be deleted.
*/
void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group == NULL)
			return;

		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);

		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}
755 
756 
/*!	Unlocks the team and its process group.
	Counterpart to LockTeamAndProcessGroup().
*/
void
Team::UnlockTeamAndProcessGroup()
{
	group->Unlock();
	Unlock();
}
763 
764 
765 void
766 Team::SetName(const char* name)
767 {
768 	if (const char* lastSlash = strrchr(name, '/'))
769 		name = lastSlash + 1;
770 
771 	strlcpy(fName, name, B_OS_NAME_LENGTH);
772 }
773 
774 
/*!	Sets the team's argument string (as shown by the "team" KDL command),
	truncated to sizeof(fArgs).
*/
void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}
780 
781 
782 void
783 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
784 {
785 	fArgs[0] = '\0';
786 	strlcpy(fArgs, path, sizeof(fArgs));
787 	for (int i = 0; i < otherArgCount; i++) {
788 		strlcat(fArgs, " ", sizeof(fArgs));
789 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
790 	}
791 }
792 
793 
/*!	Resets the team's signal handling for an exec*().
*/
void
Team::ResetSignalsOnExec()
{
	// We are supposed to keep pending signals. Signal actions shall be reset
	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
	// (for SIGCHLD it's implementation-defined). Others shall be reset to
	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
	// flags, but since there aren't any handlers, they make little sense, so
	// we clear them.

	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
		struct sigaction& action = SignalActionFor(i);
		// only custom handlers are reset to the default disposition
		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
			action.sa_handler = SIG_DFL;

		action.sa_mask = 0;
		action.sa_flags = 0;
		action.sa_userdata = NULL;
	}
}
814 
815 
/*!	Copies all of \a parent's signal actions to this team.
*/
void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}
821 
822 
823 /*!	Adds the given user timer to the team and, if user-defined, assigns it an
824 	ID.
825 
826 	The caller must hold the team's lock.
827 
828 	\param timer The timer to be added. If it doesn't have an ID yet, it is
829 		considered user-defined and will be assigned an ID.
830 	\return \c B_OK, if the timer was added successfully, another error code
831 		otherwise.
832 */
status_t
Team::AddUserTimer(UserTimer* timer)
{
	// don't allow addition of timers when already shutting the team down
	if (state >= TEAM_STATE_SHUTDOWN)
		return B_BAD_TEAM_ID;

	// If the timer is user-defined, check timer limit and increment
	// user-defined count.
	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
		return EAGAIN;
			// POSIX error code for "per-process timer limit reached"

	fUserTimers.AddTimer(timer);

	return B_OK;
}
849 
850 
851 /*!	Removes the given user timer from the team.
852 
853 	The caller must hold the team's lock.
854 
855 	\param timer The timer to be removed.
856 
857 */
void
Team::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	// user-defined timers count against the per-team limit -- return the slot
	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		UserDefinedTimersRemoved(1);
}
866 
867 
868 /*!	Deletes all (or all user-defined) user timers of the team.
869 
870 	Timer's belonging to the team's threads are not affected.
871 	The caller must hold the team's lock.
872 
873 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
874 		otherwise all timers are deleted.
875 */
void
Team::DeleteUserTimers(bool userDefinedOnly)
{
	// presumably DeleteTimers() returns the number of user-defined timers
	// it deleted; that count is given back to the per-team limit
	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
	UserDefinedTimersRemoved(count);
}
882 
883 
884 /*!	If not at the limit yet, increments the team's user-defined timer count.
885 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
886 */
bool
Team::CheckAddUserDefinedTimer()
{
	// optimistically increment; roll back if the limit was already reached
	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
		atomic_add(&fUserDefinedTimerCount, -1);
		return false;
	}

	return true;
}
898 
899 
900 /*!	Subtracts the given count for the team's user-defined timer count.
901 	\param count The count to subtract.
902 */
void
Team::UserDefinedTimersRemoved(int32 count)
{
	// atomically give back \a count user-defined timer slots
	atomic_add(&fUserDefinedTimerCount, -count);
}
908 
909 
/*!	Deactivates all of the team's CPU time user timers: both the total CPU
	time timers and the user CPU time timers.
*/
void
Team::DeactivateCPUTimeUserTimers()
{
	// Deactivate() presumably unlinks the timer from its list, so each
	// loop terminates -- verify against the UserTimer implementation.
	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();

	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
		timer->Deactivate();
}
919 
920 
921 /*!	Returns the team's current total CPU time (kernel + user + offset).
922 
923 	The caller must hold \c time_lock.
924 
925 	\param ignoreCurrentRun If \c true and the current thread is one team's
926 		threads, don't add the time since the last time \c last_time was
927 		updated. Should be used in "thread unscheduled" scheduler callbacks,
928 		since although the thread is still running at that time, its time has
929 		already been stopped.
930 	\return The team's current total CPU time.
931 */
bigtime_t
Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
{
	// start with the time no longer accounted in any live thread
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		// don't re-lock the thread the caller has already locked
		bool alreadyLocked = thread == lockedThread;
		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
		time += thread->kernel_time + thread->user_time;

		// a non-zero last_time means the thread has been running since its
		// counters were last updated -- add the time since then
		if (thread->last_time != 0) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}

		if (alreadyLocked)
			threadTimeLocker.Detach();
				// don't release the caller's lock on destruction
	}

	return time;
}
958 
959 
960 /*!	Returns the team's current user CPU time.
961 
962 	The caller must hold \c time_lock.
963 
964 	\return The team's current user CPU time.
965 */
bigtime_t
Team::UserCPUTime() const
{
	// start with the user time of threads that already died
	bigtime_t time = dead_threads_user_time;

	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->user_time;

		// add the current run only if the thread is running in userland
		if (thread->last_time != 0 && !thread->in_kernel)
			time += now - thread->last_time;
	}

	return time;
}
984 
985 
986 //	#pragma mark - ProcessGroup
987 
988 
/*!	Creates a process group with the given ID. The group starts out empty
	and unpublished; Publish()/PublishLocked() adds it to a session and the
	global hash table.
*/
ProcessGroup::ProcessGroup(pid_t id)
	:
	id(id),
	teams(NULL),
	fSession(NULL),
	fInOrphanedCheckList(false)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
		// the name is on the stack -- MUTEX_FLAG_CLONE_NAME lets the mutex
		// keep its own copy
}
1000 
1001 
ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session
	// (fSession is only set once the group has been published)
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}
1025 
1026 
1027 /*static*/ ProcessGroup*
1028 ProcessGroup::Get(pid_t id)
1029 {
1030 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1031 	ProcessGroup* group = sGroupHash.Lookup(id);
1032 	if (group != NULL)
1033 		group->AcquireReference();
1034 	return group;
1035 }
1036 
1037 
1038 /*!	Adds the group the given session and makes it publicly accessible.
1039 	The caller must not hold the process group hash lock.
1040 */
void
ProcessGroup::Publish(ProcessSession* session)
{
	// only acquires the hash lock -- PublishLocked() does the actual work
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	PublishLocked(session);
}
1047 
1048 
1049 /*!	Adds the group to the given session and makes it publicly accessible.
1050 	The caller must hold the process group hash lock.
1051 */
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	// a group with this ID must not be published yet
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	// attach the group to its session, keeping the session alive
	fSession = session;
	fSession->AcquireReference();

	sGroupHash.InsertUnchecked(this);
}
1062 
1063 
1064 /*!	Checks whether the process group is orphaned.
1065 	The caller must hold the group's lock.
1066 	\return \c true, if the group is orphaned, \c false otherwise.
1067 */
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		// a parent in the same session but a different group means the
		// group is not orphaned
		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}
1093 
1094 
/*!	Queues the group in the global list of process groups that need an
	orphaned check, unless it is queued there already.
*/
void
ProcessGroup::ScheduleOrphanedCheck()
{
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (!fInOrphanedCheckList) {
		sOrphanedCheckProcessGroups.Add(this);
		fInOrphanedCheckList = true;
	}
}
1105 
1106 
/*!	Clears the in-orphaned-check-list flag. The caller is presumably
	responsible for having removed the group from
	sOrphanedCheckProcessGroups and for holding sOrphanedCheckLock --
	confirm at the call sites (not visible in this chunk).
*/
void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}
1112 
1113 
1114 //	#pragma mark - ProcessSession
1115 
1116 
/*!	Creates a session with the given ID. Initially the session has no
	controlling TTY and no foreground process group (both -1).
*/
ProcessSession::ProcessSession(pid_t id)
	:
	id(id),
	controlling_tty(-1),
	foreground_group(-1)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
		// the name is on the stack -- MUTEX_FLAG_CLONE_NAME lets the mutex
		// keep its own copy
}
1127 
1128 
ProcessSession::~ProcessSession()
{
	// only the lock needs explicit cleanup
	mutex_destroy(&fLock);
}
1133 
1134 
1135 //	#pragma mark - KDL functions
1136 
1137 
/*!	KDL helper: prints the interesting fields of \a team via kprintf().
*/
static void
_dump_team_info(Team* team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
		team->id);
	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
	kprintf("name:             '%s'\n", team->Name());
	kprintf("args:             '%s'\n", team->Args());
	kprintf("hash_next:        %p\n", team->hash_next);
	kprintf("parent:           %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:         %p\n", team->children);
	kprintf("num_threads:      %d\n", team->num_threads);
	kprintf("state:            %d\n", team->state);
	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
	kprintf("io_context:       %p\n", team->io_context);
	if (team->address_space)
		kprintf("address_space:    %p\n", team->address_space);
	kprintf("user data:        %p (area %" B_PRId32 ")\n",
		(void*)team->user_data, team->user_data_area);
	kprintf("free user thread: %p\n", team->free_user_threads);
	kprintf("main_thread:      %p\n", team->main_thread);
	kprintf("thread_list:      %p\n", team->thread_list);
	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
}
1169 
1170 
/*!	KDL command: prints info about a single team.
	Without an argument the current thread's team is dumped. The argument
	may be a kernel address (interpreted as a Team pointer), a team ID, or
	a team name. The hash table is walked without locking -- presumably
	safe only because KDL halts the other CPUs.
*/
static int
dump_team_info(int argc, char** argv)
{
	ulong arg;
	bool found = false;

	if (argc < 2) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	arg = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(arg)) {
		// semi-hack
		_dump_team_info((Team*)arg);
		return 0;
	}

	// walk through the thread list, trying to match name or id
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == (team_id)arg) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found)
		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
	return 0;
}
1208 
1209 
/*!	KDL command: lists all teams with their address, ID, parent and name.
*/
static int
dump_teams(int argc, char** argv)
{
	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
		B_PRINTF_POINTER_WIDTH, "parent");

	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
	}

	return 0;
}
1223 
1224 
1225 //	#pragma mark - Private functions
1226 
1227 
/*! Get the parent of a given process.

	Used in the implementation of getppid (where a process can get its own
	parent, only) as well as in user_process_info where the information is
	available to anyone (allowing to display a tree of running processes)

	\param id The ID of the team whose parent shall be returned; \c 0 means
		the calling thread's team.
	\return The parent team's ID, or \c -1 with \c errno set: \c EINVAL for
		a negative ID or a team without a parent, \c ESRCH if no team with
		the given ID exists.
*/
static pid_t
_getppid(pid_t id)
{
	if (id < 0) {
		errno = EINVAL;
		return -1;
	}

	if (id == 0) {
		// asking for the calling team's parent -- lock only our own team
		Team* team = thread_get_current_thread()->team;
		TeamLocker teamLocker(team);
		if (team->parent == NULL) {
			// team has no parent
			errno = EINVAL;
			return -1;
		}
		return team->parent->id;
	}

	// look up and lock the given team; GetAndLock() also acquires a
	// reference, released below
	Team* team = Team::GetAndLock(id);
	if (team == NULL) {
		errno = ESRCH;
		return -1;
	}

	pid_t parentID;

	if (team->parent == NULL) {
		errno = EINVAL;
		parentID = -1;
	} else
		parentID = team->parent->id;

	team->UnlockAndReleaseReference();

	return parentID;
}
1270 
1271 
1272 /*!	Inserts team \a team into the child list of team \a parent.
1273 
1274 	The caller must hold the lock of both \a parent and \a team.
1275 
1276 	\param parent The parent team.
1277 	\param team The team to be inserted into \a parent's child list.
1278 */
1279 static void
1280 insert_team_into_parent(Team* parent, Team* team)
1281 {
1282 	ASSERT(parent != NULL);
1283 
1284 	team->siblings_next = parent->children;
1285 	parent->children = team;
1286 	team->parent = parent;
1287 }
1288 
1289 
1290 /*!	Removes team \a team from the child list of team \a parent.
1291 
1292 	The caller must hold the lock of both \a parent and \a team.
1293 
1294 	\param parent The parent team.
1295 	\param team The team to be removed from \a parent's child list.
1296 */
1297 static void
1298 remove_team_from_parent(Team* parent, Team* team)
1299 {
1300 	Team* child;
1301 	Team* last = NULL;
1302 
1303 	for (child = parent->children; child != NULL;
1304 			child = child->siblings_next) {
1305 		if (child == team) {
1306 			if (last == NULL)
1307 				parent->children = child->siblings_next;
1308 			else
1309 				last->siblings_next = child->siblings_next;
1310 
1311 			team->parent = NULL;
1312 			break;
1313 		}
1314 		last = child;
1315 	}
1316 }
1317 
1318 
1319 /*!	Returns whether the given team is a session leader.
1320 	The caller must hold the team's lock or its process group's lock.
1321 */
1322 static bool
1323 is_session_leader(Team* team)
1324 {
1325 	return team->session_id == team->id;
1326 }
1327 
1328 
1329 /*!	Returns whether the given team is a process group leader.
1330 	The caller must hold the team's lock or its process group's lock.
1331 */
1332 static bool
1333 is_process_group_leader(Team* team)
1334 {
1335 	return team->group_id == team->id;
1336 }
1337 
1338 
1339 /*!	Inserts the given team into the given process group.
1340 	The caller must hold the process group's lock, the team's lock, and the
1341 	team's parent's lock.
1342 */
1343 static void
1344 insert_team_into_group(ProcessGroup* group, Team* team)
1345 {
1346 	team->group = group;
1347 	team->group_id = group->id;
1348 	team->session_id = group->Session()->id;
1349 
1350 	team->group_next = group->teams;
1351 	group->teams = team;
1352 	group->AcquireReference();
1353 }
1354 
1355 
1356 /*!	Removes the given team from its process group.
1357 
1358 	The caller must hold the process group's lock, the team's lock, and the
1359 	team's parent's lock. Interrupts must be enabled.
1360 
1361 	\param team The team that'll be removed from its process group.
1362 */
1363 static void
1364 remove_team_from_group(Team* team)
1365 {
1366 	ProcessGroup* group = team->group;
1367 	Team* current;
1368 	Team* last = NULL;
1369 
1370 	// the team must be in a process group to let this function have any effect
1371 	if (group == NULL)
1372 		return;
1373 
1374 	for (current = group->teams; current != NULL;
1375 			current = current->group_next) {
1376 		if (current == team) {
1377 			if (last == NULL)
1378 				group->teams = current->group_next;
1379 			else
1380 				last->group_next = current->group_next;
1381 
1382 			break;
1383 		}
1384 		last = current;
1385 	}
1386 
1387 	team->group = NULL;
1388 	team->group_next = NULL;
1389 	team->group_id = -1;
1390 
1391 	group->ReleaseReference();
1392 }
1393 
1394 
/*!	Creates the team's "user area", e.g. used for the team's user_thread
	structures (cf. team_allocate_user_thread()).

	First kTeamUserDataReservedSize of address space are reserved, so the
	initially kTeamUserDataInitialSize sized area can grow later. If the
	reservation fails, the area is created without one at a randomized base.

	\param team The team to create the user data area for.
	\param exactAddress If not \c NULL, the address at which the area must be
		created (used by fork_team(), which replicates the parent's layout);
		otherwise a randomized base address is used.
	\return \c B_OK, if successful, another error code otherwise.
*/
static status_t
create_team_user_data(Team* team, void* exactAddress = NULL)
{
	void* address;
	uint32 addressSpec;

	if (exactAddress != NULL) {
		address = exactAddress;
		addressSpec = B_EXACT_ADDRESS;
	} else {
		address = (void*)KERNEL_USER_DATA_BASE;
		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
	}

	// try to reserve room for future growth of the area
	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);

	virtual_address_restrictions virtualRestrictions = {};
	if (result == B_OK || exactAddress != NULL) {
		// reservation succeeded (or an exact address is mandatory anyway):
		// create the area exactly at the chosen address
		if (exactAddress != NULL)
			virtualRestrictions.address = exactAddress;
		else
			virtualRestrictions.address = address;
		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	} else {
		// reservation failed -- fall back to an unreserved, randomized base
		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
	}

	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area",
		kTeamUserDataInitialSize, B_FULL_LOCK,
		B_READ_AREA | B_WRITE_AREA | B_KERNEL_AREA, 0, 0,
		&virtualRestrictions, &physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	// initialize the team's user data bookkeeping
	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = kTeamUserDataInitialSize;
	team->free_user_threads = NULL;

	return B_OK;
}
1439 
1440 
1441 static void
1442 delete_team_user_data(Team* team)
1443 {
1444 	if (team->user_data_area >= 0) {
1445 		vm_delete_area(team->id, team->user_data_area, true);
1446 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1447 			kTeamUserDataReservedSize);
1448 
1449 		team->user_data = 0;
1450 		team->used_user_data = 0;
1451 		team->user_data_size = 0;
1452 		team->user_data_area = -1;
1453 		while (free_user_thread* entry = team->free_user_threads) {
1454 			team->free_user_threads = entry->next;
1455 			free(entry);
1456 		}
1457 	}
1458 }
1459 
1460 
1461 static status_t
1462 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1463 	int32 argCount, int32 envCount, char**& _flatArgs)
1464 {
1465 	if (argCount < 0 || envCount < 0)
1466 		return B_BAD_VALUE;
1467 
1468 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1469 		return B_TOO_MANY_ARGS;
1470 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1471 		return B_BAD_VALUE;
1472 
1473 	if (!IS_USER_ADDRESS(userFlatArgs))
1474 		return B_BAD_ADDRESS;
1475 
1476 	// allocate kernel memory
1477 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1478 	if (flatArgs == NULL)
1479 		return B_NO_MEMORY;
1480 
1481 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1482 		free(flatArgs);
1483 		return B_BAD_ADDRESS;
1484 	}
1485 
1486 	// check and relocate the array
1487 	status_t error = B_OK;
1488 	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
1489 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1490 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1491 		if (i == argCount || i == argCount + envCount + 1) {
1492 			// check array null termination
1493 			if (flatArgs[i] != NULL) {
1494 				error = B_BAD_VALUE;
1495 				break;
1496 			}
1497 		} else {
1498 			// check string
1499 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1500 			size_t maxLen = stringEnd - arg;
1501 			if (arg < stringBase || arg >= stringEnd
1502 					|| strnlen(arg, maxLen) == maxLen) {
1503 				error = B_BAD_VALUE;
1504 				break;
1505 			}
1506 
1507 			flatArgs[i] = arg;
1508 		}
1509 	}
1510 
1511 	if (error == B_OK)
1512 		_flatArgs = flatArgs;
1513 	else
1514 		free(flatArgs);
1515 
1516 	return error;
1517 }
1518 
1519 
1520 static void
1521 free_team_arg(struct team_arg* teamArg)
1522 {
1523 	if (teamArg != NULL) {
1524 		free(teamArg->flat_args);
1525 		free(teamArg->path);
1526 		free(teamArg);
1527 	}
1528 }
1529 
1530 
1531 static status_t
1532 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1533 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1534 	port_id port, uint32 token)
1535 {
1536 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1537 	if (teamArg == NULL)
1538 		return B_NO_MEMORY;
1539 
1540 	teamArg->path = strdup(path);
1541 	if (teamArg->path == NULL) {
1542 		free(teamArg);
1543 		return B_NO_MEMORY;
1544 	}
1545 
1546 	// copy the args over
1547 	teamArg->flat_args = flatArgs;
1548 	teamArg->flat_args_size = flatArgsSize;
1549 	teamArg->arg_count = argCount;
1550 	teamArg->env_count = envCount;
1551 	teamArg->flags = 0;
1552 	teamArg->umask = umask;
1553 	teamArg->error_port = port;
1554 	teamArg->error_token = token;
1555 
1556 	// determine the flags from the environment
1557 	const char* const* env = flatArgs + argCount + 1;
1558 	for (int32 i = 0; i < envCount; i++) {
1559 		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
1560 			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
1561 			break;
1562 		}
1563 	}
1564 
1565 	*_teamArg = teamArg;
1566 	return B_OK;
1567 }
1568 
1569 
/*!	Does the real work for a new (userland) team's main thread: copies the
	program arguments onto the user stack, moves the team to
	TEAM_STATE_NORMAL, clones and registers the commpage, loads the runtime
	loader image, and finally enters userspace.

	\param args The team_arg structure (ownership is taken over; it is freed
		on all paths before this function returns/enters userspace).
	\return Only returns in case of error; on success control is transferred
		to userland and never comes back.
*/
static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
		thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size								| usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
	// TLS_SIZE							| TLS data
	// sizeof(user_space_program_args)	| argument structure for the runtime
	//									| loader
	// flat arguments size				| flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	// locate the program args structure above the stack and TLS areas (cf.
	// the layout above)
	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	// the flat arguments follow the program args structure; the environment
	// pointers follow the argument pointers and their NULL terminator
	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	// copy the program args structure and the flat arguments to userland
	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(&programArgs->disable_user_addons,
			&sDisableUserAddOns, sizeof(bool)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// set team args and update state
	team->Lock();
	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	// Clone commpage area
	area_id commPageArea = clone_commpage_area(team->id,
		&team->commpage_address);
	if (commPageArea  < B_OK) {
		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
			strerror(commPageArea)));
		return commPageArea;
	}

	// Register commpage image
	image_id commPageImage = get_commpage_image();
	extended_image_info imageInfo;
	err = get_image_info(commPageImage, &imageInfo.basic_info);
	if (err != B_OK) {
		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
			strerror(err)));
		return err;
	}
	// rebase the image info to the team's commpage clone
	imageInfo.basic_info.text = team->commpage_address;
	imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
	imageInfo.symbol_table = NULL;
	imageInfo.symbol_hash = NULL;
	imageInfo.string_table = NULL;
	image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
	if (image < 0) {
		TRACE(("team_create_thread_start: register_image() failed: %s\n",
			strerror(image)));
		return image;
	}

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	// enter userspace -- returns only in case of error
	return thread_enter_userspace_new_team(thread, (addr_t)entry,
		programArgs, team->commpage_address);
}
1712 
1713 
/*!	Entry point of a new team's main thread: Delegates the actual work to
	team_create_thread_start_internal() and, if that returns (i.e. an error
	occurred), records the error exit info and lets the thread die.

	\param args The team_arg structure, passed on to (and owned by)
		team_create_thread_start_internal().
	\return Never actually returns (thread_exit() does not return).
*/
static status_t
team_create_thread_start(void* args)
{
	team_create_thread_start_internal(args);
	team_init_exit_info_on_error(thread_get_current_thread()->team);
	thread_exit();
		// does not return
	return B_OK;
}
1723 
1724 
/*!	Creates a new team running the given executable: creates the main thread
	and team objects, sets up the team's I/O context, address space, and
	user data area, links the team into its parent and process group, and
	spawns the main thread (which loads the image via the runtime loader).

	\param _flatArgs The flat arguments block; flatArgs[0] is the executable
		path. On success ownership passes to the created team_arg and the
		reference is set to \c NULL.
	\param flatArgsSize Size of the flat arguments block in bytes.
	\param argCount Number of program arguments (must be > 0).
	\param envCount Number of environment variables.
	\param priority NOTE(review): currently unused in this function -- the
		main thread is created with B_NORMAL_PRIORITY below.
	\param parentID The team that shall become the new team's parent.
	\param flags If B_WAIT_TILL_LOADED is set, waits until the runtime
		loader finished (or failed) loading before returning.
	\param errorPort Port the runtime loader shall report errors to.
	\param errorToken Token identifying this load request on \a errorPort.
	\return The ID of the new team's main thread, or an error code.
*/
static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	thread_id thread;
	status_t status;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	ConditionVariableEntry loadingWaitEntry;
	io_context* parentIOContext = NULL;
	team_id teamID;
	bool teamLimitReached = false;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
		"\n", path, flatArgs, argCount));

	// cut the path from the main thread name
	const char* threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the main thread object
	Thread* mainThread;
	status = Thread::Create(threadName, mainThread);
	if (status != B_OK)
		return status;
	BReference<Thread> mainThreadReference(mainThread, true);

	// create team object
	Team* team = Team::Create(mainThread->id, path, false);
	if (team == NULL)
		return B_NO_MEMORY;
	BReference<Team> teamReference(team, true);

	// if requested, set up the loading notification before anything can
	// possibly trigger it
	BReference<Team> teamLoadingReference;
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		loadingInfo.condition.Init(team, "image load");
		loadingInfo.condition.Add(&loadingWaitEntry);
		loadingInfo.result = B_ERROR;
		team->loading_info = &loadingInfo;
		teamLoadingReference = teamReference;
	}

	// get the parent team
	Team* parent = Team::Get(parentID);
	if (parent == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> parentReference(parent, true);

	parent->LockTeamAndProcessGroup();
	team->Lock();

	// inherit the parent's user/group
	inherit_parent_user_and_group(team, parent);

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err2;

	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// insert the team into its parent and the teams hash
	parent->LockTeamAndProcessGroup();
	team->Lock();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);

		sTeamHash.Insert(team);
		// the team limit is checked under the same lock as the insertion,
		// so the counter stays consistent with the hash table
		teamLimitReached = sUsedTeams >= sMaxTeams;
		if (!teamLimitReached)
			sUsedTeams++;
	}

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err6;
	}

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err6;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		if (mainThread != NULL) {
			// resume the team's main thread
			thread_continue(mainThread);
		}

		// Now wait until loading is finished. We will be woken either by the
		// thread, when it finished or aborted loading, or when the team is
		// going to die (e.g. is killed). In either case the one notifying is
		// responsible for unsetting `loading_info` in the team structure.
		loadingWaitEntry.Wait();

		// We must synchronize with the thread that woke us up, to ensure
		// there are no remaining consumers of the team_loading_info.
		team->Lock();
		if (team->loading_info != NULL)
			panic("team loading wait complete, but loading_info != NULL");
		team->Unlock();
		teamLoadingReference.Unset();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(teamID);

	return thread;

err6:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
		sTeamHash.Remove(team);
		if (!teamLimitReached)
			sUsedTeams--;
	}

	sNotificationService.Notify(TEAM_REMOVED, team);

	delete_team_user_data(team);
err4:
	team->address_space->Put();
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	return status;
}
1946 
1947 
/*!	Almost shuts down the current team and loads a new image into it.
	If successful, this function does not return and will takeover ownership of
	the arguments provided.
	This function may only be called in a userland team (caused by one of the
	exec*() syscalls).

	\param path Path of the executable to load.
	\param _flatArgs The flat arguments block; set to \c NULL once ownership
		has passed to the team_arg structure.
	\param flatArgsSize Size of the flat arguments block in bytes.
	\param argCount Number of program arguments.
	\param envCount Number of environment variables.
	\param umask The file creation mask to be set for the new image.
	\return An error code, if the exec preparation failed; on success the
		function does not return. Note that after the team's resources have
		been torn down, failure leads to exit_thread().
*/
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
		team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	// tear down the old image's resources -- from here on there is no way
	// back to the caller
	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	// update ASLR
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
			// does never return
	} else
		free_team_arg(teamArgs);

	// Sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that).
	exit_thread(status);

	// We return a status here since the signal that is sent by the
	// call above is not immediately handled.
	return B_ERROR;
}
2094 
2095 
2096 static thread_id
2097 fork_team(void)
2098 {
2099 	Thread* parentThread = thread_get_current_thread();
2100 	Team* parentTeam = parentThread->team;
2101 	Team* team;
2102 	arch_fork_arg* forkArgs;
2103 	struct area_info info;
2104 	thread_id threadID;
2105 	status_t status;
2106 	ssize_t areaCookie;
2107 	bool teamLimitReached = false;
2108 
2109 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2110 
2111 	if (parentTeam == team_get_kernel_team())
2112 		return B_NOT_ALLOWED;
2113 
2114 	// create a new team
2115 	// TODO: this is very similar to load_image_internal() - maybe we can do
2116 	// something about it :)
2117 
2118 	// create the main thread object
2119 	Thread* thread;
2120 	status = Thread::Create(parentThread->name, thread);
2121 	if (status != B_OK)
2122 		return status;
2123 	BReference<Thread> threadReference(thread, true);
2124 
2125 	// create the team object
2126 	team = Team::Create(thread->id, NULL, false);
2127 	if (team == NULL)
2128 		return B_NO_MEMORY;
2129 
2130 	parentTeam->LockTeamAndProcessGroup();
2131 	team->Lock();
2132 
2133 	team->SetName(parentTeam->Name());
2134 	team->SetArgs(parentTeam->Args());
2135 
2136 	team->commpage_address = parentTeam->commpage_address;
2137 
2138 	// Inherit the parent's user/group.
2139 	inherit_parent_user_and_group(team, parentTeam);
2140 
2141 	// inherit signal handlers
2142 	team->InheritSignalActions(parentTeam);
2143 
2144 	team->Unlock();
2145 	parentTeam->UnlockTeamAndProcessGroup();
2146 
2147 	// inherit some team debug flags
2148 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2149 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2150 
2151 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2152 	if (forkArgs == NULL) {
2153 		status = B_NO_MEMORY;
2154 		goto err1;
2155 	}
2156 
2157 	// create a new io_context for this team
2158 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2159 	if (!team->io_context) {
2160 		status = B_NO_MEMORY;
2161 		goto err2;
2162 	}
2163 
2164 	// duplicate the realtime sem context
2165 	if (parentTeam->realtime_sem_context) {
2166 		team->realtime_sem_context = clone_realtime_sem_context(
2167 			parentTeam->realtime_sem_context);
2168 		if (team->realtime_sem_context == NULL) {
2169 			status = B_NO_MEMORY;
2170 			goto err2;
2171 		}
2172 	}
2173 
2174 	// create an address space for this team
2175 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2176 		&team->address_space);
2177 	if (status < B_OK)
2178 		goto err3;
2179 
2180 	// copy all areas of the team
2181 	// TODO: should be able to handle stack areas differently (ie. don't have
2182 	// them copy-on-write)
2183 
2184 	areaCookie = 0;
2185 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2186 		if (info.area == parentTeam->user_data_area) {
2187 			// don't clone the user area; just create a new one
2188 			status = create_team_user_data(team, info.address);
2189 			if (status != B_OK)
2190 				break;
2191 
2192 			thread->user_thread = team_allocate_user_thread(team);
2193 		} else {
2194 			void* address;
2195 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2196 				&address, B_CLONE_ADDRESS, info.area);
2197 			if (area < B_OK) {
2198 				status = area;
2199 				break;
2200 			}
2201 
2202 			if (info.area == parentThread->user_stack_area)
2203 				thread->user_stack_area = area;
2204 		}
2205 	}
2206 
2207 	if (status < B_OK)
2208 		goto err4;
2209 
2210 	if (thread->user_thread == NULL) {
2211 #if KDEBUG
2212 		panic("user data area not found, parent area is %" B_PRId32,
2213 			parentTeam->user_data_area);
2214 #endif
2215 		status = B_ERROR;
2216 		goto err4;
2217 	}
2218 
2219 	thread->user_stack_base = parentThread->user_stack_base;
2220 	thread->user_stack_size = parentThread->user_stack_size;
2221 	thread->user_local_storage = parentThread->user_local_storage;
2222 	thread->sig_block_mask = parentThread->sig_block_mask;
2223 	thread->signal_stack_base = parentThread->signal_stack_base;
2224 	thread->signal_stack_size = parentThread->signal_stack_size;
2225 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2226 
2227 	arch_store_fork_frame(forkArgs);
2228 
2229 	// copy image list
2230 	if (copy_images(parentTeam->id, team) != B_OK)
2231 		goto err5;
2232 
2233 	// insert the team into its parent and the teams hash
2234 	parentTeam->LockTeamAndProcessGroup();
2235 	team->Lock();
2236 
2237 	{
2238 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2239 
2240 		sTeamHash.Insert(team);
2241 		teamLimitReached = sUsedTeams >= sMaxTeams;
2242 		if (!teamLimitReached)
2243 			sUsedTeams++;
2244 	}
2245 
2246 	insert_team_into_parent(parentTeam, team);
2247 	insert_team_into_group(parentTeam->group, team);
2248 
2249 	team->Unlock();
2250 	parentTeam->UnlockTeamAndProcessGroup();
2251 
2252 	// notify team listeners
2253 	sNotificationService.Notify(TEAM_ADDED, team);
2254 
2255 	if (teamLimitReached) {
2256 		status = B_NO_MORE_TEAMS;
2257 		goto err6;
2258 	}
2259 
2260 	// create the main thread
2261 	{
2262 		ThreadCreationAttributes threadCreationAttributes(NULL,
2263 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2264 		threadCreationAttributes.forkArgs = forkArgs;
2265 		threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS;
2266 		threadID = thread_create_thread(threadCreationAttributes, false);
2267 		if (threadID < 0) {
2268 			status = threadID;
2269 			goto err6;
2270 		}
2271 	}
2272 
2273 	// notify the debugger
2274 	user_debug_team_created(team->id);
2275 
2276 	T(TeamForked(threadID));
2277 
2278 	resume_thread(threadID);
2279 	return threadID;
2280 
2281 err6:
2282 	// Remove the team structure from the process group, the parent team, and
2283 	// the team hash table and delete the team structure.
2284 	parentTeam->LockTeamAndProcessGroup();
2285 	team->Lock();
2286 
2287 	remove_team_from_group(team);
2288 	remove_team_from_parent(team->parent, team);
2289 
2290 	team->Unlock();
2291 	parentTeam->UnlockTeamAndProcessGroup();
2292 
2293 	{
2294 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2295 		sTeamHash.Remove(team);
2296 		if (!teamLimitReached)
2297 			sUsedTeams--;
2298 	}
2299 
2300 	sNotificationService.Notify(TEAM_REMOVED, team);
2301 err5:
2302 	remove_images(team);
2303 err4:
2304 	team->address_space->RemoveAndPut();
2305 err3:
2306 	delete_realtime_sem_context(team->realtime_sem_context);
2307 err2:
2308 	free(forkArgs);
2309 err1:
2310 	team->ReleaseReference();
2311 
2312 	return status;
2313 }
2314 
2315 
2316 /*!	Returns if the specified team \a parent has any children belonging to the
2317 	process group with the specified ID \a groupID.
2318 	The caller must hold \a parent's lock.
2319 */
2320 static bool
2321 has_children_in_group(Team* parent, pid_t groupID)
2322 {
2323 	for (Team* child = parent->children; child != NULL;
2324 			child = child->siblings_next) {
2325 		TeamLocker childLocker(child);
2326 		if (child->group_id == groupID)
2327 			return true;
2328 	}
2329 
2330 	return false;
2331 }
2332 
2333 
2334 /*!	Returns the first job control entry from \a children, which matches \a id.
2335 	\a id can be:
2336 	- \code > 0 \endcode: Matching an entry with that team ID.
2337 	- \code == -1 \endcode: Matching any entry.
2338 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2339 	\c 0 is an invalid value for \a id.
2340 
2341 	The caller must hold the lock of the team that \a children belongs to.
2342 
2343 	\param children The job control entry list to check.
2344 	\param id The match criterion.
2345 	\return The first matching entry or \c NULL, if none matches.
2346 */
2347 static job_control_entry*
2348 get_job_control_entry(team_job_control_children& children, pid_t id)
2349 {
2350 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2351 		 job_control_entry* entry = it.Next();) {
2352 
2353 		if (id > 0) {
2354 			if (entry->thread == id)
2355 				return entry;
2356 		} else if (id == -1) {
2357 			return entry;
2358 		} else {
2359 			pid_t processGroup
2360 				= (entry->team ? entry->team->group_id : entry->group_id);
2361 			if (processGroup == -id)
2362 				return entry;
2363 		}
2364 	}
2365 
2366 	return NULL;
2367 }
2368 
2369 
2370 /*!	Returns the first job control entry from one of team's dead, continued, or
2371 	stopped children which matches \a id.
2372 	\a id can be:
2373 	- \code > 0 \endcode: Matching an entry with that team ID.
2374 	- \code == -1 \endcode: Matching any entry.
2375 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2376 	\c 0 is an invalid value for \a id.
2377 
2378 	The caller must hold \a team's lock.
2379 
2380 	\param team The team whose dead, stopped, and continued child lists shall be
2381 		checked.
2382 	\param id The match criterion.
2383 	\param flags Specifies which children shall be considered. Dead children
2384 		are considered when \a flags is ORed bitwise with \c WEXITED, stopped
2385 		children are considered when \a flags is ORed bitwise with \c WUNTRACED
2386 		or \c WSTOPPED, continued children when \a flags is ORed bitwise with
2387 		\c WCONTINUED.
2388 	\return The first matching entry or \c NULL, if none matches.
2389 */
2390 static job_control_entry*
2391 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2392 {
2393 	job_control_entry* entry = NULL;
2394 
2395 	if ((flags & WEXITED) != 0)
2396 		entry = get_job_control_entry(team->dead_children, id);
2397 
2398 	if (entry == NULL && (flags & WCONTINUED) != 0)
2399 		entry = get_job_control_entry(team->continued_children, id);
2400 
2401 	if (entry == NULL && (flags & (WUNTRACED | WSTOPPED)) != 0)
2402 		entry = get_job_control_entry(team->stopped_children, id);
2403 
2404 	return entry;
2405 }
2406 
2407 
/*!	Creates an entry that does not yet hold a process group reference.
	A reference is acquired later in InitDeadState(), when the owning team
	dies.
*/
job_control_entry::job_control_entry()
	:
	has_group_ref(false)
{
}
2413 
2414 
/*!	Releases the process group reference acquired in InitDeadState(),
	if any. The group is looked up by ID in the global group hash.
*/
job_control_entry::~job_control_entry()
{
	if (has_group_ref) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);

		ProcessGroup* group = sGroupHash.Lookup(group_id);
		if (group == NULL) {
			// A referenced group must still be in the hash -- if it isn't,
			// something has gone badly wrong.
			panic("job_control_entry::~job_control_entry(): unknown group "
				"ID: %" B_PRId32, group_id);
			return;
		}

		// The hash lock was only needed for the lookup; drop it before
		// releasing the reference.
		groupHashLocker.Unlock();

		group->ReleaseReference();
	}
}
2432 
2433 
2434 /*!	Invoked when the owning team is dying, initializing the entry according to
2435 	the dead state.
2436 
2437 	The caller must hold the owning team's lock and the scheduler lock.
2438 */
void
job_control_entry::InitDeadState()
{
	if (team != NULL) {
		ASSERT(team->exit.initialized);

		// Acquire a reference to the process group, so the entry can still
		// be matched by group ID after the team object itself is gone.
		group_id = team->group_id;
		team->group->AcquireReference();
		has_group_ref = true;

		// snapshot the exit information and the accumulated CPU times
		thread = team->id;
		status = team->exit.status;
		reason = team->exit.reason;
		signal = team->exit.signal;
		signaling_user = team->exit.signaling_user;
		user_time = team->dead_threads_user_time
			+ team->dead_children.user_time;
		kernel_time = team->dead_threads_kernel_time
			+ team->dead_children.kernel_time;

		// the entry no longer refers to a live team
		team = NULL;
	}
}
2462 
2463 
2464 job_control_entry&
2465 job_control_entry::operator=(const job_control_entry& other)
2466 {
2467 	state = other.state;
2468 	thread = other.thread;
2469 	signal = other.signal;
2470 	has_group_ref = false;
2471 	signaling_user = other.signaling_user;
2472 	team = other.team;
2473 	group_id = other.group_id;
2474 	status = other.status;
2475 	reason = other.reason;
2476 	user_time = other.user_time;
2477 	kernel_time = other.kernel_time;
2478 
2479 	return *this;
2480 }
2481 
2482 
/*!	This is the kernel backend for waitid().

	Waits for a state change (death, stop, continue) of a child of the
	calling team, as selected by \a child and \a flags.

	\param child > 0: wait for that specific child; \c -1: wait for any
		child; \c 0: wait for any child in the caller's process group;
		< -1: wait for any child in the process group \c -child.
	\param flags Bitwise OR of \c WEXITED, \c WUNTRACED/\c WSTOPPED, and
		\c WCONTINUED selecting which state changes to report (at least one
		must be given), plus optionally \c WNOHANG (don't block) and
		\c WNOWAIT (don't consume the reported state change).
	\param _info Filled in with the \c siginfo_t data describing the state
		change.
	\param _usage_info Filled in with the dead child's accumulated CPU
		times (only for dead children).
	\return The ID of the child whose state change is reported, or an error
		code (\c B_BAD_VALUE, \c ECHILD, \c B_WOULD_BLOCK,
		\c B_INTERRUPTED).
*/
static thread_id
wait_for_child(pid_t child, uint32 flags, siginfo_t& _info,
	team_usage_info& _usage_info)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	struct job_control_entry foundEntry;
	struct job_control_entry* freeDeathEntry = NULL;
	status_t status = B_OK;

	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
		child, flags));

	T(WaitForChild(child, flags));

	// at least one state-change category must be requested
	if ((flags & (WEXITED | WUNTRACED | WSTOPPED | WCONTINUED)) == 0) {
		T(WaitForChildDone(B_BAD_VALUE));
		return B_BAD_VALUE;
	}

	pid_t originalChild = child;

	bool ignoreFoundEntries = false;
	bool ignoreFoundEntriesChecked = false;

	// Retry loop: each iteration checks for a matching entry under the team
	// lock and, if none is found yet, blocks on the dead-children condition
	// variable until something changes.
	while (true) {
		// lock the team
		TeamLocker teamLocker(team);

		// A 0 child argument means to wait for all children in the process
		// group of the calling team.
		child = originalChild == 0 ? -team->group_id : originalChild;

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		// If we don't have an entry yet, check whether there are any children
		// complying to the process group specification at all.
		if (entry == NULL) {
			// No success yet -- check whether there are any children complying
			// to the process group specification at all.
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else if (child != team->id) {
				if (Team* childTeam = Team::Get(child)) {
					BReference<Team> childTeamReference(childTeam, true);
					TeamLocker childTeamLocker(childTeam);
					childrenExist = childTeam->parent == team;
				}
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;

			// unless WNOWAIT has been specified, "consume" the wait state
			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
				if (entry->state == JOB_CONTROL_STATE_DEAD) {
					// The child is dead. Reap its death entry.
					freeDeathEntry = entry;
					team->dead_children.entries.Remove(entry);
					team->dead_children.count--;
				} else {
					// The child is well. Reset its job control state.
					team_set_job_control_state(entry->team,
						JOB_CONTROL_STATE_NONE, NULL);
				}
			}
		}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children.condition_variable.Add(&deadWaitEntry);

		teamLocker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			teamLocker.Lock();

			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			teamLocker.Unlock();

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// When we got here, we have a valid death entry, and already got
	// unregistered from the team or group. Fill in the returned info.
	memset(&_info, 0, sizeof(_info));
	_info.si_signo = SIGCHLD;
	_info.si_pid = foundEntry.thread;
	_info.si_uid = foundEntry.signaling_user;
	// TODO: Fill in si_errno?

	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			_info.si_code = foundEntry.reason;
			_info.si_status = foundEntry.reason == CLD_EXITED
				? foundEntry.status : foundEntry.signal;
			_usage_info.user_time = foundEntry.user_time;
			_usage_info.kernel_time = foundEntry.kernel_time;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			_info.si_code = CLD_STOPPED;
			_info.si_status = foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			_info.si_code = CLD_CONTINUED;
			_info.si_status = 0;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
	// status is available.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker signalLocker(team->signal_lock);
	SpinLocker threadCreationLocker(gThreadCreationLock);

	if (is_team_signal_blocked(team, SIGCHLD)) {
		if (get_job_control_entry(team, child, flags) == NULL)
			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
	}

	threadCreationLocker.Unlock();
	signalLocker.Unlock();
	teamLocker.Unlock();

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}
2671 
2672 
2673 /*! Fills the team_info structure with information from the specified team.
2674 	Interrupts must be enabled. The team must not be locked.
2675 */
2676 static status_t
2677 fill_team_info(Team* team, team_info* info, size_t size)
2678 {
2679 	if (size != sizeof(team_info))
2680 		return B_BAD_VALUE;
2681 
2682 	// TODO: Set more informations for team_info
2683 	memset(info, 0, size);
2684 
2685 	info->team = team->id;
2686 		// immutable
2687 	info->image_count = count_images(team);
2688 		// protected by sImageMutex
2689 
2690 	TeamLocker teamLocker(team);
2691 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2692 
2693 	info->thread_count = team->num_threads;
2694 	//info->area_count =
2695 	info->debugger_nub_thread = team->debug_info.nub_thread;
2696 	info->debugger_nub_port = team->debug_info.nub_port;
2697 	info->uid = team->effective_uid;
2698 	info->gid = team->effective_gid;
2699 
2700 	strlcpy(info->args, team->Args(), sizeof(info->args));
2701 	info->argc = 1;
2702 
2703 	return B_OK;
2704 }
2705 
2706 
2707 /*!	Returns whether the process group contains stopped processes.
2708 	The caller must hold the process group's lock.
2709 */
2710 static bool
2711 process_group_has_stopped_processes(ProcessGroup* group)
2712 {
2713 	Team* team = group->teams;
2714 	while (team != NULL) {
2715 		// the parent team's lock guards the job control entry -- acquire it
2716 		team->LockTeamAndParent(false);
2717 
2718 		if (team->job_control_entry != NULL
2719 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2720 			team->UnlockTeamAndParent();
2721 			return true;
2722 		}
2723 
2724 		team->UnlockTeamAndParent();
2725 
2726 		team = team->group_next;
2727 	}
2728 
2729 	return false;
2730 }
2731 
2732 
2733 /*!	Iterates through all process groups queued in team_remove_team() and signals
2734 	those that are orphaned and have stopped processes.
2735 	The caller must not hold any team or process group locks.
2736 */
static void
orphaned_process_group_check()
{
	// process as long as there are groups in the list
	while (true) {
		// remove the head from the list
		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
		if (group == NULL)
			return;

		group->UnsetOrphanedCheck();
		// keep the group alive after we drop the list lock
		BReference<ProcessGroup> groupReference(group);

		orphanedCheckLocker.Unlock();

		AutoLocker<ProcessGroup> groupLocker(group);

		// If the group is orphaned and contains stopped processes, we're
		// supposed to send SIGHUP + SIGCONT.
		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
			Thread* currentThread = thread_get_current_thread();

			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
			send_signal_to_process_group_locked(group, signal, 0);

			// reuse the signal object for the follow-up SIGCONT
			signal.SetNumber(SIGCONT);
			send_signal_to_process_group_locked(group, signal, 0);
		}
	}
}
2769 
2770 
2771 static status_t
2772 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2773 	uint32 flags)
2774 {
2775 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2776 		return B_BAD_VALUE;
2777 
2778 	// get the team
2779 	Team* team = Team::GetAndLock(id);
2780 	if (team == NULL)
2781 		return B_BAD_TEAM_ID;
2782 	BReference<Team> teamReference(team, true);
2783 	TeamLocker teamLocker(team, true);
2784 
2785 	if ((flags & B_CHECK_PERMISSION) != 0) {
2786 		uid_t uid = geteuid();
2787 		if (uid != 0 && uid != team->effective_uid)
2788 			return B_NOT_ALLOWED;
2789 	}
2790 
2791 	bigtime_t kernelTime = 0;
2792 	bigtime_t userTime = 0;
2793 
2794 	switch (who) {
2795 		case B_TEAM_USAGE_SELF:
2796 		{
2797 			Thread* thread = team->thread_list;
2798 
2799 			for (; thread != NULL; thread = thread->team_next) {
2800 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2801 				kernelTime += thread->kernel_time;
2802 				userTime += thread->user_time;
2803 			}
2804 
2805 			kernelTime += team->dead_threads_kernel_time;
2806 			userTime += team->dead_threads_user_time;
2807 			break;
2808 		}
2809 
2810 		case B_TEAM_USAGE_CHILDREN:
2811 		{
2812 			Team* child = team->children;
2813 			for (; child != NULL; child = child->siblings_next) {
2814 				TeamLocker childLocker(child);
2815 
2816 				Thread* thread = team->thread_list;
2817 
2818 				for (; thread != NULL; thread = thread->team_next) {
2819 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2820 					kernelTime += thread->kernel_time;
2821 					userTime += thread->user_time;
2822 				}
2823 
2824 				kernelTime += child->dead_threads_kernel_time;
2825 				userTime += child->dead_threads_user_time;
2826 			}
2827 
2828 			kernelTime += team->dead_children.kernel_time;
2829 			userTime += team->dead_children.user_time;
2830 			break;
2831 		}
2832 	}
2833 
2834 	info->kernel_time = kernelTime;
2835 	info->user_time = userTime;
2836 
2837 	return B_OK;
2838 }
2839 
2840 
2841 //	#pragma mark - Private kernel API
2842 
2843 
/*!	Initializes the team subsystem at boot: sets up the team and process
	group hash tables, creates the initial session and process group, and
	creates and publishes the kernel team. Also registers the "team"/"teams"
	KDL commands and the team notification service.
	Panics if any of the essential allocations fails.
*/
status_t
team_init(kernel_args* args)
{
	// create the team hash table
	new(&sTeamHash) TeamTable;
	if (sTeamHash.Init(64) != B_OK)
		panic("Failed to init team hash table!");

	new(&sGroupHash) ProcessGroupHashTable;
	if (sGroupHash.Init() != B_OK)
		panic("Failed to init process group hash table!");

	// create initial session and process groups

	ProcessSession* session = new(std::nothrow) ProcessSession(1);
	if (session == NULL)
		panic("Could not create initial session.\n");
	BReference<ProcessSession> sessionReference(session, true);

	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");
	BReference<ProcessGroup> groupReference(group, true);

	group->Publish(session);

	// create the kernel team (team ID 1)
	sKernelTeam = Team::Create(1, "kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");

	sKernelTeam->address_space = VMAddressSpace::Kernel();
	sKernelTeam->SetArgs(sKernelTeam->Name());
	sKernelTeam->state = TEAM_STATE_NORMAL;

	// the kernel team runs as root with no supplementary groups
	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
		dprintf("Failed to resize FD table for kernel team!\n");

	// stick it in the team hash
	sTeamHash.Insert(sKernelTeam);

	// check safe mode settings
	sDisableUserAddOns = get_safemode_boolean(B_SAFEMODE_DISABLE_USER_ADD_ONS,
		false);

	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	sNotificationService.Register();

	return B_OK;
}
2921 
2922 
/*!	Returns the maximum number of teams the system supports. */
int32
team_max_teams(void)
{
	return sMaxTeams;
}
2928 
2929 
/*!	Returns the number of currently existing teams.
	Acquires the team hash lock for a consistent read.
*/
int32
team_used_teams(void)
{
	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
	return sUsedTeams;
}
2936 
2937 
2938 /*! Returns a death entry of a child team specified by ID (if any).
2939 	The caller must hold the team's lock.
2940 
2941 	\param team The team whose dead children list to check.
2942 	\param child The ID of the child for whose death entry to lock. Must be > 0.
2943 	\param _deleteEntry Return variable, indicating whether the caller needs to
2944 		delete the returned entry.
2945 	\return The death entry of the matching team, or \c NULL, if no death entry
2946 		for the team was found.
2947 */
2948 job_control_entry*
2949 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2950 {
2951 	if (child <= 0)
2952 		return NULL;
2953 
2954 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2955 		child);
2956 	if (entry) {
2957 		// remove the entry only, if the caller is the parent of the found team
2958 		if (team_get_current_team_id() == entry->thread) {
2959 			team->dead_children.entries.Remove(entry);
2960 			team->dead_children.count--;
2961 			*_deleteEntry = true;
2962 		} else {
2963 			*_deleteEntry = false;
2964 		}
2965 	}
2966 
2967 	return entry;
2968 }
2969 
2970 
2971 /*! Quick check to see if we have a valid team ID. */
2972 bool
2973 team_is_valid(team_id id)
2974 {
2975 	if (id <= 0)
2976 		return false;
2977 
2978 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
2979 	return team_get_team_struct_locked(id) != NULL;
2980 }
2981 
2982 
/*!	Looks up a team by ID in the global team hash.
	The caller must hold \c sTeamHashLock.
	\return The team, or \c NULL if no team with \a id exists.
*/
Team*
team_get_team_struct_locked(team_id id)
{
	return sTeamHash.Lookup(id);
}
2988 
2989 
2990 void
2991 team_set_controlling_tty(int32 ttyIndex)
2992 {
2993 	// lock the team, so its session won't change while we're playing with it
2994 	Team* team = thread_get_current_thread()->team;
2995 	TeamLocker teamLocker(team);
2996 
2997 	// get and lock the session
2998 	ProcessSession* session = team->group->Session();
2999 	AutoLocker<ProcessSession> sessionLocker(session);
3000 
3001 	// set the session's fields
3002 	session->controlling_tty = ttyIndex;
3003 	session->foreground_group = -1;
3004 }
3005 
3006 
3007 int32
3008 team_get_controlling_tty()
3009 {
3010 	// lock the team, so its session won't change while we're playing with it
3011 	Team* team = thread_get_current_thread()->team;
3012 	TeamLocker teamLocker(team);
3013 
3014 	// get and lock the session
3015 	ProcessSession* session = team->group->Session();
3016 	AutoLocker<ProcessSession> sessionLocker(session);
3017 
3018 	// get the session's field
3019 	return session->controlling_tty;
3020 }
3021 
3022 
/*!	Sets the foreground process group of the session associated with the
	given controlling TTY.

	If the calling team is in a background group that neither ignores nor
	blocks \c SIGTTOU, the change is not performed; instead \c SIGTTOU is
	sent to the caller's process group and \c B_INTERRUPTED is returned.

	\param ttyIndex The TTY index; must be the controlling TTY of the
		calling team's session, otherwise \c ENOTTY is returned.
	\param processGroupID The new foreground group; must belong to the
		caller's session, otherwise \c B_BAD_VALUE is returned.
	\return \c B_OK on success, an error code otherwise.
*/
status_t
team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
{
	// lock the team, so its session won't change while we're playing with it
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// check given TTY -- must be the controlling tty of the calling process
	if (session->controlling_tty != ttyIndex)
		return ENOTTY;

	// check given process group -- must belong to our session
	{
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
		if (group == NULL || group->Session() != session)
			return B_BAD_VALUE;
	}

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
		InterruptsSpinLocker signalLocker(team->signal_lock);

		if (!is_team_signal_blocked(team, SIGTTOU)) {
			pid_t groupID = team->group_id;

			// Drop all locks (in reverse acquisition order) before sending
			// the signal.
			signalLocker.Unlock();
			sessionLocker.Unlock();
			teamLocker.Unlock();

			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
			send_signal_to_process_group(groupID, signal, 0);
			return B_INTERRUPTED;
		}
	}

	session->foreground_group = processGroupID;

	return B_OK;
}
3072 
3073 
3074 uid_t
3075 team_geteuid(team_id id)
3076 {
3077 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3078 	Team* team = team_get_team_struct_locked(id);
3079 	if (team == NULL)
3080 		return (uid_t)-1;
3081 	return team->effective_uid;
3082 }
3083 
3084 
3085 /*!	Removes the specified team from the global team hash, from its process
3086 	group, and from its parent.
3087 	It also moves all of its children to the kernel team.
3088 
3089 	The caller must hold the following locks:
3090 	- \a team's process group's lock,
3091 	- the kernel team's lock,
3092 	- \a team's parent team's lock (might be the kernel team), and
3093 	- \a team's lock.
3094 */
void
team_remove_team(Team* team, pid_t& _signalGroup)
{
	Team* parent = team->parent;

	// remember how long this team lasted
	parent->dead_children.kernel_time += team->dead_threads_kernel_time
		+ team->dead_children.kernel_time;
	parent->dead_children.user_time += team->dead_threads_user_time
		+ team->dead_children.user_time;

	// remove the team from the hash table
	InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
	sTeamHash.Remove(team);
	sUsedTeams--;
	teamsLocker.Unlock();

	// The team can no longer be accessed by ID. Navigation to it is still
	// possible from its process group and its parent and children, but that
	// will be rectified shortly.
	team->state = TEAM_STATE_DEATH;

	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signalling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine, whom to signal.
	_signalGroup = -1;
	bool isSessionLeader = false;
	if (team->session_id == team->id
		&& team->group->Session()->controlling_tty >= 0) {
		isSessionLeader = true;

		ProcessSession* session = team->group->Session();

		AutoLocker<ProcessSession> sessionLocker(session);

		// detach the terminal; the caller signals the former foreground group
		session->controlling_tty = -1;
		_signalGroup = session->foreground_group;
	}

	// remove us from our process group
	remove_team_from_group(team);

	// move the team's children to the kernel team
	while (Team* child = team->children) {
		// remove the child from the current team and add it to the kernel team
		TeamLocker childLocker(child);

		remove_team_from_parent(team, child);
		insert_team_into_parent(sKernelTeam, child);

		// move job control entries too
		sKernelTeam->stopped_children.entries.MoveFrom(
			&team->stopped_children.entries);
		sKernelTeam->continued_children.entries.MoveFrom(
			&team->continued_children.entries);

		// If the team was a session leader with controlling terminal,
		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
		// groups with stopped processes. Due to locking complications we can't
		// do that here, so we only check whether we were a reason for the
		// child's process group not being an orphan and, if so, schedule a
		// later check (cf. orphaned_process_group_check()).
		if (isSessionLeader) {
			ProcessGroup* childGroup = child->group;
			if (childGroup->Session()->id == team->session_id
				&& childGroup->id != team->group_id) {
				childGroup->ScheduleOrphanedCheck();
			}
		}

		// Note, we don't move the dead children entries. Those will be deleted
		// when the team structure is deleted.
	}

	// remove us from our parent
	remove_team_from_parent(parent, team);
}
3173 
3174 
3175 /*!	Kills all threads but the main thread of the team and shuts down user
3176 	debugging for it.
3177 	To be called on exit of the team's main thread. No locks must be held.
3178 
3179 	\param team The team in question.
3180 	\return The port of the debugger for the team, -1 if none. To be passed to
3181 		team_delete_team().
3182 */
port_id
team_shutdown_team(Team* team)
{
	// must be called by the team's own main thread
	ASSERT(thread_get_current_thread() == team->main_thread);

	TeamLocker teamLocker(team);

	// Make sure debugging changes won't happen anymore.
	port_id debuggerPort = -1;
	while (true) {
		// If a debugger change is in progress for the team, we'll have to
		// wait until it is done.
		ConditionVariableEntry waitForDebuggerEntry;
		bool waitForDebugger = false;

		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition != NULL) {
			team->debug_info.debugger_changed_condition->Add(
				&waitForDebuggerEntry);
			waitForDebugger = true;
		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
			// The team is being debugged. That will stop with the termination
			// of the nub thread. Since we set the team state to death, no one
			// can install a debugger anymore. We fetch the debugger's port to
			// send it a message at the bitter end.
			debuggerPort = team->debug_info.debugger_port;
		}

		debugInfoLocker.Unlock();

		if (!waitForDebugger)
			break;

		// wait for the debugger change to be finished
		teamLocker.Unlock();

		waitForDebuggerEntry.Wait();

		teamLocker.Lock();
	}

	// Mark the team as shutting down. That will prevent new threads from being
	// created and debugger changes from taking place.
	team->state = TEAM_STATE_SHUTDOWN;

	// delete all timers
	team->DeleteUserTimers(false);

	// deactivate CPU time user timers for the team
	InterruptsSpinLocker timeLocker(team->time_lock);

	if (team->HasActiveCPUTimeUserTimers())
		team->DeactivateCPUTimeUserTimers();

	timeLocker.Unlock();

	// kill all threads but the main thread
	team_death_entry deathEntry;
	deathEntry.condition.Init(team, "team death");

	while (true) {
		team->death_entry = &deathEntry;
		deathEntry.remaining_threads = 0;

		// send SIGKILLTHR to every thread but the main one and count how
		// many we have to wait for
		Thread* thread = team->thread_list;
		while (thread != NULL) {
			if (thread != team->main_thread) {
				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
				deathEntry.remaining_threads++;
			}

			thread = thread->team_next;
		}

		if (deathEntry.remaining_threads == 0)
			break;

		// there are threads to wait for
		ConditionVariableEntry entry;
		deathEntry.condition.Add(&entry);

		teamLocker.Unlock();

		entry.Wait();

		// The team lock was released while waiting, so re-examine the
		// thread list on the next iteration.
		teamLocker.Lock();
	}

	team->death_entry = NULL;

	return debuggerPort;
}
3277 
3278 
/*!	Called on team exit to notify threads waiting on the team and free most
	resources associated with it.
	The caller shouldn't hold any locks.

	\param team The team to delete. All of its threads must already be gone
		(\c num_threads == 0).
	\param debuggerPort The debugger port as returned by team_shutdown_team(),
		or -1 if the team wasn't being debugged.
*/
void
team_delete_team(Team* team, port_id debuggerPort)
{
	// Not quite in our job description, but work that has been left by
	// team_remove_team() and that can be done now that we're not holding any
	// locks.
	orphaned_process_group_check();

	team_id teamID = team->id;

	ASSERT(team->num_threads == 0);

	// If someone is waiting for this team to be loaded, but it dies
	// unexpectedly before being done, we need to notify the waiting
	// thread now.

	TeamLocker teamLocker(team);

	if (team->loading_info != NULL) {
		// there's indeed someone waiting
		team->loading_info->result = B_ERROR;

		// wake up the waiting thread
		team->loading_info->condition.NotifyAll();
		team->loading_info = NULL;
	}

	// notify team watchers

	{
		// we're not reachable from anyone anymore at this point, so we
		// can safely access the list without any locking
		struct team_watcher* watcher;
		while ((watcher = (struct team_watcher*)list_remove_head_item(
				&team->watcher_list)) != NULL) {
			watcher->hook(teamID, watcher->data);
			free(watcher);
		}
	}

	teamLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	// free team resources

	delete_realtime_sem_context(team->realtime_sem_context);
	xsi_sem_undo(team);
	remove_images(team);
	team->address_space->RemoveAndPut();

	team->ReleaseReference();
		// this may delete the team object -- don't touch it afterwards; only
		// the copied teamID is used below

	// notify the debugger, that the team is gone
	user_debug_team_deleted(teamID, debuggerPort);
}
3339 
3340 
3341 Team*
3342 team_get_kernel_team(void)
3343 {
3344 	return sKernelTeam;
3345 }
3346 
3347 
3348 team_id
3349 team_get_kernel_team_id(void)
3350 {
3351 	if (!sKernelTeam)
3352 		return 0;
3353 
3354 	return sKernelTeam->id;
3355 }
3356 
3357 
3358 team_id
3359 team_get_current_team_id(void)
3360 {
3361 	return thread_get_current_thread()->team->id;
3362 }
3363 
3364 
/*!	Returns a reference to the address space of the team with the given ID.
	On success the caller is responsible for releasing the acquired reference.

	\param id The ID of the team whose address space shall be returned.
	\param _addressSpace Set to the team's address space on success.
	\return \c B_OK on success, \c B_BAD_VALUE if no team with the given ID
		exists.
*/
status_t
team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
{
	if (id == sKernelTeam->id) {
		// we're the kernel team, so we don't have to go through all
		// the hassle (locking and hash lookup)
		*_addressSpace = VMAddressSpace::GetKernel();
		return B_OK;
	}

	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);

	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return B_BAD_VALUE;

	team->address_space->Get();
		// acquire the reference while still holding the hash lock, so the
		// address space can't go away underneath us
	*_addressSpace = team->address_space;
	return B_OK;
}
3385 
3386 
/*!	Sets the team's job control state.
	The caller must hold the parent team's lock. Interrupts are allowed to be
	enabled or disabled.
	\a team The team whose job control state shall be set.
	\a newState The new state to be set.
	\a signal The signal the new state was caused by. Can be \c NULL, if none.
		Then the caller is responsible for filling in the following fields of
		the entry before releasing the parent team's lock, unless the new state
		is \c JOB_CONTROL_STATE_NONE:
		- \c signal: The number of the signal causing the state change.
		- \c signaling_user: The real UID of the user sending the signal.
*/
void
team_set_job_control_state(Team* team, job_control_state newState,
	Signal* signal)
{
	if (team == NULL || team->job_control_entry == NULL)
		return;

	// don't touch anything, if the state stays the same or the team is already
	// dead
	job_control_entry* entry = team->job_control_entry;
	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
		return;

	T(SetJobControlState(team->id, newState, signal));
		// tracing hook

	// remove from the old list
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry is in no list ATM
			break;
		case JOB_CONTROL_STATE_DEAD:
			// can't get here
			break;
		case JOB_CONTROL_STATE_STOPPED:
			team->parent->stopped_children.entries.Remove(entry);
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			team->parent->continued_children.entries.Remove(entry);
			break;
	}

	entry->state = newState;

	if (signal != NULL) {
		entry->signal = signal->Number();
		entry->signaling_user = signal->SendingUser();
	}

	// add to new list
	team_job_control_children* childList = NULL;
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry doesn't get into any list
			break;
		case JOB_CONTROL_STATE_DEAD:
			childList = &team->parent->dead_children;
			team->parent->dead_children.count++;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			childList = &team->parent->stopped_children;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			childList = &team->parent->continued_children;
			break;
	}

	if (childList != NULL) {
		childList->entries.Add(entry);
		// wake a parent possibly waiting in wait_for_child()
		team->parent->dead_children.condition_variable.NotifyAll();
	}
}
3460 
3461 
3462 /*!	Inits the given team's exit information, if not yet initialized, to some
3463 	generic "killed" status.
3464 	The caller must not hold the team's lock. Interrupts must be enabled.
3465 
3466 	\param team The team whose exit info shall be initialized.
3467 */
3468 void
3469 team_init_exit_info_on_error(Team* team)
3470 {
3471 	TeamLocker teamLocker(team);
3472 
3473 	if (!team->exit.initialized) {
3474 		team->exit.reason = CLD_KILLED;
3475 		team->exit.signal = SIGKILL;
3476 		team->exit.signaling_user = geteuid();
3477 		team->exit.status = 0;
3478 		team->exit.initialized = true;
3479 	}
3480 }
3481 
3482 
3483 /*! Adds a hook to the team that is called as soon as this team goes away.
3484 	This call might get public in the future.
3485 */
3486 status_t
3487 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3488 {
3489 	if (hook == NULL || teamID < B_OK)
3490 		return B_BAD_VALUE;
3491 
3492 	// create the watcher object
3493 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3494 	if (watcher == NULL)
3495 		return B_NO_MEMORY;
3496 
3497 	watcher->hook = hook;
3498 	watcher->data = data;
3499 
3500 	// add watcher, if the team isn't already dying
3501 	// get the team
3502 	Team* team = Team::GetAndLock(teamID);
3503 	if (team == NULL) {
3504 		free(watcher);
3505 		return B_BAD_TEAM_ID;
3506 	}
3507 
3508 	list_add_item(&team->watcher_list, watcher);
3509 
3510 	team->UnlockAndReleaseReference();
3511 
3512 	return B_OK;
3513 }
3514 
3515 
3516 status_t
3517 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3518 {
3519 	if (hook == NULL || teamID < 0)
3520 		return B_BAD_VALUE;
3521 
3522 	// get team and remove watcher (if present)
3523 	Team* team = Team::GetAndLock(teamID);
3524 	if (team == NULL)
3525 		return B_BAD_TEAM_ID;
3526 
3527 	// search for watcher
3528 	team_watcher* watcher = NULL;
3529 	while ((watcher = (team_watcher*)list_get_next_item(
3530 			&team->watcher_list, watcher)) != NULL) {
3531 		if (watcher->hook == hook && watcher->data == data) {
3532 			// got it!
3533 			list_remove_item(&team->watcher_list, watcher);
3534 			break;
3535 		}
3536 	}
3537 
3538 	team->UnlockAndReleaseReference();
3539 
3540 	if (watcher == NULL)
3541 		return B_ENTRY_NOT_FOUND;
3542 
3543 	free(watcher);
3544 	return B_OK;
3545 }
3546 
3547 
/*!	Allocates a user_thread structure from the team.
	The team lock must be held, unless the function is called for the team's
	main thread. Interrupts must be enabled.

	\param team The team to allocate from.
	\return A pointer into the team's user data area, or \c NULL if the team
		has no user data area or the area could not be resized.
*/
struct user_thread*
team_allocate_user_thread(Team* team)
{
	if (team->user_data == 0)
		return NULL;

	// take an entry from the free list, if any
	if (struct free_user_thread* entry = team->free_user_threads) {
		user_thread* thread = entry->thread;
		team->free_user_threads = entry->next;
		free(entry);
			// only the kernel-heap list node is freed; the user_thread slot
			// itself is being handed out
		return thread;
	}

	while (true) {
		// enough space left?
		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
		if (team->user_data_size - team->used_user_data < needed) {
			// try to resize the area
			if (resize_area(team->user_data_area,
					team->user_data_size + B_PAGE_SIZE) != B_OK) {
				return NULL;
			}

			// resized user area successfully -- try to allocate the user_thread
			// again
			team->user_data_size += B_PAGE_SIZE;
			continue;
		}

		// allocate the user_thread
		user_thread* thread
			= (user_thread*)(team->user_data + team->used_user_data);
		team->used_user_data += needed;

		return thread;
	}
}
3590 
3591 
3592 /*!	Frees the given user_thread structure.
3593 	The team's lock must not be held. Interrupts must be enabled.
3594 	\param team The team the user thread was allocated from.
3595 	\param userThread The user thread to free.
3596 */
3597 void
3598 team_free_user_thread(Team* team, struct user_thread* userThread)
3599 {
3600 	if (userThread == NULL)
3601 		return;
3602 
3603 	// create a free list entry
3604 	free_user_thread* entry
3605 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3606 	if (entry == NULL) {
3607 		// we have to leak the user thread :-/
3608 		return;
3609 	}
3610 
3611 	// add to free list
3612 	TeamLocker teamLocker(team);
3613 
3614 	entry->thread = userThread;
3615 	entry->next = team->free_user_threads;
3616 	team->free_user_threads = entry;
3617 }
3618 
3619 
3620 //	#pragma mark - Associated data interface
3621 
3622 
// Constructs associated data that isn't attached to any owner yet.
AssociatedData::AssociatedData()
	:
	fOwner(NULL)
{
}
3628 
3629 
// Intentionally empty destructor.
AssociatedData::~AssociatedData()
{
}
3633 
3634 
/*!	Hook invoked when the owner this data is attached to is deleted (see
	AssociatedDataOwner::PrepareForDeletion()). The base implementation does
	nothing.
*/
void
AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
{
}
3639 
3640 
AssociatedDataOwner::AssociatedDataOwner()
{
	// fLock guards fList and the owner pointers of the attached data
	mutex_init(&fLock, "associated data owner");
}
3645 
3646 
AssociatedDataOwner::~AssociatedDataOwner()
{
	// NOTE(review): presumably PrepareForDeletion() has detached all data by
	// this point
	mutex_destroy(&fLock);
}
3651 
3652 
3653 bool
3654 AssociatedDataOwner::AddData(AssociatedData* data)
3655 {
3656 	MutexLocker locker(fLock);
3657 
3658 	if (data->Owner() != NULL)
3659 		return false;
3660 
3661 	data->AcquireReference();
3662 	fList.Add(data);
3663 	data->SetOwner(this);
3664 
3665 	return true;
3666 }
3667 
3668 
3669 bool
3670 AssociatedDataOwner::RemoveData(AssociatedData* data)
3671 {
3672 	MutexLocker locker(fLock);
3673 
3674 	if (data->Owner() != this)
3675 		return false;
3676 
3677 	data->SetOwner(NULL);
3678 	fList.Remove(data);
3679 
3680 	locker.Unlock();
3681 
3682 	data->ReleaseReference();
3683 
3684 	return true;
3685 }
3686 
3687 
3688 void
3689 AssociatedDataOwner::PrepareForDeletion()
3690 {
3691 	MutexLocker locker(fLock);
3692 
3693 	// move all data to a temporary list and unset the owner
3694 	DataList list;
3695 	list.MoveFrom(&fList);
3696 
3697 	for (DataList::Iterator it = list.GetIterator();
3698 		AssociatedData* data = it.Next();) {
3699 		data->SetOwner(NULL);
3700 	}
3701 
3702 	locker.Unlock();
3703 
3704 	// call the notification hooks and release our references
3705 	while (AssociatedData* data = list.RemoveHead()) {
3706 		data->OwnerDeleted(this);
3707 		data->ReleaseReference();
3708 	}
3709 }
3710 
3711 
3712 /*!	Associates data with the current team.
3713 	When the team is deleted, the data object is notified.
3714 	The team acquires a reference to the object.
3715 
3716 	\param data The data object.
3717 	\return \c true on success, \c false otherwise. Fails only when the supplied
3718 		data object is already associated with another owner.
3719 */
3720 bool
3721 team_associate_data(AssociatedData* data)
3722 {
3723 	return thread_get_current_thread()->team->AddData(data);
3724 }
3725 
3726 
3727 /*!	Dissociates data from the current team.
3728 	Balances an earlier call to team_associate_data().
3729 
3730 	\param data The data object.
3731 	\return \c true on success, \c false otherwise. Fails only when the data
3732 		object is not associated with the current team.
3733 */
3734 bool
3735 team_dissociate_data(AssociatedData* data)
3736 {
3737 	return thread_get_current_thread()->team->RemoveData(data);
3738 }
3739 
3740 
3741 //	#pragma mark - Public kernel API
3742 
3743 
3744 thread_id
3745 load_image(int32 argCount, const char** args, const char** env)
3746 {
3747 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
3748 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
3749 }
3750 
3751 
3752 thread_id
3753 load_image_etc(int32 argCount, const char* const* args,
3754 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3755 {
3756 	// we need to flatten the args and environment
3757 
3758 	if (args == NULL)
3759 		return B_BAD_VALUE;
3760 
3761 	// determine total needed size
3762 	int32 argSize = 0;
3763 	for (int32 i = 0; i < argCount; i++)
3764 		argSize += strlen(args[i]) + 1;
3765 
3766 	int32 envCount = 0;
3767 	int32 envSize = 0;
3768 	while (env != NULL && env[envCount] != NULL)
3769 		envSize += strlen(env[envCount++]) + 1;
3770 
3771 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3772 	if (size > MAX_PROCESS_ARGS_SIZE)
3773 		return B_TOO_MANY_ARGS;
3774 
3775 	// allocate space
3776 	char** flatArgs = (char**)malloc(size);
3777 	if (flatArgs == NULL)
3778 		return B_NO_MEMORY;
3779 
3780 	char** slot = flatArgs;
3781 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3782 
3783 	// copy arguments and environment
3784 	for (int32 i = 0; i < argCount; i++) {
3785 		int32 argSize = strlen(args[i]) + 1;
3786 		memcpy(stringSpace, args[i], argSize);
3787 		*slot++ = stringSpace;
3788 		stringSpace += argSize;
3789 	}
3790 
3791 	*slot++ = NULL;
3792 
3793 	for (int32 i = 0; i < envCount; i++) {
3794 		int32 envSize = strlen(env[i]) + 1;
3795 		memcpy(stringSpace, env[i], envSize);
3796 		*slot++ = stringSpace;
3797 		stringSpace += envSize;
3798 	}
3799 
3800 	*slot++ = NULL;
3801 
3802 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3803 		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
3804 
3805 	free(flatArgs);
3806 		// load_image_internal() unset our variable if it took over ownership
3807 
3808 	return thread;
3809 }
3810 
3811 
/*!	Waits for the team with the given ID to terminate.

	\param id The ID of the team to wait for.
	\param _returnCode Set to the exit code of the team's main thread.
	\return \c B_OK on success, \c B_BAD_TEAM_ID if no team with the given ID
		exists, or an error from wait_for_thread().
*/
status_t
wait_for_team(team_id id, status_t* _returnCode)
{
	// check whether the team exists
	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);

	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;

	id = team->id;
		// re-read the canonical ID from the team structure

	teamsLocker.Unlock();

	// wait for the main thread (it has the same ID as the team)
	return wait_for_thread(id, _returnCode);
}
3829 
3830 
/*!	Terminates the team with the given ID.
	The kernel team cannot be killed.

	\param id The ID of the team to kill.
	\return \c B_OK on success, \c B_BAD_TEAM_ID if no such team exists,
		\c B_NOT_ALLOWED for the kernel team.
*/
status_t
kill_team(team_id id)
{
	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);

	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;

	id = team->id;
		// re-read the canonical ID from the team structure

	teamsLocker.Unlock();

	// Note: after unlocking, the team pointer is only compared, never
	// dereferenced again.
	if (team == sKernelTeam)
		return B_NOT_ALLOWED;

	// Just kill the team's main thread (it has same ID as the team). The
	// cleanup code there will take care of the team.
	return kill_thread(id);
}
3851 
3852 
3853 status_t
3854 _get_team_info(team_id id, team_info* info, size_t size)
3855 {
3856 	// get the team
3857 	Team* team = Team::Get(id);
3858 	if (team == NULL)
3859 		return B_BAD_TEAM_ID;
3860 	BReference<Team> teamReference(team, true);
3861 
3862 	// fill in the info
3863 	return fill_team_info(team, info, size);
3864 }
3865 
3866 
/*!	Fills in team_info for the next existing team, starting at \a *cookie.
	Used to iterate over all teams; \a *cookie is advanced past the returned
	team on success.

	\param cookie In: the team ID slot to start searching at; out: one past
		the returned team's ID.
	\param info The structure to be filled in.
	\param size Size of \a info (passed through to fill_team_info()).
	\return \c B_OK on success, \c B_BAD_TEAM_ID when there are no more teams.
*/
status_t
_get_next_team_info(int32* cookie, team_info* info, size_t size)
{
	int32 slot = *cookie;
	if (slot < 1)
		slot = 1;
			// IDs below 1 are never valid team IDs

	InterruptsReadSpinLocker locker(sTeamHashLock);

	team_id lastTeamID = peek_next_thread_id();
		// TODO: This is broken, since the id can wrap around!

	// get next valid team
	Team* team = NULL;
	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
		slot++;

	if (team == NULL)
		return B_BAD_TEAM_ID;

	// get a reference to the team and unlock
	BReference<Team> teamReference(team);
	locker.Unlock();

	// fill in the info
	*cookie = ++slot;
	return fill_team_info(team, info, size);
}
3895 
3896 
3897 status_t
3898 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3899 {
3900 	if (size != sizeof(team_usage_info))
3901 		return B_BAD_VALUE;
3902 
3903 	return common_get_team_usage_info(id, who, info, 0);
3904 }
3905 
3906 
3907 pid_t
3908 getpid(void)
3909 {
3910 	return thread_get_current_thread()->team->id;
3911 }
3912 
3913 
3914 pid_t
3915 getppid()
3916 {
3917 	return _getppid(0);
3918 }
3919 
3920 
3921 pid_t
3922 getpgid(pid_t id)
3923 {
3924 	if (id < 0) {
3925 		errno = EINVAL;
3926 		return -1;
3927 	}
3928 
3929 	if (id == 0) {
3930 		// get process group of the calling process
3931 		Team* team = thread_get_current_thread()->team;
3932 		TeamLocker teamLocker(team);
3933 		return team->group_id;
3934 	}
3935 
3936 	// get the team
3937 	Team* team = Team::GetAndLock(id);
3938 	if (team == NULL) {
3939 		errno = ESRCH;
3940 		return -1;
3941 	}
3942 
3943 	// get the team's process group ID
3944 	pid_t groupID = team->group_id;
3945 
3946 	team->UnlockAndReleaseReference();
3947 
3948 	return groupID;
3949 }
3950 
3951 
3952 pid_t
3953 getsid(pid_t id)
3954 {
3955 	if (id < 0) {
3956 		errno = EINVAL;
3957 		return -1;
3958 	}
3959 
3960 	if (id == 0) {
3961 		// get session of the calling process
3962 		Team* team = thread_get_current_thread()->team;
3963 		TeamLocker teamLocker(team);
3964 		return team->session_id;
3965 	}
3966 
3967 	// get the team
3968 	Team* team = Team::GetAndLock(id);
3969 	if (team == NULL) {
3970 		errno = ESRCH;
3971 		return -1;
3972 	}
3973 
3974 	// get the team's session ID
3975 	pid_t sessionID = team->session_id;
3976 
3977 	team->UnlockAndReleaseReference();
3978 
3979 	return sessionID;
3980 }
3981 
3982 
3983 //	#pragma mark - User syscalls
3984 
3985 
/*!	\brief Syscall implementing exec*(): replaces the calling team's program.
	On success exec_team() does not return; only error cases reach the end of
	this function.

	\param userPath Userland path of the program to execute.
	\param userFlatArgs Userland flattened argument/environment buffer.
	\param flatArgsSize Size of the flattened buffer.
	\param argCount Number of arguments in the buffer.
	\param envCount Number of environment entries in the buffer.
	\param umask The file mode creation mask for the new image.
	\return An error code (only on failure).
*/
status_t
_user_exec(const char* userPath, const char* const* userFlatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char path[B_PATH_NAME_LENGTH];

	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
		return B_BAD_ADDRESS;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);

	if (error == B_OK) {
		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
			envCount, umask);
			// this one only returns in case of error
	}

	free(flatArgs);
	return error;
}
4012 
4013 
4014 thread_id
4015 _user_fork(void)
4016 {
4017 	return fork_team();
4018 }
4019 
4020 
4021 pid_t
4022 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo,
4023 	team_usage_info* usageInfo)
4024 {
4025 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
4026 		return B_BAD_ADDRESS;
4027 	if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo))
4028 		return B_BAD_ADDRESS;
4029 
4030 	siginfo_t info;
4031 	team_usage_info usage_info;
4032 	pid_t foundChild = wait_for_child(child, flags, info, usage_info);
4033 	if (foundChild < 0)
4034 		return syscall_restart_handle_post(foundChild);
4035 
4036 	// copy info back to userland
4037 	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
4038 		return B_BAD_ADDRESS;
4039 	// copy usage_info back to userland
4040 	if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info,
4041 		sizeof(usage_info)) != B_OK) {
4042 		return B_BAD_ADDRESS;
4043 	}
4044 
4045 	return foundChild;
4046 }
4047 
4048 
4049 pid_t
4050 _user_process_info(pid_t process, int32 which)
4051 {
4052 	pid_t result;
4053 	switch (which) {
4054 		case SESSION_ID:
4055 			result = getsid(process);
4056 			break;
4057 		case GROUP_ID:
4058 			result = getpgid(process);
4059 			break;
4060 		case PARENT_ID:
4061 			result = _getppid(process);
4062 			break;
4063 		default:
4064 			return B_BAD_VALUE;
4065 	}
4066 
4067 	return result >= 0 ? result : errno;
4068 }
4069 
4070 
/*!	\brief Syscall implementing POSIX setpgid().
	Either creates a new process group with the target process as leader, or
	moves the target process into an existing group of the same session.

	\param processID The target process, or 0 for the calling process.
	\param groupID The target group, or 0 to use \a processID.
	\return The resulting group ID on success, an error code otherwise.
*/
pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the target
	//   process as group leader.
	// * Set the target process' process group to an already existing one in the
	//   same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == NULL) {
				// This can only happen if the team is exiting.
				ASSERT(team->state >= TEAM_STATE_SHUTDOWN);
				return ESRCH;
			}

			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock in
			// the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child of
			// the calling team and in the same session. (If that's the case it
			// isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child, when the child has
			// already executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different session.
			// That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll();

		return group->id;
	}
}
4228 
4229 
/*!	\brief Syscall implementing POSIX setsid(): creates a new session (and a
	new process group) with the calling team as leader of both.

	\return The new group/session ID on success, \c B_NOT_ALLOWED if the
		calling team already is a process group leader, \c B_NO_MEMORY on
		allocation failure.
*/
pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll();

	return group->id;
}
4269 
4270 
4271 status_t
4272 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4273 {
4274 	status_t returnCode;
4275 	status_t status;
4276 
4277 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4278 		return B_BAD_ADDRESS;
4279 
4280 	status = wait_for_team(id, &returnCode);
4281 	if (status >= B_OK && _userReturnCode != NULL) {
4282 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4283 				!= B_OK)
4284 			return B_BAD_ADDRESS;
4285 		return B_OK;
4286 	}
4287 
4288 	return syscall_restart_handle_post(status);
4289 }
4290 
4291 
4292 thread_id
4293 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
4294 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
4295 	port_id errorPort, uint32 errorToken)
4296 {
4297 	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));
4298 
4299 	if (argCount < 1)
4300 		return B_BAD_VALUE;
4301 
4302 	// copy and relocate the flat arguments
4303 	char** flatArgs;
4304 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
4305 		argCount, envCount, flatArgs);
4306 	if (error != B_OK)
4307 		return error;
4308 
4309 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
4310 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
4311 		errorToken);
4312 
4313 	free(flatArgs);
4314 		// load_image_internal() unset our variable if it took over ownership
4315 
4316 	return thread;
4317 }
4318 
4319 
/*!	\brief Syscall implementing exit(): exits the calling team with the given
	return value.
	Does not return to userland -- the calling thread receives a SIGKILL,
	which the signal code forwards to the team's main thread if necessary.

	\param returnValue The team's exit status.
*/
void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status
	TeamLocker teamLocker(team);

	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();

	// Send this thread a SIGKILL. This makes sure the thread will not return to
	// userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}
4353 
4354 
/*!	\brief Implements the kill_team() syscall.

	Plain pass-through to the kernel-internal kill_team(); all validation
	and the actual work happen there.

	\param team The ID of the team to be killed.
	\return \c B_OK on success, another error code otherwise.
*/
status_t
_user_kill_team(team_id team)
{
	return kill_team(team);
}
4360 
4361 
4362 status_t
4363 _user_get_team_info(team_id id, team_info* userInfo)
4364 {
4365 	status_t status;
4366 	team_info info;
4367 
4368 	if (!IS_USER_ADDRESS(userInfo))
4369 		return B_BAD_ADDRESS;
4370 
4371 	status = _get_team_info(id, &info, sizeof(team_info));
4372 	if (status == B_OK) {
4373 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4374 			return B_BAD_ADDRESS;
4375 	}
4376 
4377 	return status;
4378 }
4379 
4380 
4381 status_t
4382 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
4383 {
4384 	status_t status;
4385 	team_info info;
4386 	int32 cookie;
4387 
4388 	if (!IS_USER_ADDRESS(userCookie)
4389 		|| !IS_USER_ADDRESS(userInfo)
4390 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4391 		return B_BAD_ADDRESS;
4392 
4393 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
4394 	if (status != B_OK)
4395 		return status;
4396 
4397 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4398 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4399 		return B_BAD_ADDRESS;
4400 
4401 	return status;
4402 }
4403 
4404 
/*!	\brief Implements the get_current_team() syscall.

	Plain pass-through to team_get_current_team_id().

	\return The ID of the calling thread's team.
*/
team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}
4410 
4411 
4412 status_t
4413 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4414 	size_t size)
4415 {
4416 	if (size != sizeof(team_usage_info))
4417 		return B_BAD_VALUE;
4418 
4419 	team_usage_info info;
4420 	status_t status = common_get_team_usage_info(team, who, &info,
4421 		B_CHECK_PERMISSION);
4422 
4423 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4424 		|| user_memcpy(userInfo, &info, size) != B_OK) {
4425 		return B_BAD_ADDRESS;
4426 	}
4427 
4428 	return status;
4429 }
4430 
4431 
/*!	\brief Implements the _get_extended_team_info() syscall.

	Collects extended information about the team \a teamID into a flattened
	KMessage and copies it to the userland buffer \a buffer. The size the
	message requires is always reported via \a _sizeNeeded; if it exceeds
	\a size, \c B_BUFFER_OVERFLOW is returned and the caller can retry with
	a larger buffer.

	\param teamID The ID of the team in question.
	\param flags Selects what information to gather; only \c B_TEAM_INFO_BASIC
		is currently supported (see TODO below).
	\param buffer Userland buffer for the flattened message. May be \c NULL
		only if \a size is 0.
	\param size Size of \a buffer in bytes.
	\param _sizeNeeded Userland pointer into which the required buffer size is
		written (also on \c B_BUFFER_OVERFLOW).
	\return \c B_OK on success, \c B_BAD_ADDRESS for invalid userland
		pointers, \c B_BAD_TEAM_ID if the team doesn't exist, \c B_NO_MEMORY
		if building the message failed, or \c B_BUFFER_OVERFLOW if the buffer
		is too small.
*/
status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// allocate memory for a copy of the needed team data
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		} teamClone;

		io_context* ioContext;
		{
			// get the team structure
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);

			// Copy the data into the local clone while the team lock is
			// held; the KMessage is filled only after the lock has been
			// released (presumably to avoid allocating while locked --
			// NOTE(review): confirm).
			teamClone.id = team->id;
			strlcpy(teamClone.name, team->Name(), sizeof(teamClone.name));
			teamClone.group_id = team->group_id;
			teamClone.session_id = team->session_id;
			teamClone.real_uid = team->real_uid;
			teamClone.real_gid = team->real_gid;
			teamClone.effective_uid = team->effective_uid;
			teamClone.effective_gid = team->effective_gid;

			// also fetch a reference to the I/O context
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		// releases our io_context reference on every return path below
		CObjectDeleter<io_context, void, vfs_put_io_context>
			ioContextPutter(ioContext);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone.id) != B_OK
			|| info.AddString("name", teamClone.name) != B_OK
			|| info.AddInt32("process group", teamClone.group_id) != B_OK
			|| info.AddInt32("session", teamClone.session_id) != B_OK
			|| info.AddInt32("uid", teamClone.real_uid) != B_OK
			|| info.AddInt32("gid", teamClone.real_gid) != B_OK
			|| info.AddInt32("euid", teamClone.effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone.effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			MutexLocker ioContextLocker(ioContext->io_mutex);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
4525