xref: /haiku/src/system/kernel/team.cpp (revision 02354704729d38c3b078c696adc1bbbd33cbcf72)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 /*!	Team functions */
13 
14 
15 #include <team.h>
16 
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22 
23 #include <OS.h>
24 
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27 
28 #include <extended_system_info_defs.h>
29 
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <safemode.h>
47 #include <sem.h>
48 #include <syscall_process_info.h>
49 #include <syscall_load_image.h>
50 #include <syscall_restart.h>
51 #include <syscalls.h>
52 #include <tls.h>
53 #include <tracing.h>
54 #include <user_runtime.h>
55 #include <user_thread.h>
56 #include <usergroup.h>
57 #include <vfs.h>
58 #include <vm/vm.h>
59 #include <vm/VMAddressSpace.h>
60 #include <util/AutoLock.h>
61 #include <util/ThreadAutoLock.h>
62 
63 #include "TeamThreadTables.h"
64 
65 
66 //#define TRACE_TEAM
67 #ifdef TRACE_TEAM
68 #	define TRACE(x) dprintf x
69 #else
70 #	define TRACE(x) ;
71 #endif
72 
73 
// Hash table key wrapper identifying a Team by its ID.
struct team_key {
	team_id id;
};
77 
// Bundle of arguments passed down to the kernel-side team loader for
// load_image()/exec*(). The flat_args buffer holds the argument and
// environment string vectors in one contiguous allocation.
struct team_arg {
	char	*path;				// path of the executable
	char	**flat_args;		// flattened argv + environment vectors
	size_t	flat_args_size;		// size of the flat_args allocation
	uint32	arg_count;			// number of argv entries
	uint32	env_count;			// number of environment entries
	mode_t	umask;				// umask for the new team
	uint32	flags;				// TEAM_ARGS_FLAG_* (see below)
	port_id	error_port;			// port for reporting load errors
	uint32	error_token;		// token identifying the request on error_port
};

// team_arg::flags: disable address space layout randomization for this team
#define TEAM_ARGS_FLAG_NO_ASLR	0x01
91 
92 
namespace {


// Notification service by which listeners are informed about team events
// (see Notify() callers for the event codes sent).
class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};


// #pragma mark - TeamTable


// team_id -> Team hash table; supports iteration while teams come and go
// via iterator entries (cf. TeamListIterator below).
typedef BKernel::TeamThreadTable<Team> TeamTable;


// #pragma mark - ProcessGroupHashDefinition


// BOpenHashTable policy mapping pid_t -> ProcessGroup.
struct ProcessGroupHashDefinition {
	typedef pid_t			KeyType;
	typedef	ProcessGroup	ValueType;

	size_t HashKey(pid_t key) const
	{
		// the group ID itself serves as the hash value
		return key;
	}

	size_t Hash(ProcessGroup* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(pid_t key, ProcessGroup* value) const
	{
		return value->id == key;
	}

	ProcessGroup*& GetLink(ProcessGroup* value) const
	{
		// intrusive link used by the hash table
		return value->next;
	}
};

typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;


}	// unnamed namespace
142 
143 
144 // #pragma mark -
145 
146 
// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static rw_spinlock sTeamHashLock = B_RW_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

// the kernel team (NOTE(review): presumably set during team subsystem
// initialization, which is outside this view -- confirm)
static Team* sKernelTeam = NULL;
// when true, user add-ons shall not be loaded (safemode setting)
static bool sDisableUserAddOns = false;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

static TeamNotificationService sNotificationService;

// address space reserved resp. initially committed for per-team user data
static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
173 
174 
175 // #pragma mark - TeamListIterator
176 
177 
178 TeamListIterator::TeamListIterator()
179 {
180 	// queue the entry
181 	InterruptsWriteSpinLocker locker(sTeamHashLock);
182 	sTeamHash.InsertIteratorEntry(&fEntry);
183 }
184 
185 
186 TeamListIterator::~TeamListIterator()
187 {
188 	// remove the entry
189 	InterruptsWriteSpinLocker locker(sTeamHashLock);
190 	sTeamHash.RemoveIteratorEntry(&fEntry);
191 }
192 
193 
194 Team*
195 TeamListIterator::Next()
196 {
197 	// get the next team -- if there is one, get reference for it
198 	InterruptsWriteSpinLocker locker(sTeamHashLock);
199 	Team* team = sTeamHash.NextElement(&fEntry);
200 	if (team != NULL)
201 		team->AcquireReference();
202 
203 	return team;
204 }
205 
206 
207 // #pragma mark - Tracing
208 
209 
210 #if TEAM_TRACING
211 namespace TeamTracing {
212 
/*!	Trace entry recording a fork(): remembers the ID of the main thread of
	the newly created child team.
*/
class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
	}

private:
	thread_id			fForkedThread;
};
230 
231 
232 class ExecTeam : public AbstractTraceEntry {
233 public:
234 	ExecTeam(const char* path, int32 argCount, const char* const* args,
235 			int32 envCount, const char* const* env)
236 		:
237 		fArgCount(argCount),
238 		fArgs(NULL)
239 	{
240 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
241 			false);
242 
243 		// determine the buffer size we need for the args
244 		size_t argBufferSize = 0;
245 		for (int32 i = 0; i < argCount; i++)
246 			argBufferSize += strlen(args[i]) + 1;
247 
248 		// allocate a buffer
249 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
250 		if (fArgs) {
251 			char* buffer = fArgs;
252 			for (int32 i = 0; i < argCount; i++) {
253 				size_t argSize = strlen(args[i]) + 1;
254 				memcpy(buffer, args[i], argSize);
255 				buffer += argSize;
256 			}
257 		}
258 
259 		// ignore env for the time being
260 		(void)envCount;
261 		(void)env;
262 
263 		Initialized();
264 	}
265 
266 	virtual void AddDump(TraceOutput& out)
267 	{
268 		out.Print("team exec, \"%p\", args:", fPath);
269 
270 		if (fArgs != NULL) {
271 			char* args = fArgs;
272 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
273 				out.Print(" \"%s\"", args);
274 				args += strlen(args) + 1;
275 			}
276 		} else
277 			out.Print(" <too long>");
278 	}
279 
280 private:
281 	char*	fPath;
282 	int32	fArgCount;
283 	char*	fArgs;
284 };
285 
286 
/*!	Returns a human-readable name for the given job control state, for use
	in trace entry dumps.
*/
static const char*
job_control_state_name(job_control_state state)
{
	switch (state) {
		case JOB_CONTROL_STATE_NONE:
			return "none";
		case JOB_CONTROL_STATE_STOPPED:
			return "stopped";
		case JOB_CONTROL_STATE_CONTINUED:
			return "continued";
		case JOB_CONTROL_STATE_DEAD:
			return "dead";
		default:
			return "invalid";
	}
}
303 
304 
/*!	Trace entry recording a job control state change of a team, including
	the signal (if any) that triggered it.
*/
class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		// a NULL signal is recorded as signal number 0
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %" B_PRId32 ", "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;
};
328 
329 
/*!	Trace entry recording the start of a wait-for-child operation
	(waitpid() and friends), with its target child and flags.
*/
class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %" B_PRId32 ", "
			"flags: %#" B_PRIx32, fChild, fFlags);
	}

private:
	pid_t	fChild;
	uint32	fFlags;
};
350 
351 
352 class WaitForChildDone : public AbstractTraceEntry {
353 public:
354 	WaitForChildDone(const job_control_entry& entry)
355 		:
356 		fState(entry.state),
357 		fTeam(entry.thread),
358 		fStatus(entry.status),
359 		fReason(entry.reason),
360 		fSignal(entry.signal)
361 	{
362 		Initialized();
363 	}
364 
365 	WaitForChildDone(status_t error)
366 		:
367 		fTeam(error)
368 	{
369 		Initialized();
370 	}
371 
372 	virtual void AddDump(TraceOutput& out)
373 	{
374 		if (fTeam >= 0) {
375 			out.Print("team wait for child done, team: %" B_PRId32 ", "
376 				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
377 				fTeam, job_control_state_name(fState), fStatus, fReason,
378 				fSignal);
379 		} else {
380 			out.Print("team wait for child failed, error: "
381 				"%#" B_PRIx32 ", ", fTeam);
382 		}
383 	}
384 
385 private:
386 	job_control_state	fState;
387 	team_id				fTeam;
388 	status_t			fStatus;
389 	uint16				fReason;
390 	uint16				fSignal;
391 };
392 
393 }	// namespace TeamTracing
394 
395 #	define T(x) new(std::nothrow) TeamTracing::x;
396 #else
397 #	define T(x) ;
398 #endif
399 
400 
401 //	#pragma mark - TeamNotificationService
402 
403 
// Registers the service under the name "teams".
TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}
408 
409 
410 void
411 TeamNotificationService::Notify(uint32 eventCode, Team* team)
412 {
413 	char eventBuffer[128];
414 	KMessage event;
415 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
416 	event.AddInt32("event", eventCode);
417 	event.AddInt32("team", team->id);
418 	event.AddPointer("teamStruct", team);
419 
420 	DefaultNotificationService::Notify(event, eventCode);
421 }
422 
423 
424 //	#pragma mark - Team
425 
426 
/*!	Constructs a team with the given ID, initializing all fields to their
	empty/default state. Two allocations are made here -- the job control
	entry and the queued signals counter -- whose failure is not reported;
	Create() checks both for NULL afterwards.

	\param id The new team's ID.
	\param kernel Whether this will be the kernel team.
*/
Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;
	serial_number = -1;

	// init mutex -- user teams encode their ID in the lock name; the name is
	// cloned, since lockName lives on the stack
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	hash_next = siblings_next = children = parent = NULL;
	fName[0] = '\0';
	fArgs[0] = '\0';
	num_threads = 0;
	io_context = NULL;
	address_space = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	thread_list = NULL;
	main_thread = NULL;
	loading_info = NULL;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	death_entry = NULL;
	user_data_area = -1;
	user_data = 0;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	commpage_address = NULL;

	supplementary_groups = NULL;
	supplementary_group_count = 0;

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;

	// dead threads
	list_init(&dead_threads);

	// dead children
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// job control entry -- may be NULL on allocation failure (checked in
	// Create())
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	list_init(&sem_list);
	list_init_etc(&port_list, port_team_link_offset());
	list_init(&image_list);
	list_init(&watcher_list);

	clear_team_debug_info(&debug_info, true);

	// init dead/stopped/continued children condition vars
	dead_children.condition_variable.Init(&dead_children, "team children");

	B_INITIALIZE_SPINLOCK(&time_lock);
	B_INITIALIZE_SPINLOCK(&signal_lock);

	// the kernel team has no queued-signals limit; may be NULL on allocation
	// failure (checked in Create())
	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));

	fUserDefinedTimerCount = 0;

	fCoreDumpCondition = NULL;
}
512 
513 
/*!	Destroys the team, releasing all still associated resources: I/O
	context, ports, semaphores, timers, pending signals, death entries and
	the remaining free user thread structures.
*/
Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	if (io_context != NULL)
		vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	// delete all (not just the user-defined) timers
	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	// free the death entries of our dead threads
	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	// delete the job control entries of dead children no one waited for
	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	// free the singly-linked list of recyclable user thread structures
	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	malloc_referenced_release(supplementary_groups);

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}
551 
552 
553 /*static*/ Team*
554 Team::Create(team_id id, const char* name, bool kernel)
555 {
556 	// create the team object
557 	Team* team = new(std::nothrow) Team(id, kernel);
558 	if (team == NULL)
559 		return NULL;
560 	ObjectDeleter<Team> teamDeleter(team);
561 
562 	if (name != NULL)
563 		team->SetName(name);
564 
565 	// check initialization
566 	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
567 		return NULL;
568 
569 	// finish initialization (arch specifics)
570 	if (arch_team_init_team_struct(team, kernel) != B_OK)
571 		return NULL;
572 
573 	if (!kernel) {
574 		status_t error = user_timer_create_team_timers(team);
575 		if (error != B_OK)
576 			return NULL;
577 	}
578 
579 	// everything went fine
580 	return teamDeleter.Detach();
581 }
582 
583 
584 /*!	\brief Returns the team with the given ID.
585 	Returns a reference to the team.
586 	Team and thread spinlock must not be held.
587 */
588 /*static*/ Team*
589 Team::Get(team_id id)
590 {
591 	if (id == B_CURRENT_TEAM) {
592 		Team* team = thread_get_current_thread()->team;
593 		team->AcquireReference();
594 		return team;
595 	}
596 
597 	InterruptsReadSpinLocker locker(sTeamHashLock);
598 	Team* team = sTeamHash.Lookup(id);
599 	if (team != NULL)
600 		team->AcquireReference();
601 	return team;
602 }
603 
604 
605 /*!	\brief Returns the team with the given ID in a locked state.
606 	Returns a reference to the team.
607 	Team and thread spinlock must not be held.
608 */
609 /*static*/ Team*
610 Team::GetAndLock(team_id id)
611 {
612 	// get the team
613 	Team* team = Get(id);
614 	if (team == NULL)
615 		return NULL;
616 
617 	// lock it
618 	team->Lock();
619 
620 	// only return the team, when it isn't already dying
621 	if (team->state >= TEAM_STATE_SHUTDOWN) {
622 		team->Unlock();
623 		team->ReleaseReference();
624 		return NULL;
625 	}
626 
627 	return team;
628 }
629 
630 
/*!	Locks the team and its parent team (if any).
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked. If the
	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
	only the team itself is locked.

	\param dontLockParentIfKernel If \c true, the team's parent team is only
		locked, if it is not the kernel team.
*/
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent. This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);

		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}
673 
674 
675 /*!	Unlocks the team and its parent team (if any).
676 */
677 void
678 Team::UnlockTeamAndParent()
679 {
680 	if (parent != NULL)
681 		parent->Unlock();
682 
683 	Unlock();
684 }
685 
686 
/*!	Locks the team, its parent team (if any), and the team's process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked.
*/
void
Team::LockTeamParentAndProcessGroup()
{
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's lock.
	// If we have a parent, try to lock it.
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest of
	// the job. (The group's lock remains held throughout.)
	Unlock();
	LockTeamAndParent(false);
}
707 
708 
709 /*!	Unlocks the team, its parent team (if any), and the team's process group.
710 */
711 void
712 Team::UnlockTeamParentAndProcessGroup()
713 {
714 	group->Unlock();
715 
716 	if (parent != NULL)
717 		parent->Unlock();
718 
719 	Unlock();
720 }
721 
722 
/*!	Locks the team and its process group, observing the locking order
	group -> team. Analogous to LockTeamAndParent(), a trial and error loop
	is used, since the team's group can change while the team is unlocked.
	The caller must hold a reference to the team.
*/
void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);

		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}
754 
755 
/*!	Unlocks the team and its process group, i.e. the counterpart to
	LockTeamAndProcessGroup().
*/
void
Team::UnlockTeamAndProcessGroup()
{
	// release in reverse locking order
	group->Unlock();
	Unlock();
}
762 
763 
764 void
765 Team::SetName(const char* name)
766 {
767 	if (const char* lastSlash = strrchr(name, '/'))
768 		name = lastSlash + 1;
769 
770 	strlcpy(fName, name, B_OS_NAME_LENGTH);
771 }
772 
773 
/*!	Sets the team's argument string; longer strings are silently truncated
	to the size of the fArgs buffer.
*/
void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}
779 
780 
781 void
782 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
783 {
784 	fArgs[0] = '\0';
785 	strlcpy(fArgs, path, sizeof(fArgs));
786 	for (int i = 0; i < otherArgCount; i++) {
787 		strlcat(fArgs, " ", sizeof(fArgs));
788 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
789 	}
790 }
791 
792 
/*!	Resets the team's signal management state for an exec*().
*/
void
Team::ResetSignalsOnExec()
{
	// We are supposed to keep pending signals. Signal actions shall be reset
	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
	// (for SIGCHLD it's implementation-defined). Others shall be reset to
	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
	// flags, but since there aren't any handlers, they make little sense, so
	// we clear them.

	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
		struct sigaction& action = SignalActionFor(i);
		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
			action.sa_handler = SIG_DFL;

		action.sa_mask = 0;
		action.sa_flags = 0;
		action.sa_userdata = NULL;
	}
}
813 
814 
/*!	Copies all signal actions from the given \a parent team to this team.
*/
void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}
820 
821 
/*!	Adds the given user timer to the team and, if user-defined, assigns it an
	ID.

	The caller must hold the team's lock.

	\param timer The timer to be added. If it doesn't have an ID yet, it is
		considered user-defined and will be assigned an ID.
	\return \c B_OK, if the timer was added successfully, another error code
		otherwise.
*/
status_t
Team::AddUserTimer(UserTimer* timer)
{
	// don't allow addition of timers when already shutting the team down
	if (state >= TEAM_STATE_SHUTDOWN)
		return B_BAD_TEAM_ID;

	// If the timer is user-defined, check timer limit and increment
	// user-defined count. (A negative ID marks a timer that hasn't been
	// assigned one yet.)
	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
		return EAGAIN;

	fUserTimers.AddTimer(timer);

	return B_OK;
}
848 
849 
/*!	Removes the given user timer from the team.

	The caller must hold the team's lock.

	\param timer The timer to be removed.
*/
void
Team::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	// if it was a user-defined timer, release one slot of the per-team limit
	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		UserDefinedTimersRemoved(1);
}
865 
866 
867 /*!	Deletes all (or all user-defined) user timers of the team.
868 
869 	Timer's belonging to the team's threads are not affected.
870 	The caller must hold the team's lock.
871 
872 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
873 		otherwise all timers are deleted.
874 */
875 void
876 Team::DeleteUserTimers(bool userDefinedOnly)
877 {
878 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
879 	UserDefinedTimersRemoved(count);
880 }
881 
882 
883 /*!	If not at the limit yet, increments the team's user-defined timer count.
884 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
885 */
886 bool
887 Team::CheckAddUserDefinedTimer()
888 {
889 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
890 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
891 		atomic_add(&fUserDefinedTimerCount, -1);
892 		return false;
893 	}
894 
895 	return true;
896 }
897 
898 
/*!	Subtracts the given count from the team's user-defined timer count.
	\param count The count to subtract.
*/
void
Team::UserDefinedTimersRemoved(int32 count)
{
	atomic_add(&fUserDefinedTimerCount, -count);
}
907 
908 
/*!	Deactivates all of the team's CPU-time and user-CPU-time user timers.
	NOTE(review): the loops rely on Deactivate() removing the timer from the
	respective list -- otherwise they would not terminate; confirm against
	the UserTimer implementation.
*/
void
Team::DeactivateCPUTimeUserTimers()
{
	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();

	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
		timer->Deactivate();
}
918 
919 
/*!	Returns the team's current total CPU time (kernel + user + offset).

	The caller must hold \c time_lock.

	\param ignoreCurrentRun If \c true and the current thread is one team's
		threads, don't add the time since the last time \c last_time was
		updated. Should be used in "thread unscheduled" scheduler callbacks,
		since although the thread is still running at that time, its time has
		already been stopped.
	\param lockedThread A thread of this team whose \c time_lock the caller
		already holds, or \c NULL. That thread's lock is not acquired (nor
		released) here.
	\return The team's current total CPU time.
*/
bigtime_t
Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
{
	// start with the time accumulated by threads that already died
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		bool alreadyLocked = thread == lockedThread;
		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
		time += thread->kernel_time + thread->user_time;

		// add the time accumulated since last_time was last updated (a zero
		// last_time means there's nothing to add)
		if (thread->last_time != 0) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}

		// don't unlock the lock we didn't acquire ourselves
		if (alreadyLocked)
			threadTimeLocker.Detach();
	}

	return time;
}
957 
958 
/*!	Returns the team's current user CPU time.

	The caller must hold \c time_lock.

	\return The team's current user CPU time.
*/
bigtime_t
Team::UserCPUTime() const
{
	// start with the user time accumulated by threads that already died
	bigtime_t time = dead_threads_user_time;

	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->user_time;

		// only add the current run's time, if the thread is running in
		// userland (kernel time doesn't count here)
		if (thread->last_time != 0 && !thread->in_kernel)
			time += now - thread->last_time;
	}

	return time;
}
983 
984 
985 //	#pragma mark - ProcessGroup
986 
987 
988 ProcessGroup::ProcessGroup(pid_t id)
989 	:
990 	id(id),
991 	teams(NULL),
992 	fSession(NULL),
993 	fInOrphanedCheckList(false)
994 {
995 	char lockName[32];
996 	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
997 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
998 }
999 
1000 
/*!	Destroys the process group, removing it from the orphaned check list
	(if queued there), from the group hash table, and releasing the session
	reference (if it was published).
*/
ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session
	// (a NULL fSession means the group was never published)
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}
1024 
1025 
1026 /*static*/ ProcessGroup*
1027 ProcessGroup::Get(pid_t id)
1028 {
1029 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1030 	ProcessGroup* group = sGroupHash.Lookup(id);
1031 	if (group != NULL)
1032 		group->AcquireReference();
1033 	return group;
1034 }
1035 
1036 
1037 /*!	Adds the group the given session and makes it publicly accessible.
1038 	The caller must not hold the process group hash lock.
1039 */
1040 void
1041 ProcessGroup::Publish(ProcessSession* session)
1042 {
1043 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1044 	PublishLocked(session);
1045 }
1046 
1047 
/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must hold the process group hash lock.
*/
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	// the group must not have been published before
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	fSession = session;
	fSession->AcquireReference();

	sGroupHash.InsertUnchecked(this);
}
1061 
1062 
/*!	Checks whether the process group is orphaned.
	The caller must hold the group's lock.
	\return \c true, if the group is orphaned, \c false otherwise.
*/
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	// check each member team until one with a disqualifying parent is found
	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		// a parent in the same session but a different group disproves
		// orphanhood
		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}
1092 
1093 
1094 void
1095 ProcessGroup::ScheduleOrphanedCheck()
1096 {
1097 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1098 
1099 	if (!fInOrphanedCheckList) {
1100 		sOrphanedCheckProcessGroups.Add(this);
1101 		fInOrphanedCheckList = true;
1102 	}
1103 }
1104 
1105 
/*!	Clears the group's orphaned-check flag. NOTE(review): presumably the
	caller has removed the group from sOrphanedCheckProcessGroups and holds
	sOrphanedCheckLock -- confirm with callers.
*/
void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}
1111 
1112 
1113 //	#pragma mark - ProcessSession
1114 
1115 
1116 ProcessSession::ProcessSession(pid_t id)
1117 	:
1118 	id(id),
1119 	controlling_tty(-1),
1120 	foreground_group(-1)
1121 {
1122 	char lockName[32];
1123 	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
1124 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
1125 }
1126 
1127 
// Destroys the session's lock; nothing else to clean up.
ProcessSession::~ProcessSession()
{
	mutex_destroy(&fLock);
}
1132 
1133 
1134 //	#pragma mark - KDL functions
1135 
1136 
/*!	Prints the interesting fields of the given team to the kernel debugger
	output. Helper for the "team" KDL command.
*/
static void
_dump_team_info(Team* team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
		team->id);
	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
	kprintf("name:             '%s'\n", team->Name());
	kprintf("args:             '%s'\n", team->Args());
	kprintf("hash_next:        %p\n", team->hash_next);
	kprintf("parent:           %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:         %p\n", team->children);
	kprintf("num_threads:      %d\n", team->num_threads);
	kprintf("state:            %d\n", team->state);
	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
	kprintf("io_context:       %p\n", team->io_context);
	if (team->address_space)
		kprintf("address_space:    %p\n", team->address_space);
	kprintf("user data:        %p (area %" B_PRId32 ")\n",
		(void*)team->user_data, team->user_data_area);
	kprintf("free user thread: %p\n", team->free_user_threads);
	kprintf("main_thread:      %p\n", team->main_thread);
	kprintf("thread_list:      %p\n", team->thread_list);
	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
}
1168 
1169 
/*!	KDL command: dumps info about a team identified by address, ID, or name;
	without an argument, the current team is dumped. (Runs in the kernel
	debugger -- no locking needed or possible.)
*/
static int
dump_team_info(int argc, char** argv)
{
	ulong arg;
	bool found = false;

	// no argument -- dump the current thread's team
	if (argc < 2) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	arg = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(arg)) {
		// semi-hack: interpret a kernel address directly as a Team pointer
		_dump_team_info((Team*)arg);
		return 0;
	}

	// walk through the team table, trying to match name or id
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == (team_id)arg) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found)
		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
	return 0;
}
1207 
1208 
/*!	KDL command: lists all teams with their address, ID, parent, and name.
*/
static int
dump_teams(int argc, char** argv)
{
	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
		B_PRINTF_POINTER_WIDTH, "parent");

	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
	}

	return 0;
}
1222 
1223 
1224 //	#pragma mark - Private functions
1225 
1226 
1227 /*! Get the parent of a given process.
1228 
1229 	Used in the implementation of getppid (where a process can get its own
1230 	parent, only) as well as in user_process_info where the information is
1231 	available to anyone (allowing to display a tree of running processes)
1232 */
1233 static pid_t
1234 _getppid(pid_t id)
1235 {
1236 	if (id < 0) {
1237 		errno = EINVAL;
1238 		return -1;
1239 	}
1240 
1241 	if (id == 0) {
1242 		Team* team = thread_get_current_thread()->team;
1243 		TeamLocker teamLocker(team);
1244 		if (team->parent == NULL) {
1245 			errno = EINVAL;
1246 			return -1;
1247 		}
1248 		return team->parent->id;
1249 	}
1250 
1251 	Team* team = Team::GetAndLock(id);
1252 	if (team == NULL) {
1253 		errno = ESRCH;
1254 		return -1;
1255 	}
1256 
1257 	pid_t parentID;
1258 
1259 	if (team->parent == NULL) {
1260 		errno = EINVAL;
1261 		parentID = -1;
1262 	} else
1263 		parentID = team->parent->id;
1264 
1265 	team->UnlockAndReleaseReference();
1266 
1267 	return parentID;
1268 }
1269 
1270 
1271 /*!	Inserts team \a team into the child list of team \a parent.
1272 
1273 	The caller must hold the lock of both \a parent and \a team.
1274 
1275 	\param parent The parent team.
1276 	\param team The team to be inserted into \a parent's child list.
1277 */
1278 static void
1279 insert_team_into_parent(Team* parent, Team* team)
1280 {
1281 	ASSERT(parent != NULL);
1282 
1283 	team->siblings_next = parent->children;
1284 	parent->children = team;
1285 	team->parent = parent;
1286 }
1287 
1288 
1289 /*!	Removes team \a team from the child list of team \a parent.
1290 
1291 	The caller must hold the lock of both \a parent and \a team.
1292 
1293 	\param parent The parent team.
1294 	\param team The team to be removed from \a parent's child list.
1295 */
1296 static void
1297 remove_team_from_parent(Team* parent, Team* team)
1298 {
1299 	Team* child;
1300 	Team* last = NULL;
1301 
1302 	for (child = parent->children; child != NULL;
1303 			child = child->siblings_next) {
1304 		if (child == team) {
1305 			if (last == NULL)
1306 				parent->children = child->siblings_next;
1307 			else
1308 				last->siblings_next = child->siblings_next;
1309 
1310 			team->parent = NULL;
1311 			break;
1312 		}
1313 		last = child;
1314 	}
1315 }
1316 
1317 
1318 /*!	Returns whether the given team is a session leader.
1319 	The caller must hold the team's lock or its process group's lock.
1320 */
1321 static bool
1322 is_session_leader(Team* team)
1323 {
1324 	return team->session_id == team->id;
1325 }
1326 
1327 
1328 /*!	Returns whether the given team is a process group leader.
1329 	The caller must hold the team's lock or its process group's lock.
1330 */
1331 static bool
1332 is_process_group_leader(Team* team)
1333 {
1334 	return team->group_id == team->id;
1335 }
1336 
1337 
1338 /*!	Inserts the given team into the given process group.
1339 	The caller must hold the process group's lock, the team's lock, and the
1340 	team's parent's lock.
1341 */
1342 static void
1343 insert_team_into_group(ProcessGroup* group, Team* team)
1344 {
1345 	team->group = group;
1346 	team->group_id = group->id;
1347 	team->session_id = group->Session()->id;
1348 
1349 	team->group_next = group->teams;
1350 	group->teams = team;
1351 	group->AcquireReference();
1352 }
1353 
1354 
1355 /*!	Removes the given team from its process group.
1356 
1357 	The caller must hold the process group's lock, the team's lock, and the
1358 	team's parent's lock. Interrupts must be enabled.
1359 
1360 	\param team The team that'll be removed from its process group.
1361 */
1362 static void
1363 remove_team_from_group(Team* team)
1364 {
1365 	ProcessGroup* group = team->group;
1366 	Team* current;
1367 	Team* last = NULL;
1368 
1369 	// the team must be in a process group to let this function have any effect
1370 	if  (group == NULL)
1371 		return;
1372 
1373 	for (current = group->teams; current != NULL;
1374 			current = current->group_next) {
1375 		if (current == team) {
1376 			if (last == NULL)
1377 				group->teams = current->group_next;
1378 			else
1379 				last->group_next = current->group_next;
1380 
1381 			team->group = NULL;
1382 			break;
1383 		}
1384 		last = current;
1385 	}
1386 
1387 	team->group = NULL;
1388 	team->group_next = NULL;
1389 
1390 	group->ReleaseReference();
1391 }
1392 
1393 
/*!	Creates the team's "user area", the userland region from which per-thread
	\c user_thread structures are later allocated (see
	team_allocate_user_thread() usage elsewhere in this file).

	\param team The team to create the user data area for.
	\param exactAddress If not \c NULL, the area is created at exactly this
		address (used by fork_team() to mirror the parent's layout);
		otherwise a randomized base above \c KERNEL_USER_DATA_BASE is chosen.
	\return \c B_OK on success, an error code (the failed area ID) otherwise.
*/
static status_t
create_team_user_data(Team* team, void* exactAddress = NULL)
{
	void* address;
	uint32 addressSpec;

	if (exactAddress != NULL) {
		address = exactAddress;
		addressSpec = B_EXACT_ADDRESS;
	} else {
		address = (void*)KERNEL_USER_DATA_BASE;
		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
	}

	// Reserve the full range the area may later grow into; the area itself
	// starts out smaller (kTeamUserDataInitialSize).
	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);

	virtual_address_restrictions virtualRestrictions = {};
	if (result == B_OK || exactAddress != NULL) {
		// Either the reservation succeeded or an exact address was requested
		// -- create the area exactly there (for exactAddress even if the
		// reservation failed).
		if (exactAddress != NULL)
			virtualRestrictions.address = exactAddress;
		else
			virtualRestrictions.address = address;
		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	} else {
		// reservation failed -- let create_area_etc() pick a randomized base
		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
	}

	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area",
		kTeamUserDataInitialSize, B_FULL_LOCK,
		B_READ_AREA | B_WRITE_AREA | B_KERNEL_AREA, 0, 0,
		&virtualRestrictions, &physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	// initialize the allocation bookkeeping for the new area
	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = kTeamUserDataInitialSize;
	team->free_user_threads = NULL;

	return B_OK;
}
1438 
1439 
1440 static void
1441 delete_team_user_data(Team* team)
1442 {
1443 	if (team->user_data_area >= 0) {
1444 		vm_delete_area(team->id, team->user_data_area, true);
1445 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1446 			kTeamUserDataReservedSize);
1447 
1448 		team->user_data = 0;
1449 		team->used_user_data = 0;
1450 		team->user_data_size = 0;
1451 		team->user_data_area = -1;
1452 		while (free_user_thread* entry = team->free_user_threads) {
1453 			team->free_user_threads = entry->next;
1454 			free(entry);
1455 		}
1456 	}
1457 }
1458 
1459 
1460 static status_t
1461 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1462 	int32 argCount, int32 envCount, char**& _flatArgs)
1463 {
1464 	if (argCount < 0 || envCount < 0)
1465 		return B_BAD_VALUE;
1466 
1467 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1468 		return B_TOO_MANY_ARGS;
1469 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1470 		return B_BAD_VALUE;
1471 
1472 	if (!IS_USER_ADDRESS(userFlatArgs))
1473 		return B_BAD_ADDRESS;
1474 
1475 	// allocate kernel memory
1476 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1477 	if (flatArgs == NULL)
1478 		return B_NO_MEMORY;
1479 
1480 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1481 		free(flatArgs);
1482 		return B_BAD_ADDRESS;
1483 	}
1484 
1485 	// check and relocate the array
1486 	status_t error = B_OK;
1487 	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
1488 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1489 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1490 		if (i == argCount || i == argCount + envCount + 1) {
1491 			// check array null termination
1492 			if (flatArgs[i] != NULL) {
1493 				error = B_BAD_VALUE;
1494 				break;
1495 			}
1496 		} else {
1497 			// check string
1498 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1499 			size_t maxLen = stringEnd - arg;
1500 			if (arg < stringBase || arg >= stringEnd
1501 					|| strnlen(arg, maxLen) == maxLen) {
1502 				error = B_BAD_VALUE;
1503 				break;
1504 			}
1505 
1506 			flatArgs[i] = arg;
1507 		}
1508 	}
1509 
1510 	if (error == B_OK)
1511 		_flatArgs = flatArgs;
1512 	else
1513 		free(flatArgs);
1514 
1515 	return error;
1516 }
1517 
1518 
1519 static void
1520 free_team_arg(struct team_arg* teamArg)
1521 {
1522 	if (teamArg != NULL) {
1523 		free(teamArg->flat_args);
1524 		free(teamArg->path);
1525 		free(teamArg);
1526 	}
1527 }
1528 
1529 
1530 static status_t
1531 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1532 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1533 	port_id port, uint32 token)
1534 {
1535 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1536 	if (teamArg == NULL)
1537 		return B_NO_MEMORY;
1538 
1539 	teamArg->path = strdup(path);
1540 	if (teamArg->path == NULL) {
1541 		free(teamArg);
1542 		return B_NO_MEMORY;
1543 	}
1544 
1545 	// copy the args over
1546 	teamArg->flat_args = flatArgs;
1547 	teamArg->flat_args_size = flatArgsSize;
1548 	teamArg->arg_count = argCount;
1549 	teamArg->env_count = envCount;
1550 	teamArg->flags = 0;
1551 	teamArg->umask = umask;
1552 	teamArg->error_port = port;
1553 	teamArg->error_token = token;
1554 
1555 	// determine the flags from the environment
1556 	const char* const* env = flatArgs + argCount + 1;
1557 	for (int32 i = 0; i < envCount; i++) {
1558 		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
1559 			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
1560 			break;
1561 		}
1562 	}
1563 
1564 	*_teamArg = teamArg;
1565 	return B_OK;
1566 }
1567 
1568 
/*!	Performs the in-team part of starting a new (userland) team: runs as the
	first code of the team's main thread.

	Copies the program arguments and the \c user_space_program_args structure
	onto the newly created user stack, clones and registers the commpage,
	loads the runtime_loader image, and finally enters userspace (in which
	case this function does not return). \a args (a \c team_arg structure) is
	always consumed, either here or in the early error paths.

	\param args The \c team_arg structure created by create_team_arg().
	\return An error code if any setup step fails; the caller
		(team_create_thread_start()) then terminates the thread.
*/
static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
		thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size								| usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
	// TLS_SIZE							| TLS data
	// sizeof(user_space_program_args)	| argument structure for the runtime
	//									| loader
	// flat arguments size				| flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	// the program args structure sits directly above the stack and TLS block
	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	// copy the program args structure and the flat arguments onto the user
	// stack
	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(&programArgs->disable_user_addons,
			&sDisableUserAddOns, sizeof(bool)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// set team args and update state
	// (flat_args + 1/argCount - 1: the path is passed separately)
	team->Lock();
	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	// Clone commpage area
	area_id commPageArea = clone_commpage_area(team->id,
		&team->commpage_address);
	if (commPageArea  < B_OK) {
		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
			strerror(commPageArea)));
		return commPageArea;
	}

	// Register commpage image
	image_id commPageImage = get_commpage_image();
	extended_image_info imageInfo;
	err = get_image_info(commPageImage, &imageInfo.basic_info);
	if (err != B_OK) {
		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
			strerror(err)));
		return err;
	}
	imageInfo.basic_info.text = team->commpage_address;
	imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
	imageInfo.symbol_table = NULL;
	imageInfo.symbol_hash = NULL;
	imageInfo.string_table = NULL;
	image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
	if (image < 0) {
		TRACE(("team_create_thread_start: register_image() failed: %s\n",
			strerror(image)));
		return image;
	}

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	// enter userspace -- returns only in case of error
	return thread_enter_userspace_new_team(thread, (addr_t)entry,
		programArgs, team->commpage_address);
}
1711 
1712 
/*!	Entry point of a new team's main thread (also invoked by exec_team()).

	Delegates the work to team_create_thread_start_internal(). That function
	only returns on error, in which case the team's exit info is initialized
	accordingly and the thread is terminated.
*/
static status_t
team_create_thread_start(void* args)
{
	team_create_thread_start_internal(args);
	team_init_exit_info_on_error(thread_get_current_thread()->team);
	thread_exit();
		// does not return
	return B_OK;
}
1722 
1723 
/*!	Creates a new userland team executing the program given by the flat
	arguments (\c _flatArgs[0] is the executable's path), as a child of the
	team \a parentID.

	\param _flatArgs The flat argument/environment vector (as produced by
		copy_user_process_args()). On success ownership is transferred to the
		new team and the pointer is set to \c NULL.
	\param flatArgsSize Size of the flat arguments buffer in bytes.
	\param argCount Number of program arguments.
	\param envCount Number of environment variables.
	\param priority Currently unused -- the main thread is created with
		\c B_NORMAL_PRIORITY.
	\param parentID ID of the team that becomes the new team's parent.
	\param flags \c B_WAIT_TILL_LOADED to block until the loader reported the
		image as loaded (or failed).
	\param errorPort Port the runtime loader shall report errors to.
	\param errorToken Token identifying this request on \a errorPort.
	\return The ID of the new team's main thread, or an error code.
*/
static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	thread_id thread;
	status_t status;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	ConditionVariableEntry loadingWaitEntry;
	io_context* parentIOContext = NULL;
	team_id teamID;
	bool teamLimitReached = false;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
		"\n", path, flatArgs, argCount));

	// cut the path from the main thread name
	const char* threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the main thread object
	Thread* mainThread;
	status = Thread::Create(threadName, mainThread);
	if (status != B_OK)
		return status;
	BReference<Thread> mainThreadReference(mainThread, true);

	// create team object
	Team* team = Team::Create(mainThread->id, path, false);
	if (team == NULL)
		return B_NO_MEMORY;
	BReference<Team> teamReference(team, true);

	// prepare the wait-for-loader bookkeeping, if requested
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		loadingInfo.condition.Init(team, "image load");
		loadingInfo.condition.Add(&loadingWaitEntry);
		loadingInfo.result = B_ERROR;
		team->loading_info = &loadingInfo;
	}

	// get the parent team
	Team* parent = Team::Get(parentID);
	if (parent == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> parentReference(parent, true);

	parent->LockTeamAndProcessGroup();
	team->Lock();

	// inherit the parent's user/group
	inherit_parent_user_and_group(team, parent);

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err2;

	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// insert the team into its parent and the teams hash
	parent->LockTeamAndProcessGroup();
	team->Lock();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);

		sTeamHash.Insert(team);
		teamLimitReached = sUsedTeams >= sMaxTeams;
		if (!teamLimitReached)
			sUsedTeams++;
	}

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err6;
	}

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err6;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		if (mainThread != NULL) {
			// resume the team's main thread
			thread_continue(mainThread);
		}

		// Now wait until loading is finished. We will be woken either by the
		// thread, when it finished or aborted loading, or when the team is
		// going to die (e.g. is killed). In either case the one notifying is
		// responsible for unsetting `loading_info` in the team structure.
		loadingWaitEntry.Wait();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(teamID);

	return thread;

err6:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
		sTeamHash.Remove(team);
		if (!teamLimitReached)
			sUsedTeams--;
	}

	sNotificationService.Notify(TEAM_REMOVED, team);

	delete_team_user_data(team);
err4:
	team->address_space->Put();
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	return status;
}
1935 
1936 
/*!	Almost shuts down the current team and loads a new image into it.
	If successful, this function does not return and will take over ownership
	of the arguments provided.
	This function may only be called in a userland team (caused by one of the
	exec*() syscalls).

	\param path Path of the new executable; also becomes the new team and
		main thread name.
	\param _flatArgs The flat argument/environment vector; on success
		ownership is transferred and the pointer is set to \c NULL.
	\param flatArgsSize Size of the flat arguments buffer in bytes.
	\param argCount Number of program arguments.
	\param envCount Number of environment variables.
	\param umask The umask to hand to the new image via the program args.
	\return An error code -- only in the error case.
*/
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
		team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	// Tear down the old image's userland state: areas, semaphores, ports,
	// images, CLOEXEC fds, ...
	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	// update ASLR
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
			// does never return
	} else
		free_team_arg(teamArgs);

	// Sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that).
	exit_thread(status);

	// We return a status here since the signal that is sent by the
	// call above is not immediately handled.
	return B_ERROR;
}
2083 
2084 
2085 static thread_id
2086 fork_team(void)
2087 {
2088 	Thread* parentThread = thread_get_current_thread();
2089 	Team* parentTeam = parentThread->team;
2090 	Team* team;
2091 	arch_fork_arg* forkArgs;
2092 	struct area_info info;
2093 	thread_id threadID;
2094 	status_t status;
2095 	ssize_t areaCookie;
2096 	bool teamLimitReached = false;
2097 
2098 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2099 
2100 	if (parentTeam == team_get_kernel_team())
2101 		return B_NOT_ALLOWED;
2102 
2103 	// create a new team
2104 	// TODO: this is very similar to load_image_internal() - maybe we can do
2105 	// something about it :)
2106 
2107 	// create the main thread object
2108 	Thread* thread;
2109 	status = Thread::Create(parentThread->name, thread);
2110 	if (status != B_OK)
2111 		return status;
2112 	BReference<Thread> threadReference(thread, true);
2113 
2114 	// create the team object
2115 	team = Team::Create(thread->id, NULL, false);
2116 	if (team == NULL)
2117 		return B_NO_MEMORY;
2118 
2119 	parentTeam->LockTeamAndProcessGroup();
2120 	team->Lock();
2121 
2122 	team->SetName(parentTeam->Name());
2123 	team->SetArgs(parentTeam->Args());
2124 
2125 	team->commpage_address = parentTeam->commpage_address;
2126 
2127 	// Inherit the parent's user/group.
2128 	inherit_parent_user_and_group(team, parentTeam);
2129 
2130 	// inherit signal handlers
2131 	team->InheritSignalActions(parentTeam);
2132 
2133 	team->Unlock();
2134 	parentTeam->UnlockTeamAndProcessGroup();
2135 
2136 	// inherit some team debug flags
2137 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2138 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2139 
2140 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2141 	if (forkArgs == NULL) {
2142 		status = B_NO_MEMORY;
2143 		goto err1;
2144 	}
2145 
2146 	// create a new io_context for this team
2147 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2148 	if (!team->io_context) {
2149 		status = B_NO_MEMORY;
2150 		goto err2;
2151 	}
2152 
2153 	// duplicate the realtime sem context
2154 	if (parentTeam->realtime_sem_context) {
2155 		team->realtime_sem_context = clone_realtime_sem_context(
2156 			parentTeam->realtime_sem_context);
2157 		if (team->realtime_sem_context == NULL) {
2158 			status = B_NO_MEMORY;
2159 			goto err2;
2160 		}
2161 	}
2162 
2163 	// create an address space for this team
2164 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2165 		&team->address_space);
2166 	if (status < B_OK)
2167 		goto err3;
2168 
2169 	// copy all areas of the team
2170 	// TODO: should be able to handle stack areas differently (ie. don't have
2171 	// them copy-on-write)
2172 
2173 	areaCookie = 0;
2174 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2175 		if (info.area == parentTeam->user_data_area) {
2176 			// don't clone the user area; just create a new one
2177 			status = create_team_user_data(team, info.address);
2178 			if (status != B_OK)
2179 				break;
2180 
2181 			thread->user_thread = team_allocate_user_thread(team);
2182 		} else {
2183 			void* address;
2184 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2185 				&address, B_CLONE_ADDRESS, info.area);
2186 			if (area < B_OK) {
2187 				status = area;
2188 				break;
2189 			}
2190 
2191 			if (info.area == parentThread->user_stack_area)
2192 				thread->user_stack_area = area;
2193 		}
2194 	}
2195 
2196 	if (status < B_OK)
2197 		goto err4;
2198 
2199 	if (thread->user_thread == NULL) {
2200 #if KDEBUG
2201 		panic("user data area not found, parent area is %" B_PRId32,
2202 			parentTeam->user_data_area);
2203 #endif
2204 		status = B_ERROR;
2205 		goto err4;
2206 	}
2207 
2208 	thread->user_stack_base = parentThread->user_stack_base;
2209 	thread->user_stack_size = parentThread->user_stack_size;
2210 	thread->user_local_storage = parentThread->user_local_storage;
2211 	thread->sig_block_mask = parentThread->sig_block_mask;
2212 	thread->signal_stack_base = parentThread->signal_stack_base;
2213 	thread->signal_stack_size = parentThread->signal_stack_size;
2214 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2215 
2216 	arch_store_fork_frame(forkArgs);
2217 
2218 	// copy image list
2219 	if (copy_images(parentTeam->id, team) != B_OK)
2220 		goto err5;
2221 
2222 	// insert the team into its parent and the teams hash
2223 	parentTeam->LockTeamAndProcessGroup();
2224 	team->Lock();
2225 
2226 	{
2227 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2228 
2229 		sTeamHash.Insert(team);
2230 		teamLimitReached = sUsedTeams >= sMaxTeams;
2231 		if (!teamLimitReached)
2232 			sUsedTeams++;
2233 	}
2234 
2235 	insert_team_into_parent(parentTeam, team);
2236 	insert_team_into_group(parentTeam->group, team);
2237 
2238 	team->Unlock();
2239 	parentTeam->UnlockTeamAndProcessGroup();
2240 
2241 	// notify team listeners
2242 	sNotificationService.Notify(TEAM_ADDED, team);
2243 
2244 	if (teamLimitReached) {
2245 		status = B_NO_MORE_TEAMS;
2246 		goto err6;
2247 	}
2248 
2249 	// create the main thread
2250 	{
2251 		ThreadCreationAttributes threadCreationAttributes(NULL,
2252 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2253 		threadCreationAttributes.forkArgs = forkArgs;
2254 		threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS;
2255 		threadID = thread_create_thread(threadCreationAttributes, false);
2256 		if (threadID < 0) {
2257 			status = threadID;
2258 			goto err6;
2259 		}
2260 	}
2261 
2262 	// notify the debugger
2263 	user_debug_team_created(team->id);
2264 
2265 	T(TeamForked(threadID));
2266 
2267 	resume_thread(threadID);
2268 	return threadID;
2269 
2270 err6:
2271 	// Remove the team structure from the process group, the parent team, and
2272 	// the team hash table and delete the team structure.
2273 	parentTeam->LockTeamAndProcessGroup();
2274 	team->Lock();
2275 
2276 	remove_team_from_group(team);
2277 	remove_team_from_parent(team->parent, team);
2278 
2279 	team->Unlock();
2280 	parentTeam->UnlockTeamAndProcessGroup();
2281 
2282 	{
2283 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2284 		sTeamHash.Remove(team);
2285 		if (!teamLimitReached)
2286 			sUsedTeams--;
2287 	}
2288 
2289 	sNotificationService.Notify(TEAM_REMOVED, team);
2290 err5:
2291 	remove_images(team);
2292 err4:
2293 	team->address_space->RemoveAndPut();
2294 err3:
2295 	delete_realtime_sem_context(team->realtime_sem_context);
2296 err2:
2297 	free(forkArgs);
2298 err1:
2299 	team->ReleaseReference();
2300 
2301 	return status;
2302 }
2303 
2304 
2305 /*!	Returns if the specified team \a parent has any children belonging to the
2306 	process group with the specified ID \a groupID.
2307 	The caller must hold \a parent's lock.
2308 */
2309 static bool
2310 has_children_in_group(Team* parent, pid_t groupID)
2311 {
2312 	for (Team* child = parent->children; child != NULL;
2313 			child = child->siblings_next) {
2314 		TeamLocker childLocker(child);
2315 		if (child->group_id == groupID)
2316 			return true;
2317 	}
2318 
2319 	return false;
2320 }
2321 
2322 
2323 /*!	Returns the first job control entry from \a children, which matches \a id.
2324 	\a id can be:
2325 	- \code > 0 \endcode: Matching an entry with that team ID.
2326 	- \code == -1 \endcode: Matching any entry.
2327 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2328 	\c 0 is an invalid value for \a id.
2329 
2330 	The caller must hold the lock of the team that \a children belongs to.
2331 
2332 	\param children The job control entry list to check.
2333 	\param id The match criterion.
2334 	\return The first matching entry or \c NULL, if none matches.
2335 */
2336 static job_control_entry*
2337 get_job_control_entry(team_job_control_children& children, pid_t id)
2338 {
2339 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2340 		 job_control_entry* entry = it.Next();) {
2341 
2342 		if (id > 0) {
2343 			if (entry->thread == id)
2344 				return entry;
2345 		} else if (id == -1) {
2346 			return entry;
2347 		} else {
2348 			pid_t processGroup
2349 				= (entry->team ? entry->team->group_id : entry->group_id);
2350 			if (processGroup == -id)
2351 				return entry;
2352 		}
2353 	}
2354 
2355 	return NULL;
2356 }
2357 
2358 
2359 /*!	Returns the first job control entry from one of team's dead, continued, or
2360 	stopped children which matches \a id.
2361 	\a id can be:
2362 	- \code > 0 \endcode: Matching an entry with that team ID.
2363 	- \code == -1 \endcode: Matching any entry.
2364 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2365 	\c 0 is an invalid value for \a id.
2366 
2367 	The caller must hold \a team's lock.
2368 
2369 	\param team The team whose dead, stopped, and continued child lists shall be
2370 		checked.
2371 	\param id The match criterion.
2372 	\param flags Specifies which children shall be considered. Dead children
2373 		are considered when \a flags is ORed bitwise with \c WEXITED, stopped
2374 		children are considered when \a flags is ORed bitwise with \c WUNTRACED
2375 		or \c WSTOPPED, continued children when \a flags is ORed bitwise with
2376 		\c WCONTINUED.
2377 	\return The first matching entry or \c NULL, if none matches.
2378 */
2379 static job_control_entry*
2380 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2381 {
2382 	job_control_entry* entry = NULL;
2383 
2384 	if ((flags & WEXITED) != 0)
2385 		entry = get_job_control_entry(team->dead_children, id);
2386 
2387 	if (entry == NULL && (flags & WCONTINUED) != 0)
2388 		entry = get_job_control_entry(team->continued_children, id);
2389 
2390 	if (entry == NULL && (flags & (WUNTRACED | WSTOPPED)) != 0)
2391 		entry = get_job_control_entry(team->stopped_children, id);
2392 
2393 	return entry;
2394 }
2395 
2396 
2397 job_control_entry::job_control_entry()
2398 	:
2399 	has_group_ref(false)
2400 {
2401 }
2402 
2403 
2404 job_control_entry::~job_control_entry()
2405 {
2406 	if (has_group_ref) {
2407 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2408 
2409 		ProcessGroup* group = sGroupHash.Lookup(group_id);
2410 		if (group == NULL) {
2411 			panic("job_control_entry::~job_control_entry(): unknown group "
2412 				"ID: %" B_PRId32, group_id);
2413 			return;
2414 		}
2415 
2416 		groupHashLocker.Unlock();
2417 
2418 		group->ReleaseReference();
2419 	}
2420 }
2421 
2422 
2423 /*!	Invoked when the owning team is dying, initializing the entry according to
2424 	the dead state.
2425 
2426 	The caller must hold the owning team's lock and the scheduler lock.
2427 */
2428 void
2429 job_control_entry::InitDeadState()
2430 {
2431 	if (team != NULL) {
2432 		ASSERT(team->exit.initialized);
2433 
2434 		group_id = team->group_id;
2435 		team->group->AcquireReference();
2436 		has_group_ref = true;
2437 
2438 		thread = team->id;
2439 		status = team->exit.status;
2440 		reason = team->exit.reason;
2441 		signal = team->exit.signal;
2442 		signaling_user = team->exit.signaling_user;
2443 		user_time = team->dead_threads_user_time
2444 			+ team->dead_children.user_time;
2445 		kernel_time = team->dead_threads_kernel_time
2446 			+ team->dead_children.kernel_time;
2447 
2448 		team = NULL;
2449 	}
2450 }
2451 
2452 
2453 job_control_entry&
2454 job_control_entry::operator=(const job_control_entry& other)
2455 {
2456 	state = other.state;
2457 	thread = other.thread;
2458 	signal = other.signal;
2459 	has_group_ref = false;
2460 	signaling_user = other.signaling_user;
2461 	team = other.team;
2462 	group_id = other.group_id;
2463 	status = other.status;
2464 	reason = other.reason;
2465 	user_time = other.user_time;
2466 	kernel_time = other.kernel_time;
2467 
2468 	return *this;
2469 }
2470 
2471 
2472 /*! This is the kernel backend for waitid().
2473 */
static thread_id
wait_for_child(pid_t child, uint32 flags, siginfo_t& _info,
	team_usage_info& _usage_info)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	struct job_control_entry foundEntry;
	struct job_control_entry* freeDeathEntry = NULL;
	status_t status = B_OK;

	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
		child, flags));

	T(WaitForChild(child, flags));

	// At least one of the state selectors must be given.
	if ((flags & (WEXITED | WUNTRACED | WSTOPPED | WCONTINUED)) == 0) {
		T(WaitForChildDone(B_BAD_VALUE));
		return B_BAD_VALUE;
	}

	// Remember the original argument: a 0 child means "our process group",
	// which is re-resolved on every iteration, since our group may change
	// while we're waiting.
	pid_t originalChild = child;

	// ignoreFoundEntries is set when SIGCHLD is ignored or SA_NOCLDWAIT is
	// set -- then found entries are discarded and we keep waiting until all
	// children are gone (cf. the check further below).
	bool ignoreFoundEntries = false;
	bool ignoreFoundEntriesChecked = false;

	while (true) {
		// lock the team
		TeamLocker teamLocker(team);

		// A 0 child argument means to wait for all children in the process
		// group of the calling team.
		child = originalChild == 0 ? -team->group_id : originalChild;

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		// If we don't have an entry yet, check whether there are any children
		// complying to the process group specification at all.
		if (entry == NULL) {
			// No success yet -- check whether there are any children complying
			// to the process group specification at all.
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else if (child != team->id) {
				if (Team* childTeam = Team::Get(child)) {
					BReference<Team> childTeamReference(childTeam, true);
					TeamLocker childTeamLocker(childTeam);
					childrenExist = childTeam->parent == team;
				}
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;

			// unless WNOWAIT has been specified, "consume" the wait state
			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
				if (entry->state == JOB_CONTROL_STATE_DEAD) {
					// The child is dead. Reap its death entry.
					freeDeathEntry = entry;
					team->dead_children.entries.Remove(entry);
					team->dead_children.count--;
				} else {
					// The child is well. Reset its job control state.
					team_set_job_control_state(entry->team,
						JOB_CONTROL_STATE_NONE, NULL);
				}
			}
		}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children.condition_variable.Add(&deadWaitEntry);

		teamLocker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		// Block until a child changes state (or we get interrupted).
		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			teamLocker.Lock();

			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			teamLocker.Unlock();

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// When we got here, we have a valid death entry, and already got
	// unregistered from the team or group. Fill in the returned info.
	memset(&_info, 0, sizeof(_info));
	_info.si_signo = SIGCHLD;
	_info.si_pid = foundEntry.thread;
	_info.si_uid = foundEntry.signaling_user;
	// TODO: Fill in si_errno?

	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			_info.si_code = foundEntry.reason;
			_info.si_status = foundEntry.reason == CLD_EXITED
				? foundEntry.status : foundEntry.signal;
			_usage_info.user_time = foundEntry.user_time;
			_usage_info.kernel_time = foundEntry.kernel_time;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			_info.si_code = CLD_STOPPED;
			_info.si_status = foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			_info.si_code = CLD_CONTINUED;
			_info.si_status = 0;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
	// status is available.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker signalLocker(team->signal_lock);
	SpinLocker threadCreationLocker(gThreadCreationLock);

	if (is_team_signal_blocked(team, SIGCHLD)) {
		if (get_job_control_entry(team, child, flags) == NULL)
			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
	}

	threadCreationLocker.Unlock();
	signalLocker.Unlock();
	teamLocker.Unlock();

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}
2660 
2661 
2662 /*! Fills the team_info structure with information from the specified team.
2663 	Interrupts must be enabled. The team must not be locked.
2664 */
2665 static status_t
2666 fill_team_info(Team* team, team_info* info, size_t size)
2667 {
2668 	if (size != sizeof(team_info))
2669 		return B_BAD_VALUE;
2670 
2671 	// TODO: Set more informations for team_info
2672 	memset(info, 0, size);
2673 
2674 	info->team = team->id;
2675 		// immutable
2676 	info->image_count = count_images(team);
2677 		// protected by sImageMutex
2678 
2679 	TeamLocker teamLocker(team);
2680 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2681 
2682 	info->thread_count = team->num_threads;
2683 	//info->area_count =
2684 	info->debugger_nub_thread = team->debug_info.nub_thread;
2685 	info->debugger_nub_port = team->debug_info.nub_port;
2686 	info->uid = team->effective_uid;
2687 	info->gid = team->effective_gid;
2688 
2689 	strlcpy(info->args, team->Args(), sizeof(info->args));
2690 	info->argc = 1;
2691 
2692 	return B_OK;
2693 }
2694 
2695 
2696 /*!	Returns whether the process group contains stopped processes.
2697 	The caller must hold the process group's lock.
2698 */
2699 static bool
2700 process_group_has_stopped_processes(ProcessGroup* group)
2701 {
2702 	Team* team = group->teams;
2703 	while (team != NULL) {
2704 		// the parent team's lock guards the job control entry -- acquire it
2705 		team->LockTeamAndParent(false);
2706 
2707 		if (team->job_control_entry != NULL
2708 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2709 			team->UnlockTeamAndParent();
2710 			return true;
2711 		}
2712 
2713 		team->UnlockTeamAndParent();
2714 
2715 		team = team->group_next;
2716 	}
2717 
2718 	return false;
2719 }
2720 
2721 
2722 /*!	Iterates through all process groups queued in team_remove_team() and signals
2723 	those that are orphaned and have stopped processes.
2724 	The caller must not hold any team or process group locks.
2725 */
2726 static void
2727 orphaned_process_group_check()
2728 {
2729 	// process as long as there are groups in the list
2730 	while (true) {
2731 		// remove the head from the list
2732 		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
2733 
2734 		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
2735 		if (group == NULL)
2736 			return;
2737 
2738 		group->UnsetOrphanedCheck();
2739 		BReference<ProcessGroup> groupReference(group);
2740 
2741 		orphanedCheckLocker.Unlock();
2742 
2743 		AutoLocker<ProcessGroup> groupLocker(group);
2744 
2745 		// If the group is orphaned and contains stopped processes, we're
2746 		// supposed to send SIGHUP + SIGCONT.
2747 		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
2748 			Thread* currentThread = thread_get_current_thread();
2749 
2750 			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
2751 			send_signal_to_process_group_locked(group, signal, 0);
2752 
2753 			signal.SetNumber(SIGCONT);
2754 			send_signal_to_process_group_locked(group, signal, 0);
2755 		}
2756 	}
2757 }
2758 
2759 
2760 static status_t
2761 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2762 	uint32 flags)
2763 {
2764 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2765 		return B_BAD_VALUE;
2766 
2767 	// get the team
2768 	Team* team = Team::GetAndLock(id);
2769 	if (team == NULL)
2770 		return B_BAD_TEAM_ID;
2771 	BReference<Team> teamReference(team, true);
2772 	TeamLocker teamLocker(team, true);
2773 
2774 	if ((flags & B_CHECK_PERMISSION) != 0) {
2775 		uid_t uid = geteuid();
2776 		if (uid != 0 && uid != team->effective_uid)
2777 			return B_NOT_ALLOWED;
2778 	}
2779 
2780 	bigtime_t kernelTime = 0;
2781 	bigtime_t userTime = 0;
2782 
2783 	switch (who) {
2784 		case B_TEAM_USAGE_SELF:
2785 		{
2786 			Thread* thread = team->thread_list;
2787 
2788 			for (; thread != NULL; thread = thread->team_next) {
2789 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2790 				kernelTime += thread->kernel_time;
2791 				userTime += thread->user_time;
2792 			}
2793 
2794 			kernelTime += team->dead_threads_kernel_time;
2795 			userTime += team->dead_threads_user_time;
2796 			break;
2797 		}
2798 
2799 		case B_TEAM_USAGE_CHILDREN:
2800 		{
2801 			Team* child = team->children;
2802 			for (; child != NULL; child = child->siblings_next) {
2803 				TeamLocker childLocker(child);
2804 
2805 				Thread* thread = team->thread_list;
2806 
2807 				for (; thread != NULL; thread = thread->team_next) {
2808 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2809 					kernelTime += thread->kernel_time;
2810 					userTime += thread->user_time;
2811 				}
2812 
2813 				kernelTime += child->dead_threads_kernel_time;
2814 				userTime += child->dead_threads_user_time;
2815 			}
2816 
2817 			kernelTime += team->dead_children.kernel_time;
2818 			userTime += team->dead_children.user_time;
2819 			break;
2820 		}
2821 	}
2822 
2823 	info->kernel_time = kernelTime;
2824 	info->user_time = userTime;
2825 
2826 	return B_OK;
2827 }
2828 
2829 
2830 //	#pragma mark - Private kernel API
2831 
2832 
/*	Initializes the team subsystem at boot: sets up the global team and
	process group hash tables, the initial session/process group, and the
	kernel team itself. \a args is not used by this function.
*/
status_t
team_init(kernel_args* args)
{
	// create the team hash table
	new(&sTeamHash) TeamTable;
	if (sTeamHash.Init(64) != B_OK)
		panic("Failed to init team hash table!");

	new(&sGroupHash) ProcessGroupHashTable;
	if (sGroupHash.Init() != B_OK)
		panic("Failed to init process group hash table!");

	// create initial session and process groups

	ProcessSession* session = new(std::nothrow) ProcessSession(1);
	if (session == NULL)
		panic("Could not create initial session.\n");
	BReference<ProcessSession> sessionReference(session, true);

	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");
	BReference<ProcessGroup> groupReference(group, true);

	// makes the group visible in the group hash and binds it to the session
	group->Publish(session);

	// create the kernel team
	sKernelTeam = Team::Create(1, "kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");

	sKernelTeam->address_space = VMAddressSpace::Kernel();
	sKernelTeam->SetArgs(sKernelTeam->Name());
	sKernelTeam->state = TEAM_STATE_NORMAL;

	// the kernel team runs as root with no supplementary groups
	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;
	sKernelTeam->supplementary_group_count = 0;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	// the kernel needs a generous FD table; failure here is not fatal
	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
		dprintf("Failed to resize FD table for kernel team!\n");

	// stick it in the team hash
	sTeamHash.Insert(sKernelTeam);

	// check safe mode settings
	sDisableUserAddOns = get_safemode_boolean(B_SAFEMODE_DISABLE_USER_ADD_ONS,
		false);

	// register the kernel debugger ("KDL") commands for team inspection
	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	sNotificationService.Register();

	return B_OK;
}
2911 
2912 
/*	Returns the maximum number of teams the system allows. */
int32
team_max_teams(void)
{
	return sMaxTeams;
}
2918 
2919 
2920 int32
2921 team_used_teams(void)
2922 {
2923 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
2924 	return sUsedTeams;
2925 }
2926 
2927 
2928 /*! Returns a death entry of a child team specified by ID (if any).
2929 	The caller must hold the team's lock.
2930 
2931 	\param team The team whose dead children list to check.
2932 	\param child The ID of the child for whose death entry to lock. Must be > 0.
2933 	\param _deleteEntry Return variable, indicating whether the caller needs to
2934 		delete the returned entry.
2935 	\return The death entry of the matching team, or \c NULL, if no death entry
2936 		for the team was found.
2937 */
2938 job_control_entry*
2939 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2940 {
2941 	if (child <= 0)
2942 		return NULL;
2943 
2944 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2945 		child);
2946 	if (entry) {
2947 		// remove the entry only, if the caller is the parent of the found team
2948 		if (team_get_current_team_id() == entry->thread) {
2949 			team->dead_children.entries.Remove(entry);
2950 			team->dead_children.count--;
2951 			*_deleteEntry = true;
2952 		} else {
2953 			*_deleteEntry = false;
2954 		}
2955 	}
2956 
2957 	return entry;
2958 }
2959 
2960 
2961 /*! Quick check to see if we have a valid team ID. */
2962 bool
2963 team_is_valid(team_id id)
2964 {
2965 	if (id <= 0)
2966 		return false;
2967 
2968 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
2969 	return team_get_team_struct_locked(id) != NULL;
2970 }
2971 
2972 
/*	Looks up the team with the given ID in the global team hash.
	The caller must hold \c sTeamHashLock (read or write).
	Returns \c NULL if no team with that ID exists.
*/
Team*
team_get_team_struct_locked(team_id id)
{
	return sTeamHash.Lookup(id);
}
2978 
2979 
2980 void
2981 team_set_controlling_tty(int32 ttyIndex)
2982 {
2983 	// lock the team, so its session won't change while we're playing with it
2984 	Team* team = thread_get_current_thread()->team;
2985 	TeamLocker teamLocker(team);
2986 
2987 	// get and lock the session
2988 	ProcessSession* session = team->group->Session();
2989 	AutoLocker<ProcessSession> sessionLocker(session);
2990 
2991 	// set the session's fields
2992 	session->controlling_tty = ttyIndex;
2993 	session->foreground_group = -1;
2994 }
2995 
2996 
2997 int32
2998 team_get_controlling_tty()
2999 {
3000 	// lock the team, so its session won't change while we're playing with it
3001 	Team* team = thread_get_current_thread()->team;
3002 	TeamLocker teamLocker(team);
3003 
3004 	// get and lock the session
3005 	ProcessSession* session = team->group->Session();
3006 	AutoLocker<ProcessSession> sessionLocker(session);
3007 
3008 	// get the session's field
3009 	return session->controlling_tty;
3010 }
3011 
3012 
status_t
team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
{
	// lock the team, so its session won't change while we're playing with it
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// check given TTY -- must be the controlling tty of the calling process
	if (session->controlling_tty != ttyIndex)
		return ENOTTY;

	// check given process group -- must belong to our session
	{
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
		if (group == NULL || group->Session() != session)
			return B_BAD_VALUE;
	}

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
		InterruptsSpinLocker signalLocker(team->signal_lock);

		if (!is_team_signal_blocked(team, SIGTTOU)) {
			pid_t groupID = team->group_id;

			// Drop all locks before sending the signal -- the send must not
			// be done with team/session locks held.
			signalLocker.Unlock();
			sessionLocker.Unlock();
			teamLocker.Unlock();

			// Signal our own group and bail out; the caller is expected to
			// deal with the interruption (e.g. restart the request).
			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
			send_signal_to_process_group(groupID, signal, 0);
			return B_INTERRUPTED;
		}
	}

	session->foreground_group = processGroupID;

	return B_OK;
}
3062 
3063 
3064 uid_t
3065 team_geteuid(team_id id)
3066 {
3067 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3068 	Team* team = team_get_team_struct_locked(id);
3069 	if (team == NULL)
3070 		return (uid_t)-1;
3071 	return team->effective_uid;
3072 }
3073 
3074 
3075 /*!	Removes the specified team from the global team hash, from its process
3076 	group, and from its parent.
3077 	It also moves all of its children to the kernel team.
3078 
3079 	The caller must hold the following locks:
3080 	- \a team's process group's lock,
3081 	- the kernel team's lock,
3082 	- \a team's parent team's lock (might be the kernel team), and
3083 	- \a team's lock.
3084 */
void
team_remove_team(Team* team, pid_t& _signalGroup)
{
	Team* parent = team->parent;

	// remember how long this team lasted
	parent->dead_children.kernel_time += team->dead_threads_kernel_time
		+ team->dead_children.kernel_time;
	parent->dead_children.user_time += team->dead_threads_user_time
		+ team->dead_children.user_time;

	// remove the team from the hash table (the hash lock also guards
	// sUsedTeams)
	InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
	sTeamHash.Remove(team);
	sUsedTeams--;
	teamsLocker.Unlock();

	// The team can no longer be accessed by ID. Navigation to it is still
	// possible from its process group and its parent and children, but that
	// will be rectified shortly.
	team->state = TEAM_STATE_DEATH;

	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signalling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine, whom to signal.
	_signalGroup = -1;
	bool isSessionLeader = false;
	if (team->session_id == team->id
		&& team->group->Session()->controlling_tty >= 0) {
		isSessionLeader = true;

		ProcessSession* session = team->group->Session();

		AutoLocker<ProcessSession> sessionLocker(session);

		// detach the terminal; the caller sends SIGHUP to the foreground
		// group we return in _signalGroup
		session->controlling_tty = -1;
		_signalGroup = session->foreground_group;
	}

	// remove us from our process group
	remove_team_from_group(team);

	// move the team's children to the kernel team
	while (Team* child = team->children) {
		// remove the child from the current team and add it to the kernel team
		TeamLocker childLocker(child);

		remove_team_from_parent(team, child);
		insert_team_into_parent(sKernelTeam, child);

		// move job control entries too
		sKernelTeam->stopped_children.entries.MoveFrom(
			&team->stopped_children.entries);
		sKernelTeam->continued_children.entries.MoveFrom(
			&team->continued_children.entries);

		// If the team was a session leader with controlling terminal,
		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
		// groups with stopped processes. Due to locking complications we can't
		// do that here, so we only check whether we were a reason for the
		// child's process group not being an orphan and, if so, schedule a
		// later check (cf. orphaned_process_group_check()).
		if (isSessionLeader) {
			ProcessGroup* childGroup = child->group;
			if (childGroup->Session()->id == team->session_id
				&& childGroup->id != team->group_id) {
				childGroup->ScheduleOrphanedCheck();
			}
		}

		// Note, we don't move the dead children entries. Those will be deleted
		// when the team structure is deleted.
	}

	// remove us from our parent
	remove_team_from_parent(parent, team);
}
3163 
3164 
3165 /*!	Kills all threads but the main thread of the team and shuts down user
3166 	debugging for it.
3167 	To be called on exit of the team's main thread. No locks must be held.
3168 
3169 	\param team The team in question.
3170 	\return The port of the debugger for the team, -1 if none. To be passed to
3171 		team_delete_team().
3172 */
port_id
team_shutdown_team(Team* team)
{
	ASSERT(thread_get_current_thread() == team->main_thread);

	TeamLocker teamLocker(team);

	// Make sure debugging changes won't happen anymore.
	port_id debuggerPort = -1;
	while (true) {
		// If a debugger change is in progress for the team, we'll have to
		// wait until it is done.
		ConditionVariableEntry waitForDebuggerEntry;
		bool waitForDebugger = false;

		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition != NULL) {
			team->debug_info.debugger_changed_condition->Add(
				&waitForDebuggerEntry);
			waitForDebugger = true;
		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
			// The team is being debugged. That will stop with the termination
			// of the nub thread. Since we set the team state to death, no one
			// can install a debugger anymore. We fetch the debugger's port to
			// send it a message at the bitter end.
			debuggerPort = team->debug_info.debugger_port;
		}

		debugInfoLocker.Unlock();

		if (!waitForDebugger)
			break;

		// wait for the debugger change to be finished (the team lock must be
		// dropped while blocking), then re-check from the start
		teamLocker.Unlock();

		waitForDebuggerEntry.Wait();

		teamLocker.Lock();
	}

	// Mark the team as shutting down. That will prevent new threads from being
	// created and debugger changes from taking place.
	team->state = TEAM_STATE_SHUTDOWN;

	// delete all timers
	team->DeleteUserTimers(false);

	// deactivate CPU time user timers for the team
	InterruptsSpinLocker timeLocker(team->time_lock);

	if (team->HasActiveCPUTimeUserTimers())
		team->DeactivateCPUTimeUserTimers();

	timeLocker.Unlock();

	// kill all threads but the main thread
	team_death_entry deathEntry;
	deathEntry.condition.Init(team, "team death");

	// Loop: send SIGKILLTHR to every remaining thread, wait for them to die,
	// and re-scan, since new threads may have appeared before the state was
	// set to shutdown.
	while (true) {
		team->death_entry = &deathEntry;
		deathEntry.remaining_threads = 0;

		Thread* thread = team->thread_list;
		while (thread != NULL) {
			if (thread != team->main_thread) {
				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
				deathEntry.remaining_threads++;
			}

			thread = thread->team_next;
		}

		if (deathEntry.remaining_threads == 0)
			break;

		// there are threads to wait for
		ConditionVariableEntry entry;
		deathEntry.condition.Add(&entry);

		teamLocker.Unlock();

		entry.Wait();

		teamLocker.Lock();
	}

	team->death_entry = NULL;

	return debuggerPort;
}
3267 
3268 
/*!	Called on team exit to notify threads waiting on the team and free most
	resources associated with it.
	The caller shouldn't hold any locks.

	\param team The team to delete. All of its threads must already be gone
		(asserted below).
	\param debuggerPort The debugger port returned by team_shutdown_team(),
		or -1 if the team was not being debugged.
*/
void
team_delete_team(Team* team, port_id debuggerPort)
{
	// Not quite in our job description, but work that has been left by
	// team_remove_team() and that can be done now that we're not holding any
	// locks.
	orphaned_process_group_check();

	team_id teamID = team->id;

	ASSERT(team->num_threads == 0);

	// If someone is waiting for this team to be loaded, but it dies
	// unexpectedly before being done, we need to notify the waiting
	// thread now.

	TeamLocker teamLocker(team);

	if (team->loading_info) {
		// there's indeed someone waiting
		struct team_loading_info* loadingInfo = team->loading_info;
		team->loading_info = NULL;

		// the load can never complete now
		loadingInfo->result = B_ERROR;

		// wake up the waiting thread
		loadingInfo->condition.NotifyAll();
	}

	// notify team watchers

	{
		// we're not reachable from anyone anymore at this point, so we
		// can safely access the list without any locking
		struct team_watcher* watcher;
		while ((watcher = (struct team_watcher*)list_remove_head_item(
				&team->watcher_list)) != NULL) {
			watcher->hook(teamID, watcher->data);
			free(watcher);
		}
	}

	teamLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	// free team resources

	delete_realtime_sem_context(team->realtime_sem_context);
	xsi_sem_undo(team);
	remove_images(team);
	team->address_space->RemoveAndPut();

	// may delete the team object, if this was the last reference
	team->ReleaseReference();

	// notify the debugger, that the team is gone
	user_debug_team_deleted(teamID, debuggerPort);
}
3331 
3332 
/*!	Returns the team object of the kernel itself. */
Team*
team_get_kernel_team(void)
{
	return sKernelTeam;
}
3338 
3339 
3340 team_id
3341 team_get_kernel_team_id(void)
3342 {
3343 	if (!sKernelTeam)
3344 		return 0;
3345 
3346 	return sKernelTeam->id;
3347 }
3348 
3349 
3350 team_id
3351 team_get_current_team_id(void)
3352 {
3353 	return thread_get_current_thread()->team->id;
3354 }
3355 
3356 
/*!	Returns a reference to the address space of the team with the given ID.
	On success the caller receives a referenced VMAddressSpace (acquired via
	Get() below) and is presumably responsible for releasing it — confirm
	against callers.
*/
status_t
team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
{
	if (id == sKernelTeam->id) {
		// we're the kernel team, so we don't have to go through all
		// the hassle (locking and hash lookup)
		*_addressSpace = VMAddressSpace::GetKernel();
		return B_OK;
	}

	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);

	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return B_BAD_VALUE;

	// acquire the reference while still holding the hash lock, so the team
	// (and its address space) cannot go away under us
	team->address_space->Get();
	*_addressSpace = team->address_space;
	return B_OK;
}
3377 
3378 
/*!	Sets the team's job control state.
	The caller must hold the parent team's lock. Interrupts are allowed to be
	enabled or disabled.
	\a team The team whose job control state shall be set.
	\a newState The new state to be set.
	\a signal The signal the new state was caused by. Can be \c NULL, if none.
		Then the caller is responsible for filling in the following fields of
		the entry before releasing the parent team's lock, unless the new state
		is \c JOB_CONTROL_STATE_NONE:
		- \c signal: The number of the signal causing the state change.
		- \c signaling_user: The real UID of the user sending the signal.
*/
void
team_set_job_control_state(Team* team, job_control_state newState,
	Signal* signal)
{
	if (team == NULL || team->job_control_entry == NULL)
		return;

	// don't touch anything, if the state stays the same or the team is already
	// dead
	job_control_entry* entry = team->job_control_entry;
	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
		return;

	T(SetJobControlState(team->id, newState, signal));

	// remove from the old list
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry is in no list ATM
			break;
		case JOB_CONTROL_STATE_DEAD:
			// can't get here
			break;
		case JOB_CONTROL_STATE_STOPPED:
			team->parent->stopped_children.entries.Remove(entry);
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			team->parent->continued_children.entries.Remove(entry);
			break;
	}

	entry->state = newState;

	// record who caused the transition, if known
	if (signal != NULL) {
		entry->signal = signal->Number();
		entry->signaling_user = signal->SendingUser();
	}

	// add to new list
	team_job_control_children* childList = NULL;
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry doesn't get into any list
			break;
		case JOB_CONTROL_STATE_DEAD:
			childList = &team->parent->dead_children;
			team->parent->dead_children.count++;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			childList = &team->parent->stopped_children;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			childList = &team->parent->continued_children;
			break;
	}

	if (childList != NULL) {
		childList->entries.Add(entry);
		// wake a parent that might be blocked in wait_for_child()
		team->parent->dead_children.condition_variable.NotifyAll();
	}
}
3452 
3453 
3454 /*!	Inits the given team's exit information, if not yet initialized, to some
3455 	generic "killed" status.
3456 	The caller must not hold the team's lock. Interrupts must be enabled.
3457 
3458 	\param team The team whose exit info shall be initialized.
3459 */
3460 void
3461 team_init_exit_info_on_error(Team* team)
3462 {
3463 	TeamLocker teamLocker(team);
3464 
3465 	if (!team->exit.initialized) {
3466 		team->exit.reason = CLD_KILLED;
3467 		team->exit.signal = SIGKILL;
3468 		team->exit.signaling_user = geteuid();
3469 		team->exit.status = 0;
3470 		team->exit.initialized = true;
3471 	}
3472 }
3473 
3474 
3475 /*! Adds a hook to the team that is called as soon as this team goes away.
3476 	This call might get public in the future.
3477 */
3478 status_t
3479 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3480 {
3481 	if (hook == NULL || teamID < B_OK)
3482 		return B_BAD_VALUE;
3483 
3484 	// create the watcher object
3485 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3486 	if (watcher == NULL)
3487 		return B_NO_MEMORY;
3488 
3489 	watcher->hook = hook;
3490 	watcher->data = data;
3491 
3492 	// add watcher, if the team isn't already dying
3493 	// get the team
3494 	Team* team = Team::GetAndLock(teamID);
3495 	if (team == NULL) {
3496 		free(watcher);
3497 		return B_BAD_TEAM_ID;
3498 	}
3499 
3500 	list_add_item(&team->watcher_list, watcher);
3501 
3502 	team->UnlockAndReleaseReference();
3503 
3504 	return B_OK;
3505 }
3506 
3507 
3508 status_t
3509 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3510 {
3511 	if (hook == NULL || teamID < 0)
3512 		return B_BAD_VALUE;
3513 
3514 	// get team and remove watcher (if present)
3515 	Team* team = Team::GetAndLock(teamID);
3516 	if (team == NULL)
3517 		return B_BAD_TEAM_ID;
3518 
3519 	// search for watcher
3520 	team_watcher* watcher = NULL;
3521 	while ((watcher = (team_watcher*)list_get_next_item(
3522 			&team->watcher_list, watcher)) != NULL) {
3523 		if (watcher->hook == hook && watcher->data == data) {
3524 			// got it!
3525 			list_remove_item(&team->watcher_list, watcher);
3526 			break;
3527 		}
3528 	}
3529 
3530 	team->UnlockAndReleaseReference();
3531 
3532 	if (watcher == NULL)
3533 		return B_ENTRY_NOT_FOUND;
3534 
3535 	free(watcher);
3536 	return B_OK;
3537 }
3538 
3539 
/*!	Allocates a user_thread structure from the team.
	The team lock must be held, unless the function is called for the team's
	main thread. Interrupts must be enabled.

	\param team The team to allocate from. Its user_data area must have been
		set up (otherwise \c NULL is returned).
	\return The allocated structure (in the team's user_data area), or \c NULL
		on failure.
*/
struct user_thread*
team_allocate_user_thread(Team* team)
{
	if (team->user_data == 0)
		return NULL;

	// take an entry from the free list, if any
	if (struct free_user_thread* entry = team->free_user_threads) {
		user_thread* thread = entry->thread;
		team->free_user_threads = entry->next;
		free(entry);
			// only the list node is freed; the user_thread itself is reused
		return thread;
	}

	// No recycled entry -- carve a new one out of the user_data area,
	// growing the area page by page as needed.
	while (true) {
		// enough space left?
		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
		if (team->user_data_size - team->used_user_data < needed) {
			// try to resize the area
			if (resize_area(team->user_data_area,
					team->user_data_size + B_PAGE_SIZE) != B_OK) {
				return NULL;
			}

			// resized user area successfully -- try to allocate the user_thread
			// again
			team->user_data_size += B_PAGE_SIZE;
			continue;
		}

		// allocate the user_thread
		user_thread* thread
			= (user_thread*)(team->user_data + team->used_user_data);
		team->used_user_data += needed;

		return thread;
	}
}
3582 
3583 
3584 /*!	Frees the given user_thread structure.
3585 	The team's lock must not be held. Interrupts must be enabled.
3586 	\param team The team the user thread was allocated from.
3587 	\param userThread The user thread to free.
3588 */
3589 void
3590 team_free_user_thread(Team* team, struct user_thread* userThread)
3591 {
3592 	if (userThread == NULL)
3593 		return;
3594 
3595 	// create a free list entry
3596 	free_user_thread* entry
3597 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3598 	if (entry == NULL) {
3599 		// we have to leak the user thread :-/
3600 		return;
3601 	}
3602 
3603 	// add to free list
3604 	TeamLocker teamLocker(team);
3605 
3606 	entry->thread = userThread;
3607 	entry->next = team->free_user_threads;
3608 	team->free_user_threads = entry;
3609 }
3610 
3611 
3612 //	#pragma mark - Associated data interface
3613 
3614 
/*!	Creates an associated data object that is not yet attached to an owner. */
AssociatedData::AssociatedData()
	:
	fOwner(NULL)
{
}
3620 
3621 
AssociatedData::~AssociatedData()
{
	// nothing to do here
}
3625 
3626 
/*!	Hook invoked when the owning AssociatedDataOwner goes away (see
	AssociatedDataOwner::PrepareForDeletion()). The default implementation
	does nothing.
*/
void
AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
{
}
3631 
3632 
AssociatedDataOwner::AssociatedDataOwner()
{
	// fLock guards fList (see AddData()/RemoveData())
	mutex_init(&fLock, "associated data owner");
}
3637 
3638 
AssociatedDataOwner::~AssociatedDataOwner()
{
	// release the lock's resources
	mutex_destroy(&fLock);
}
3643 
3644 
3645 bool
3646 AssociatedDataOwner::AddData(AssociatedData* data)
3647 {
3648 	MutexLocker locker(fLock);
3649 
3650 	if (data->Owner() != NULL)
3651 		return false;
3652 
3653 	data->AcquireReference();
3654 	fList.Add(data);
3655 	data->SetOwner(this);
3656 
3657 	return true;
3658 }
3659 
3660 
3661 bool
3662 AssociatedDataOwner::RemoveData(AssociatedData* data)
3663 {
3664 	MutexLocker locker(fLock);
3665 
3666 	if (data->Owner() != this)
3667 		return false;
3668 
3669 	data->SetOwner(NULL);
3670 	fList.Remove(data);
3671 
3672 	locker.Unlock();
3673 
3674 	data->ReleaseReference();
3675 
3676 	return true;
3677 }
3678 
3679 
/*!	Detaches all associated data, notifying each object via OwnerDeleted()
	and releasing the references acquired in AddData().
	The hooks are invoked without fLock held.
*/
void
AssociatedDataOwner::PrepareForDeletion()
{
	MutexLocker locker(fLock);

	// move all data to a temporary list and unset the owner
	DataList list;
	list.MoveFrom(&fList);

	for (DataList::Iterator it = list.GetIterator();
		AssociatedData* data = it.Next();) {
		data->SetOwner(NULL);
	}

	locker.Unlock();

	// call the notification hooks and release our references
	while (AssociatedData* data = list.RemoveHead()) {
		data->OwnerDeleted(this);
		data->ReleaseReference();
	}
}
3702 
3703 
3704 /*!	Associates data with the current team.
3705 	When the team is deleted, the data object is notified.
3706 	The team acquires a reference to the object.
3707 
3708 	\param data The data object.
3709 	\return \c true on success, \c false otherwise. Fails only when the supplied
3710 		data object is already associated with another owner.
3711 */
3712 bool
3713 team_associate_data(AssociatedData* data)
3714 {
3715 	return thread_get_current_thread()->team->AddData(data);
3716 }
3717 
3718 
3719 /*!	Dissociates data from the current team.
3720 	Balances an earlier call to team_associate_data().
3721 
3722 	\param data The data object.
3723 	\return \c true on success, \c false otherwise. Fails only when the data
3724 		object is not associated with the current team.
3725 */
3726 bool
3727 team_dissociate_data(AssociatedData* data)
3728 {
3729 	return thread_get_current_thread()->team->RemoveData(data);
3730 }
3731 
3732 
3733 //	#pragma mark - Public kernel API
3734 
3735 
/*!	Loads a new userland program with default priority, the current team as
	parent, and waits until the image is fully loaded.
	Convenience wrapper around load_image_etc().
*/
thread_id
load_image(int32 argCount, const char** args, const char** env)
{
	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
}
3742 
3743 
3744 thread_id
3745 load_image_etc(int32 argCount, const char* const* args,
3746 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3747 {
3748 	// we need to flatten the args and environment
3749 
3750 	if (args == NULL)
3751 		return B_BAD_VALUE;
3752 
3753 	// determine total needed size
3754 	int32 argSize = 0;
3755 	for (int32 i = 0; i < argCount; i++)
3756 		argSize += strlen(args[i]) + 1;
3757 
3758 	int32 envCount = 0;
3759 	int32 envSize = 0;
3760 	while (env != NULL && env[envCount] != NULL)
3761 		envSize += strlen(env[envCount++]) + 1;
3762 
3763 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3764 	if (size > MAX_PROCESS_ARGS_SIZE)
3765 		return B_TOO_MANY_ARGS;
3766 
3767 	// allocate space
3768 	char** flatArgs = (char**)malloc(size);
3769 	if (flatArgs == NULL)
3770 		return B_NO_MEMORY;
3771 
3772 	char** slot = flatArgs;
3773 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3774 
3775 	// copy arguments and environment
3776 	for (int32 i = 0; i < argCount; i++) {
3777 		int32 argSize = strlen(args[i]) + 1;
3778 		memcpy(stringSpace, args[i], argSize);
3779 		*slot++ = stringSpace;
3780 		stringSpace += argSize;
3781 	}
3782 
3783 	*slot++ = NULL;
3784 
3785 	for (int32 i = 0; i < envCount; i++) {
3786 		int32 envSize = strlen(env[i]) + 1;
3787 		memcpy(stringSpace, env[i], envSize);
3788 		*slot++ = stringSpace;
3789 		stringSpace += envSize;
3790 	}
3791 
3792 	*slot++ = NULL;
3793 
3794 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3795 		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
3796 
3797 	free(flatArgs);
3798 		// load_image_internal() unset our variable if it took over ownership
3799 
3800 	return thread;
3801 }
3802 
3803 
3804 status_t
3805 wait_for_team(team_id id, status_t* _returnCode)
3806 {
3807 	// check whether the team exists
3808 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3809 
3810 	Team* team = team_get_team_struct_locked(id);
3811 	if (team == NULL)
3812 		return B_BAD_TEAM_ID;
3813 
3814 	id = team->id;
3815 
3816 	teamsLocker.Unlock();
3817 
3818 	// wait for the main thread (it has the same ID as the team)
3819 	return wait_for_thread(id, _returnCode);
3820 }
3821 
3822 
3823 status_t
3824 kill_team(team_id id)
3825 {
3826 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3827 
3828 	Team* team = team_get_team_struct_locked(id);
3829 	if (team == NULL)
3830 		return B_BAD_TEAM_ID;
3831 
3832 	id = team->id;
3833 
3834 	teamsLocker.Unlock();
3835 
3836 	if (team == sKernelTeam)
3837 		return B_NOT_ALLOWED;
3838 
3839 	// Just kill the team's main thread (it has same ID as the team). The
3840 	// cleanup code there will take care of the team.
3841 	return kill_thread(id);
3842 }
3843 
3844 
3845 status_t
3846 _get_team_info(team_id id, team_info* info, size_t size)
3847 {
3848 	// get the team
3849 	Team* team = Team::Get(id);
3850 	if (team == NULL)
3851 		return B_BAD_TEAM_ID;
3852 	BReference<Team> teamReference(team, true);
3853 
3854 	// fill in the info
3855 	return fill_team_info(team, info, size);
3856 }
3857 
3858 
/*!	Iterates over all teams, filling in \a info for the next existing team
	at or after the slot stored in \a cookie.
	\param cookie In/out iteration state; start with 0 (or 1).
*/
status_t
_get_next_team_info(int32* cookie, team_info* info, size_t size)
{
	int32 slot = *cookie;
	if (slot < 1)
		slot = 1;

	InterruptsReadSpinLocker locker(sTeamHashLock);

	team_id lastTeamID = peek_next_thread_id();
		// TODO: This is broken, since the id can wrap around!

	// get next valid team
	Team* team = NULL;
	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
		slot++;

	if (team == NULL)
		return B_BAD_TEAM_ID;

	// get a reference to the team and unlock
	BReference<Team> teamReference(team);
	locker.Unlock();

	// fill in the info
	*cookie = ++slot;
	return fill_team_info(team, info, size);
}
3887 
3888 
3889 status_t
3890 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3891 {
3892 	if (size != sizeof(team_usage_info))
3893 		return B_BAD_VALUE;
3894 
3895 	return common_get_team_usage_info(id, who, info, 0);
3896 }
3897 
3898 
3899 pid_t
3900 getpid(void)
3901 {
3902 	return thread_get_current_thread()->team->id;
3903 }
3904 
3905 
/*!	Returns the parent process' ID; 0 selects the calling process in
	_getppid().
*/
pid_t
getppid()
{
	return _getppid(0);
}
3911 
3912 
3913 pid_t
3914 getpgid(pid_t id)
3915 {
3916 	if (id < 0) {
3917 		errno = EINVAL;
3918 		return -1;
3919 	}
3920 
3921 	if (id == 0) {
3922 		// get process group of the calling process
3923 		Team* team = thread_get_current_thread()->team;
3924 		TeamLocker teamLocker(team);
3925 		return team->group_id;
3926 	}
3927 
3928 	// get the team
3929 	Team* team = Team::GetAndLock(id);
3930 	if (team == NULL) {
3931 		errno = ESRCH;
3932 		return -1;
3933 	}
3934 
3935 	// get the team's process group ID
3936 	pid_t groupID = team->group_id;
3937 
3938 	team->UnlockAndReleaseReference();
3939 
3940 	return groupID;
3941 }
3942 
3943 
3944 pid_t
3945 getsid(pid_t id)
3946 {
3947 	if (id < 0) {
3948 		errno = EINVAL;
3949 		return -1;
3950 	}
3951 
3952 	if (id == 0) {
3953 		// get session of the calling process
3954 		Team* team = thread_get_current_thread()->team;
3955 		TeamLocker teamLocker(team);
3956 		return team->session_id;
3957 	}
3958 
3959 	// get the team
3960 	Team* team = Team::GetAndLock(id);
3961 	if (team == NULL) {
3962 		errno = ESRCH;
3963 		return -1;
3964 	}
3965 
3966 	// get the team's session ID
3967 	pid_t sessionID = team->session_id;
3968 
3969 	team->UnlockAndReleaseReference();
3970 
3971 	return sessionID;
3972 }
3973 
3974 
3975 //	#pragma mark - User syscalls
3976 
3977 
/*!	Implements the exec*() syscalls: replaces the current team's program with
	the one at \a userPath, using the flattened arguments/environment.
	Returns only on failure.
*/
status_t
_user_exec(const char* userPath, const char* const* userFlatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char path[B_PATH_NAME_LENGTH];

	// validate and copy the path from userland
	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
		return B_BAD_ADDRESS;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);

	if (error == B_OK) {
		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
			envCount, umask);
			// this one only returns in case of error
	}

	free(flatArgs);
	return error;
}
4004 
4005 
/*!	Implements the fork() syscall. */
thread_id
_user_fork(void)
{
	return fork_team();
}
4011 
4012 
/*!	Implements the waitpid()/waitid() syscall: waits for a state change of a
	child of the calling team and reports it.
	\param child The child (or child selector) to wait for.
	\param flags Wait flags (e.g. WNOHANG and friends).
	\param userInfo Userland buffer for the siginfo, may be \c NULL.
	\param usageInfo Userland buffer for the child's resource usage, may be
		\c NULL.
*/
pid_t
_user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo,
	team_usage_info* usageInfo)
{
	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;
	if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo))
		return B_BAD_ADDRESS;

	siginfo_t info;
	team_usage_info usage_info;
	pid_t foundChild = wait_for_child(child, flags, info, usage_info);
	if (foundChild < 0)
		return syscall_restart_handle_post(foundChild);
		// may convert an interrupted wait into a syscall restart

	// copy info back to userland
	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
		return B_BAD_ADDRESS;
	// copy usage_info back to userland
	if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info,
		sizeof(usage_info)) != B_OK) {
		return B_BAD_ADDRESS;
	}

	return foundChild;
}
4039 
4040 
/*!	Returns the session, group, or parent ID of the given process, depending
	on \a which.
*/
pid_t
_user_process_info(pid_t process, int32 which)
{
	pid_t result;
	switch (which) {
		case SESSION_ID:
			result = getsid(process);
			break;
		case GROUP_ID:
			result = getpgid(process);
			break;
		case PARENT_ID:
			result = _getppid(process);
			break;
		default:
			return B_BAD_VALUE;
	}

	return result >= 0 ? result : errno;
		// On failure the getters above return -1 with errno set; errno is
		// presumably a negative Haiku error code here, so it can be returned
		// directly -- confirm against the errno definitions.
}
4061 
4062 
/*!	Implements the setpgid() syscall: moves the target process into the
	process group \a groupID, creating that group first, if necessary.
	\return The resulting group ID, or an error code.
*/
pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the target
	//   process as group leader.
	// * Set the target process' process group to an already existing one in the
	//   same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		// (inner loop handles the group lock ordering to avoid deadlocks)
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock in
			// the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child of
			// the calling team and in the same session. (If that's the case it
			// isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child, when the child has
			// already executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different session.
			// That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll();

		return group->id;
	}
}
4214 
4215 
/*!	Implements the setsid() syscall: makes the calling team the leader of a
	new session and of a new process group within that session.
	\return The new group (== session == team) ID, or an error code.
*/
pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll();

	return group->id;
}
4255 
4256 
4257 status_t
4258 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4259 {
4260 	status_t returnCode;
4261 	status_t status;
4262 
4263 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4264 		return B_BAD_ADDRESS;
4265 
4266 	status = wait_for_team(id, &returnCode);
4267 	if (status >= B_OK && _userReturnCode != NULL) {
4268 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4269 				!= B_OK)
4270 			return B_BAD_ADDRESS;
4271 		return B_OK;
4272 	}
4273 
4274 	return syscall_restart_handle_post(status);
4275 }
4276 
4277 
/*!	Implements the load_image() syscall: loads a new userland program from
	the flattened arguments supplied by userland.
	\param errorPort/errorToken Where the runtime loader reports load errors.
*/
thread_id
_user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, int32 priority, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));

	// at least the program path (args[0]) is required
	if (argCount < 1)
		return B_BAD_VALUE;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);
	if (error != B_OK)
		return error;

	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
		errorToken);

	free(flatArgs);
		// load_image_internal() unset our variable if it took over ownership

	return thread;
}
4304 
4305 
/*!	Implements the exit_team()/exit() syscall: records the exit status and
	takes the calling thread (and eventually the whole team) down via SIGKILL.
	Does not return to userland.
*/
void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status
	TeamLocker teamLocker(team);

	// only the first exit status sticks (e.g. if a signal killed us earlier)
	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();

	// Send this thread a SIGKILL. This makes sure the thread will not return to
	// userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}
4339 
4340 
/*!	Syscall entry for kill_team(): thin wrapper that delegates directly to
	the in-kernel kill_team() implementation.
	\param team ID of the team to kill.
	\return \c B_OK on success, or the error returned by kill_team().
*/
status_t
_user_kill_team(team_id team)
{
	return kill_team(team);
}
4346 
4347 
4348 status_t
4349 _user_get_team_info(team_id id, team_info* userInfo)
4350 {
4351 	status_t status;
4352 	team_info info;
4353 
4354 	if (!IS_USER_ADDRESS(userInfo))
4355 		return B_BAD_ADDRESS;
4356 
4357 	status = _get_team_info(id, &info, sizeof(team_info));
4358 	if (status == B_OK) {
4359 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4360 			return B_BAD_ADDRESS;
4361 	}
4362 
4363 	return status;
4364 }
4365 
4366 
4367 status_t
4368 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
4369 {
4370 	status_t status;
4371 	team_info info;
4372 	int32 cookie;
4373 
4374 	if (!IS_USER_ADDRESS(userCookie)
4375 		|| !IS_USER_ADDRESS(userInfo)
4376 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4377 		return B_BAD_ADDRESS;
4378 
4379 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
4380 	if (status != B_OK)
4381 		return status;
4382 
4383 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4384 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4385 		return B_BAD_ADDRESS;
4386 
4387 	return status;
4388 }
4389 
4390 
/*!	Syscall entry for get_current_team(): returns the ID of the calling
	thread's team. Cannot fail.
*/
team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}
4396 
4397 
4398 status_t
4399 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4400 	size_t size)
4401 {
4402 	if (size != sizeof(team_usage_info))
4403 		return B_BAD_VALUE;
4404 
4405 	team_usage_info info;
4406 	status_t status = common_get_team_usage_info(team, who, &info,
4407 		B_CHECK_PERMISSION);
4408 
4409 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4410 		|| user_memcpy(userInfo, &info, size) != B_OK) {
4411 		return B_BAD_ADDRESS;
4412 	}
4413 
4414 	return status;
4415 }
4416 
4417 
/*!	Syscall backend for get_extended_team_info(): collects extended info
	about the given team into a flattened KMessage and copies it to the
	userland \a buffer. Currently only \c B_TEAM_INFO_BASIC is implemented.
	\param teamID The team to query.
	\param flags Bit mask selecting which info groups to collect.
	\param buffer Userland buffer for the flattened message; may be \c NULL
		only if \a size is 0 (size-query mode).
	\param size Size of \a buffer in bytes.
	\param _sizeNeeded Userland pointer that receives the required buffer
		size; always written, even when \a buffer is too small.
	\return \c B_OK on success, \c B_BAD_ADDRESS for invalid user pointers,
		\c B_BAD_TEAM_ID if the team doesn't exist, \c B_NO_MEMORY if the
		message couldn't be built, or \c B_BUFFER_OVERFLOW if \a size is
		too small (with *\a _sizeNeeded still set).
*/
status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// allocate memory for a copy of the needed team data
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		} teamClone;

		io_context* ioContext;
		{
			// get the team structure
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);
				// both reference and lock are released when this scope ends

			// Copy the data into the local clone while holding the team
			// lock, so the KMessage can be built afterwards without it.
			teamClone.id = team->id;
			strlcpy(teamClone.name, team->Name(), sizeof(teamClone.name));
			teamClone.group_id = team->group_id;
			teamClone.session_id = team->session_id;
			teamClone.real_uid = team->real_uid;
			teamClone.real_gid = team->real_gid;
			teamClone.effective_uid = team->effective_uid;
			teamClone.effective_gid = team->effective_gid;

			// also fetch a reference to the I/O context
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		// release the I/O context reference automatically on any return path
		CObjectDeleter<io_context, void, vfs_put_io_context>
			ioContextPutter(ioContext);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone.id) != B_OK
			|| info.AddString("name", teamClone.name) != B_OK
			|| info.AddInt32("process group", teamClone.group_id) != B_OK
			|| info.AddInt32("session", teamClone.session_id) != B_OK
			|| info.AddInt32("uid", teamClone.real_uid) != B_OK
			|| info.AddInt32("gid", teamClone.real_gid) != B_OK
			|| info.AddInt32("euid", teamClone.effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone.effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			MutexLocker ioContextLocker(ioContext->io_mutex);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;
			// the caller can retry with *_sizeNeeded bytes

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
4511