xref: /haiku/src/system/kernel/team.cpp (revision b2b94ad94f541e09456be05515d4dacaeb551bc2)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 /*!	Team functions */
13 
14 
15 #include <team.h>
16 
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22 
23 #include <OS.h>
24 
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27 
28 #include <extended_system_info_defs.h>
29 
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <safemode.h>
47 #include <sem.h>
48 #include <syscall_process_info.h>
49 #include <syscall_load_image.h>
50 #include <syscall_restart.h>
51 #include <syscalls.h>
52 #include <tls.h>
53 #include <tracing.h>
54 #include <user_runtime.h>
55 #include <user_thread.h>
56 #include <usergroup.h>
57 #include <vfs.h>
58 #include <vm/vm.h>
59 #include <vm/VMAddressSpace.h>
60 #include <util/AutoLock.h>
61 #include <util/ThreadAutoLock.h>
62 
63 #include "TeamThreadTables.h"
64 
65 
66 //#define TRACE_TEAM
67 #ifdef TRACE_TEAM
68 #	define TRACE(x) dprintf x
69 #else
70 #	define TRACE(x) ;
71 #endif
72 
73 
74 struct team_key {
75 	team_id id;
76 };
77 
78 struct team_arg {
79 	char	*path;
80 	char	**flat_args;
81 	size_t	flat_args_size;
82 	uint32	arg_count;
83 	uint32	env_count;
84 	mode_t	umask;
85 	uint32	flags;
86 	port_id	error_port;
87 	uint32	error_token;
88 };
89 
90 #define TEAM_ARGS_FLAG_NO_ASLR	0x01
91 
92 
93 namespace {
94 
95 
96 class TeamNotificationService : public DefaultNotificationService {
97 public:
98 							TeamNotificationService();
99 
100 			void			Notify(uint32 eventCode, Team* team);
101 };
102 
103 
104 // #pragma mark - TeamTable
105 
106 
107 typedef BKernel::TeamThreadTable<Team> TeamTable;
108 
109 
110 // #pragma mark - ProcessGroupHashDefinition
111 
112 
113 struct ProcessGroupHashDefinition {
114 	typedef pid_t			KeyType;
115 	typedef	ProcessGroup	ValueType;
116 
117 	size_t HashKey(pid_t key) const
118 	{
119 		return key;
120 	}
121 
122 	size_t Hash(ProcessGroup* value) const
123 	{
124 		return HashKey(value->id);
125 	}
126 
127 	bool Compare(pid_t key, ProcessGroup* value) const
128 	{
129 		return value->id == key;
130 	}
131 
132 	ProcessGroup*& GetLink(ProcessGroup* value) const
133 	{
134 		return value->next;
135 	}
136 };
137 
138 typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
139 
140 
141 }	// unnamed namespace
142 
143 
144 // #pragma mark -
145 
146 
147 // the team_id -> Team hash table and the lock protecting it
148 static TeamTable sTeamHash;
149 static rw_spinlock sTeamHashLock = B_RW_SPINLOCK_INITIALIZER;
150 
151 // the pid_t -> ProcessGroup hash table and the lock protecting it
152 static ProcessGroupHashTable sGroupHash;
153 static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;
154 
155 static Team* sKernelTeam = NULL;
156 static bool sDisableUserAddOns = false;
157 
158 // A list of process groups of children of dying session leaders that need to
159 // be signalled if they have become orphaned and contain stopped processes.
160 static ProcessGroupList sOrphanedCheckProcessGroups;
161 static mutex sOrphanedCheckLock
162 	= MUTEX_INITIALIZER("orphaned process group check");
163 
164 // some arbitrarily chosen limits -- should probably depend on the available
165 // memory (the limit is not yet enforced)
166 static int32 sMaxTeams = 2048;
167 static int32 sUsedTeams = 1;
168 
169 static TeamNotificationService sNotificationService;
170 
171 static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
172 static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
173 
174 
175 // #pragma mark - TeamListIterator
176 
177 
178 TeamListIterator::TeamListIterator()
179 {
180 	// queue the entry
181 	InterruptsWriteSpinLocker locker(sTeamHashLock);
182 	sTeamHash.InsertIteratorEntry(&fEntry);
183 }
184 
185 
186 TeamListIterator::~TeamListIterator()
187 {
188 	// remove the entry
189 	InterruptsWriteSpinLocker locker(sTeamHashLock);
190 	sTeamHash.RemoveIteratorEntry(&fEntry);
191 }
192 
193 
194 Team*
195 TeamListIterator::Next()
196 {
197 	// get the next team -- if there is one, get a reference to it
198 	InterruptsWriteSpinLocker locker(sTeamHashLock);
199 	Team* team = sTeamHash.NextElement(&fEntry);
200 	if (team != NULL)
201 		team->AcquireReference();
202 
203 	return team;
204 }
205 
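/*!	Illustrative use of TeamListIterator (a sketch, not a call site in this
	file): Next() returns each team with a reference already acquired, so the
	caller must release it.

	\code
	TeamListIterator iterator;
	while (Team* team = iterator.Next()) {
		// ... inspect the team ...
		team->ReleaseReference();
	}
	\endcode
*/
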
206 
207 // #pragma mark - Tracing
208 
209 
210 #if TEAM_TRACING
211 namespace TeamTracing {
212 
213 class TeamForked : public AbstractTraceEntry {
214 public:
215 	TeamForked(thread_id forkedThread)
216 		:
217 		fForkedThread(forkedThread)
218 	{
219 		Initialized();
220 	}
221 
222 	virtual void AddDump(TraceOutput& out)
223 	{
224 		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
225 	}
226 
227 private:
228 	thread_id			fForkedThread;
229 };
230 
231 
232 class ExecTeam : public AbstractTraceEntry {
233 public:
234 	ExecTeam(const char* path, int32 argCount, const char* const* args,
235 			int32 envCount, const char* const* env)
236 		:
237 		fArgCount(argCount),
238 		fArgs(NULL)
239 	{
240 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
241 			false);
242 
243 		// determine the buffer size we need for the args
244 		size_t argBufferSize = 0;
245 		for (int32 i = 0; i < argCount; i++)
246 			argBufferSize += strlen(args[i]) + 1;
247 
248 		// allocate a buffer
249 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
250 		if (fArgs) {
251 			char* buffer = fArgs;
252 			for (int32 i = 0; i < argCount; i++) {
253 				size_t argSize = strlen(args[i]) + 1;
254 				memcpy(buffer, args[i], argSize);
255 				buffer += argSize;
256 			}
257 		}
258 
259 		// ignore env for the time being
260 		(void)envCount;
261 		(void)env;
262 
263 		Initialized();
264 	}
265 
266 	virtual void AddDump(TraceOutput& out)
267 	{
268 		out.Print("team exec, \"%p\", args:", fPath);
269 
270 		if (fArgs != NULL) {
271 			char* args = fArgs;
272 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
273 				out.Print(" \"%s\"", args);
274 				args += strlen(args) + 1;
275 			}
276 		} else
277 			out.Print(" <too long>");
278 	}
279 
280 private:
281 	char*	fPath;
282 	int32	fArgCount;
283 	char*	fArgs;
284 };
285 
286 
287 static const char*
288 job_control_state_name(job_control_state state)
289 {
290 	switch (state) {
291 		case JOB_CONTROL_STATE_NONE:
292 			return "none";
293 		case JOB_CONTROL_STATE_STOPPED:
294 			return "stopped";
295 		case JOB_CONTROL_STATE_CONTINUED:
296 			return "continued";
297 		case JOB_CONTROL_STATE_DEAD:
298 			return "dead";
299 		default:
300 			return "invalid";
301 	}
302 }
303 
304 
305 class SetJobControlState : public AbstractTraceEntry {
306 public:
307 	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
308 		:
309 		fTeam(team),
310 		fNewState(newState),
311 		fSignal(signal != NULL ? signal->Number() : 0)
312 	{
313 		Initialized();
314 	}
315 
316 	virtual void AddDump(TraceOutput& out)
317 	{
318 		out.Print("team set job control state, team %" B_PRId32 ", "
319 			"new state: %s, signal: %d",
320 			fTeam, job_control_state_name(fNewState), fSignal);
321 	}
322 
323 private:
324 	team_id				fTeam;
325 	job_control_state	fNewState;
326 	int					fSignal;
327 };
328 
329 
330 class WaitForChild : public AbstractTraceEntry {
331 public:
332 	WaitForChild(pid_t child, uint32 flags)
333 		:
334 		fChild(child),
335 		fFlags(flags)
336 	{
337 		Initialized();
338 	}
339 
340 	virtual void AddDump(TraceOutput& out)
341 	{
342 		out.Print("team wait for child, child: %" B_PRId32 ", "
343 			"flags: %#" B_PRIx32, fChild, fFlags);
344 	}
345 
346 private:
347 	pid_t	fChild;
348 	uint32	fFlags;
349 };
350 
351 
352 class WaitForChildDone : public AbstractTraceEntry {
353 public:
354 	WaitForChildDone(const job_control_entry& entry)
355 		:
356 		fState(entry.state),
357 		fTeam(entry.thread),
358 		fStatus(entry.status),
359 		fReason(entry.reason),
360 		fSignal(entry.signal)
361 	{
362 		Initialized();
363 	}
364 
365 	WaitForChildDone(status_t error)
366 		:
367 		fTeam(error)
368 	{
369 		Initialized();
370 	}
371 
372 	virtual void AddDump(TraceOutput& out)
373 	{
374 		if (fTeam >= 0) {
375 			out.Print("team wait for child done, team: %" B_PRId32 ", "
376 				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
377 				fTeam, job_control_state_name(fState), fStatus, fReason,
378 				fSignal);
379 		} else {
380 			out.Print("team wait for child failed, error: "
381 				"%#" B_PRIx32 ", ", fTeam);
382 		}
383 	}
384 
385 private:
386 	job_control_state	fState;
387 	team_id				fTeam;
388 	status_t			fStatus;
389 	uint16				fReason;
390 	uint16				fSignal;
391 };
392 
393 }	// namespace TeamTracing
394 
395 #	define T(x) new(std::nothrow) TeamTracing::x;
396 #else
397 #	define T(x) ;
398 #endif
399 
400 
401 //	#pragma mark - TeamNotificationService
402 
403 
404 TeamNotificationService::TeamNotificationService()
405 	: DefaultNotificationService("teams")
406 {
407 }
408 
409 
410 void
411 TeamNotificationService::Notify(uint32 eventCode, Team* team)
412 {
413 	char eventBuffer[128];
414 	KMessage event;
415 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
416 	event.AddInt32("event", eventCode);
417 	event.AddInt32("team", team->id);
418 	event.AddPointer("teamStruct", team);
419 
420 	DefaultNotificationService::Notify(event, eventCode);
421 }
422 
423 
424 //	#pragma mark - Team
425 
426 
427 Team::Team(team_id id, bool kernel)
428 {
429 	// set the team ID
430 	this->id = id;
431 	visible = true;
432 
433 	hash_next = siblings_next = parent = children = group_next = NULL;
434 	serial_number = -1;
435 
436 	group_id = session_id = -1;
437 	group = NULL;
438 
439 	num_threads = 0;
440 	state = TEAM_STATE_BIRTH;
441 	flags = 0;
442 	io_context = NULL;
443 	realtime_sem_context = NULL;
444 	xsi_sem_context = NULL;
445 	death_entry = NULL;
446 	list_init(&dead_threads);
447 
448 	dead_children.condition_variable.Init(&dead_children, "team children");
449 	dead_children.count = 0;
450 	dead_children.kernel_time = 0;
451 	dead_children.user_time = 0;
452 
453 	job_control_entry = new(nothrow) ::job_control_entry;
454 	if (job_control_entry != NULL) {
455 		job_control_entry->state = JOB_CONTROL_STATE_NONE;
456 		job_control_entry->thread = id;
457 		job_control_entry->team = this;
458 	}
459 
460 	address_space = NULL;
461 	main_thread = NULL;
462 	thread_list = NULL;
463 	loading_info = NULL;
464 
465 	list_init(&image_list);
466 	list_init(&watcher_list);
467 	list_init(&sem_list);
468 	list_init_etc(&port_list, port_team_link_offset());
469 
470 	user_data = 0;
471 	user_data_area = -1;
472 	used_user_data = 0;
473 	user_data_size = 0;
474 	free_user_threads = NULL;
475 
476 	commpage_address = NULL;
477 
478 	clear_team_debug_info(&debug_info, true);
479 
480 	dead_threads_kernel_time = 0;
481 	dead_threads_user_time = 0;
482 	cpu_clock_offset = 0;
483 	B_INITIALIZE_SPINLOCK(&time_lock);
484 
485 	saved_set_uid = real_uid = effective_uid = -1;
486 	saved_set_gid = real_gid = effective_gid = -1;
487 
488 	// exit status -- setting initialized to false suffices
489 	exit.initialized = false;
490 
491 	B_INITIALIZE_SPINLOCK(&signal_lock);
492 
493 	// init mutex
494 	if (kernel) {
495 		mutex_init(&fLock, "Team:kernel");
496 	} else {
497 		char lockName[16];
498 		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
499 		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
500 	}
501 
502 	fName[0] = '\0';
503 	fArgs[0] = '\0';
504 
505 	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
506 		kernel ? -1 : MAX_QUEUED_SIGNALS);
507 	memset(fSignalActions, 0, sizeof(fSignalActions));
508 	fUserDefinedTimerCount = 0;
509 
510 	fCoreDumpCondition = NULL;
511 }
512 
513 
514 Team::~Team()
515 {
516 	// get rid of all associated data
517 	PrepareForDeletion();
518 
519 	if (io_context != NULL)
520 		vfs_put_io_context(io_context);
521 	delete_owned_ports(this);
522 	sem_delete_owned_sems(this);
523 
524 	DeleteUserTimers(false);
525 
526 	fPendingSignals.Clear();
527 
528 	if (fQueuedSignalsCounter != NULL)
529 		fQueuedSignalsCounter->ReleaseReference();
530 
531 	while (thread_death_entry* threadDeathEntry
532 			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
533 		free(threadDeathEntry);
534 	}
535 
536 	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
537 		delete entry;
538 
539 	while (free_user_thread* entry = free_user_threads) {
540 		free_user_threads = entry->next;
541 		free(entry);
542 	}
543 
544 	delete job_control_entry;
545 		// usually already NULL and transferred to the parent
546 
547 	mutex_destroy(&fLock);
548 }
549 
550 
551 /*static*/ Team*
552 Team::Create(team_id id, const char* name, bool kernel)
553 {
554 	// create the team object
555 	Team* team = new(std::nothrow) Team(id, kernel);
556 	if (team == NULL)
557 		return NULL;
558 	ObjectDeleter<Team> teamDeleter(team);
559 
560 	if (name != NULL)
561 		team->SetName(name);
562 
563 	// check initialization
564 	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
565 		return NULL;
566 
567 	// finish initialization (arch specifics)
568 	if (arch_team_init_team_struct(team, kernel) != B_OK)
569 		return NULL;
570 
571 	if (!kernel) {
572 		status_t error = user_timer_create_team_timers(team);
573 		if (error != B_OK)
574 			return NULL;
575 	}
576 
577 	team->start_time = system_time();
578 
579 	// everything went fine
580 	return teamDeleter.Detach();
581 }
582 
583 
584 /*!	\brief Returns the team with the given ID.
585 	The caller receives a reference to the team.
586 	The team and thread spinlocks must not be held.
587 */
588 /*static*/ Team*
589 Team::Get(team_id id)
590 {
591 	if (id == B_CURRENT_TEAM) {
592 		Team* team = thread_get_current_thread()->team;
593 		team->AcquireReference();
594 		return team;
595 	}
596 
597 	InterruptsReadSpinLocker locker(sTeamHashLock);
598 	Team* team = sTeamHash.Lookup(id);
599 	if (team != NULL)
600 		team->AcquireReference();
601 	return team;
602 }
603 
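/*!	Illustrative use of Team::Get() (a sketch): the acquired reference can be
	handed over to a BReference for automatic release, as done by the callers
	in this file.

	\code
	Team* team = Team::Get(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);
		// "true": adopt the reference acquired by Get()
	// ... use the team ...
	\endcode
*/
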
604 
605 /*!	\brief Returns the team with the given ID in a locked state.
606 	The caller receives a reference to the team.
607 	The team and thread spinlocks must not be held.
608 */
609 /*static*/ Team*
610 Team::GetAndLock(team_id id)
611 {
612 	// get the team
613 	Team* team = Get(id);
614 	if (team == NULL)
615 		return NULL;
616 
617 	// lock it
618 	team->Lock();
619 
620 	// only return the team if it isn't already dying
621 	if (team->state >= TEAM_STATE_SHUTDOWN) {
622 		team->Unlock();
623 		team->ReleaseReference();
624 		return NULL;
625 	}
626 
627 	return team;
628 }
629 
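/*!	Illustrative use of Team::GetAndLock() (a sketch, mirroring the pattern
	in _getppid() below):

	\code
	Team* team = Team::GetAndLock(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	// ... the team is referenced and locked here ...
	team->UnlockAndReleaseReference();
	\endcode
*/
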
630 
631 /*!	Locks the team and its parent team (if any).
632 	The caller must hold a reference to the team or otherwise make sure that
633 	it won't be deleted.
634 	If the team doesn't have a parent, only the team itself is locked. If the
635 	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
636 	only the team itself is locked.
637 
638 	\param dontLockParentIfKernel If \c true, the team's parent team is only
639 		locked, if it is not the kernel team.
640 */
641 void
642 Team::LockTeamAndParent(bool dontLockParentIfKernel)
643 {
644 	// The locking order is parent -> child. Since the parent can change as long
645 	// as we don't lock the team, we need to do a trial and error loop.
646 	Lock();
647 
648 	while (true) {
649 		// If the team doesn't have a parent, we're done. Otherwise try to lock
650 		// the parent. This will succeed in most cases, simplifying things.
651 		Team* parent = this->parent;
652 		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
653 			|| parent->TryLock()) {
654 			return;
655 		}
656 
657 		// get a temporary reference to the parent, unlock this team, lock the
658 		// parent, and re-lock this team
659 		BReference<Team> parentReference(parent);
660 
661 		Unlock();
662 		parent->Lock();
663 		Lock();
664 
665 		// If the parent hasn't changed in the meantime, we're done.
666 		if (this->parent == parent)
667 			return;
668 
669 		// The parent has changed -- unlock and retry.
670 		parent->Unlock();
671 	}
672 }
673 
674 
675 /*!	Unlocks the team and its parent team (if any).
676 */
677 void
678 Team::UnlockTeamAndParent()
679 {
680 	if (parent != NULL)
681 		parent->Unlock();
682 
683 	Unlock();
684 }
685 
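/*!	Illustrative pairing of LockTeamAndParent() with UnlockTeamAndParent()
	(a sketch, mirroring ProcessGroup::IsOrphaned() below):

	\code
	team->LockTeamAndParent(false);
	Team* parent = team->parent;
		// stable while both locks are held
	// ...
	team->UnlockTeamAndParent();
	\endcode
*/
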
686 
687 /*!	Locks the team, its parent team (if any), and the team's process group.
688 	The caller must hold a reference to the team or otherwise make sure that
689 	it won't be deleted.
690 	If the team doesn't have a parent, only the team itself is locked.
691 */
692 void
693 Team::LockTeamParentAndProcessGroup()
694 {
695 	LockTeamAndProcessGroup();
696 
697 	// We hold the group's and the team's lock, but not the parent team's lock.
698 	// If we have a parent, try to lock it.
699 	if (this->parent == NULL || this->parent->TryLock())
700 		return;
701 
702 	// No success -- unlock the team and let LockTeamAndParent() do the rest of
703 	// the job.
704 	Unlock();
705 	LockTeamAndParent(false);
706 }
707 
708 
709 /*!	Unlocks the team, its parent team (if any), and the team's process group.
710 */
711 void
712 Team::UnlockTeamParentAndProcessGroup()
713 {
714 	group->Unlock();
715 
716 	if (parent != NULL)
717 		parent->Unlock();
718 
719 	Unlock();
720 }
721 
722 
723 void
724 Team::LockTeamAndProcessGroup()
725 {
726 	// The locking order is process group -> child. Since the process group can
727 	// change as long as we don't lock the team, we need to do a trial and error
728 	// loop.
729 	Lock();
730 
731 	while (true) {
732 		// Try to lock the group. This will succeed in most cases, simplifying
733 		// things.
734 		ProcessGroup* group = this->group;
735 		if (group == NULL)
736 			return;
737 
738 		if (group->TryLock())
739 			return;
740 
741 		// get a temporary reference to the group, unlock this team, lock the
742 		// group, and re-lock this team
743 		BReference<ProcessGroup> groupReference(group);
744 
745 		Unlock();
746 		group->Lock();
747 		Lock();
748 
749 		// If the group hasn't changed in the meantime, we're done.
750 		if (this->group == group)
751 			return;
752 
753 		// The group has changed -- unlock and retry.
754 		group->Unlock();
755 	}
756 }
757 
758 
759 void
760 Team::UnlockTeamAndProcessGroup()
761 {
762 	group->Unlock();
763 	Unlock();
764 }
765 
766 
767 void
768 Team::SetName(const char* name)
769 {
770 	if (const char* lastSlash = strrchr(name, '/'))
771 		name = lastSlash + 1;
772 
773 	strlcpy(fName, name, B_OS_NAME_LENGTH);
774 }
775 
776 
777 void
778 Team::SetArgs(const char* args)
779 {
780 	strlcpy(fArgs, args, sizeof(fArgs));
781 }
782 
783 
784 void
785 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
786 {
787 	fArgs[0] = '\0';
788 	strlcpy(fArgs, path, sizeof(fArgs));
789 	for (int i = 0; i < otherArgCount; i++) {
790 		strlcat(fArgs, " ", sizeof(fArgs));
791 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
792 	}
793 }
794 
795 
796 void
797 Team::ResetSignalsOnExec()
798 {
799 	// We are supposed to keep pending signals. Signal actions shall be reset
800 	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
801 	// (for SIGCHLD it's implementation-defined). Others shall be reset to
802 	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
803 	// flags, but since there aren't any handlers, they make little sense, so
804 	// we clear them.
805 
806 	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
807 		struct sigaction& action = SignalActionFor(i);
808 		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
809 			action.sa_handler = SIG_DFL;
810 
811 		action.sa_mask = 0;
812 		action.sa_flags = 0;
813 		action.sa_userdata = NULL;
814 	}
815 }
816 
817 
818 void
819 Team::InheritSignalActions(Team* parent)
820 {
821 	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
822 }
823 
824 
825 /*!	Adds the given user timer to the team and, if user-defined, assigns it an
826 	ID.
827 
828 	The caller must hold the team's lock.
829 
830 	\param timer The timer to be added. If it doesn't have an ID yet, it is
831 		considered user-defined and will be assigned an ID.
832 	\return \c B_OK, if the timer was added successfully, another error code
833 		otherwise.
834 */
835 status_t
836 Team::AddUserTimer(UserTimer* timer)
837 {
838 	// don't allow addition of timers when already shutting the team down
839 	if (state >= TEAM_STATE_SHUTDOWN)
840 		return B_BAD_TEAM_ID;
841 
842 	// If the timer is user-defined, check timer limit and increment
843 	// user-defined count.
844 	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
845 		return EAGAIN;
846 
847 	fUserTimers.AddTimer(timer);
848 
849 	return B_OK;
850 }
851 
852 
853 /*!	Removes the given user timer from the team.
854 
855 	The caller must hold the team's lock.
856 
857 	\param timer The timer to be removed.
858 
859 */
860 void
861 Team::RemoveUserTimer(UserTimer* timer)
862 {
863 	fUserTimers.RemoveTimer(timer);
864 
865 	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
866 		UserDefinedTimersRemoved(1);
867 }
868 
869 
870 /*!	Deletes all (or all user-defined) user timers of the team.
871 
872 	Timers belonging to the team's threads are not affected.
873 	The caller must hold the team's lock.
874 
875 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
876 		otherwise all timers are deleted.
877 */
878 void
879 Team::DeleteUserTimers(bool userDefinedOnly)
880 {
881 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
882 	UserDefinedTimersRemoved(count);
883 }
884 
885 
886 /*!	If not at the limit yet, increments the team's user-defined timer count.
887 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
888 */
889 bool
890 Team::CheckAddUserDefinedTimer()
891 {
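	// Optimistically increment first; if the previous value had already
	// reached the limit, undo the increment again. This keeps the check
	// race-free without taking a lock.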
892 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
893 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
894 		atomic_add(&fUserDefinedTimerCount, -1);
895 		return false;
896 	}
897 
898 	return true;
899 }
900 
901 
902 /*!	Subtracts the given count from the team's user-defined timer count.
903 	\param count The count to subtract.
904 */
905 void
906 Team::UserDefinedTimersRemoved(int32 count)
907 {
908 	atomic_add(&fUserDefinedTimerCount, -count);
909 }
910 
911 
912 void
913 Team::DeactivateCPUTimeUserTimers()
914 {
915 	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
916 		timer->Deactivate();
917 
918 	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
919 		timer->Deactivate();
920 }
921 
922 
923 /*!	Returns the team's current total CPU time (kernel + user + offset).
924 
925 	The caller must hold \c time_lock.
926 
927 	\param ignoreCurrentRun If \c true and the current thread is one of the
928 		team's threads, don't add the time since the last time \c last_time was
929 		updated. Should be used in "thread unscheduled" scheduler callbacks,
930 		since although the thread is still running at that time, its time has
931 		already been stopped.
932 	\return The team's current total CPU time.
933 */
934 bigtime_t
935 Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
936 {
937 	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
938 		+ dead_threads_user_time;
939 
940 	Thread* currentThread = thread_get_current_thread();
941 	bigtime_t now = system_time();
942 
943 	for (Thread* thread = thread_list; thread != NULL;
944 			thread = thread->team_next) {
945 		bool alreadyLocked = thread == lockedThread;
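		// If the caller already holds this thread's time_lock, adopt it here
		// and detach again below, so that it remains held when we return.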
946 		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
947 		time += thread->kernel_time + thread->user_time;
948 
949 		if (thread->last_time != 0) {
950 			if (!ignoreCurrentRun || thread != currentThread)
951 				time += now - thread->last_time;
952 		}
953 
954 		if (alreadyLocked)
955 			threadTimeLocker.Detach();
956 	}
957 
958 	return time;
959 }
960 
961 
962 /*!	Returns the team's current user CPU time.
963 
964 	The caller must hold \c time_lock.
965 
966 	\return The team's current user CPU time.
967 */
968 bigtime_t
969 Team::UserCPUTime() const
970 {
971 	bigtime_t time = dead_threads_user_time;
972 
973 	bigtime_t now = system_time();
974 
975 	for (Thread* thread = thread_list; thread != NULL;
976 			thread = thread->team_next) {
977 		SpinLocker threadTimeLocker(thread->time_lock);
978 		time += thread->user_time;
979 
980 		if (thread->last_time != 0 && !thread->in_kernel)
981 			time += now - thread->last_time;
982 	}
983 
984 	return time;
985 }
986 
987 
988 //	#pragma mark - ProcessGroup
989 
990 
991 ProcessGroup::ProcessGroup(pid_t id)
992 	:
993 	id(id),
994 	teams(NULL),
995 	fSession(NULL),
996 	fInOrphanedCheckList(false)
997 {
998 	char lockName[32];
999 	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
1000 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
1001 }
1002 
1003 
1004 ProcessGroup::~ProcessGroup()
1005 {
1006 	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));
1007 
1008 	// If the group is in the orphaned check list, remove it.
1009 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1010 
1011 	if (fInOrphanedCheckList)
1012 		sOrphanedCheckProcessGroups.Remove(this);
1013 
1014 	orphanedCheckLocker.Unlock();
1015 
1016 	// remove group from the hash table and from the session
1017 	if (fSession != NULL) {
1018 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1019 		sGroupHash.RemoveUnchecked(this);
1020 		groupHashLocker.Unlock();
1021 
1022 		fSession->ReleaseReference();
1023 	}
1024 
1025 	mutex_destroy(&fLock);
1026 }
1027 
1028 
1029 /*static*/ ProcessGroup*
1030 ProcessGroup::Get(pid_t id)
1031 {
1032 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1033 	ProcessGroup* group = sGroupHash.Lookup(id);
1034 	if (group != NULL)
1035 		group->AcquireReference();
1036 	return group;
1037 }
1038 
1039 
1040 /*!	Adds the group to the given session and makes it publicly accessible.
1041 	The caller must not hold the process group hash lock.
1042 */
1043 void
1044 ProcessGroup::Publish(ProcessSession* session)
1045 {
1046 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1047 	PublishLocked(session);
1048 }
1049 
1050 
1051 /*!	Adds the group to the given session and makes it publicly accessible.
1052 	The caller must hold the process group hash lock.
1053 */
1054 void
1055 ProcessGroup::PublishLocked(ProcessSession* session)
1056 {
1057 	ASSERT(sGroupHash.Lookup(this->id) == NULL);
1058 
1059 	fSession = session;
1060 	fSession->AcquireReference();
1061 
1062 	sGroupHash.InsertUnchecked(this);
1063 }
1064 
1065 
1066 /*!	Checks whether the process group is orphaned.
1067 	The caller must hold the group's lock.
1068 	\return \c true, if the group is orphaned, \c false otherwise.
1069 */
1070 bool
1071 ProcessGroup::IsOrphaned() const
1072 {
1073 	// Orphaned Process Group: "A process group in which the parent of every
1074 	// member is either itself a member of the group or is not a member of the
1075 	// group's session." (Open Group Base Specs Issue 7)
1076 	bool orphaned = true;
1077 
1078 	Team* team = teams;
1079 	while (orphaned && team != NULL) {
1080 		team->LockTeamAndParent(false);
1081 
1082 		Team* parent = team->parent;
1083 		if (parent != NULL && parent->group_id != id
1084 			&& parent->session_id == fSession->id) {
1085 			orphaned = false;
1086 		}
1087 
1088 		team->UnlockTeamAndParent();
1089 
1090 		team = team->group_next;
1091 	}
1092 
1093 	return orphaned;
1094 }
1095 
1096 
1097 void
1098 ProcessGroup::ScheduleOrphanedCheck()
1099 {
1100 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1101 
1102 	if (!fInOrphanedCheckList) {
1103 		sOrphanedCheckProcessGroups.Add(this);
1104 		fInOrphanedCheckList = true;
1105 	}
1106 }
1107 
1108 
1109 void
1110 ProcessGroup::UnsetOrphanedCheck()
1111 {
1112 	fInOrphanedCheckList = false;
1113 }
1114 
1115 
1116 //	#pragma mark - ProcessSession
1117 
1118 
1119 ProcessSession::ProcessSession(pid_t id)
1120 	:
1121 	id(id),
1122 	controlling_tty(NULL),
1123 	foreground_group(-1)
1124 {
1125 	char lockName[32];
1126 	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
1127 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
1128 }
1129 
1130 
1131 ProcessSession::~ProcessSession()
1132 {
1133 	mutex_destroy(&fLock);
1134 }
1135 
1136 
1137 //	#pragma mark - KDL functions
1138 
1139 
1140 static void
1141 _dump_team_info(Team* team)
1142 {
1143 	kprintf("TEAM: %p\n", team);
1144 	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
1145 		team->id);
1146 	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
1147 	kprintf("name:             '%s'\n", team->Name());
1148 	kprintf("args:             '%s'\n", team->Args());
1149 	kprintf("hash_next:        %p\n", team->hash_next);
1150 	kprintf("parent:           %p", team->parent);
1151 	if (team->parent != NULL) {
1152 		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
1153 	} else
1154 		kprintf("\n");
1155 
1156 	kprintf("children:         %p\n", team->children);
1157 	kprintf("num_threads:      %d\n", team->num_threads);
1158 	kprintf("state:            %d\n", team->state);
1159 	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
1160 	kprintf("io_context:       %p\n", team->io_context);
1161 	if (team->address_space)
1162 		kprintf("address_space:    %p\n", team->address_space);
1163 	kprintf("user data:        %p (area %" B_PRId32 ")\n",
1164 		(void*)team->user_data, team->user_data_area);
1165 	kprintf("free user thread: %p\n", team->free_user_threads);
1166 	kprintf("main_thread:      %p\n", team->main_thread);
1167 	kprintf("thread_list:      %p\n", team->thread_list);
1168 	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
1169 	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
1170 }
1171 
1172 
1173 static int
1174 dump_team_info(int argc, char** argv)
1175 {
1176 	ulong arg;
1177 	bool found = false;
1178 
1179 	if (argc < 2) {
1180 		Thread* thread = thread_get_current_thread();
1181 		if (thread != NULL && thread->team != NULL)
1182 			_dump_team_info(thread->team);
1183 		else
1184 			kprintf("No current team!\n");
1185 		return 0;
1186 	}
1187 
1188 	arg = strtoul(argv[1], NULL, 0);
1189 	if (IS_KERNEL_ADDRESS(arg)) {
1190 		// semi-hack
1191 		_dump_team_info((Team*)arg);
1192 		return 0;
1193 	}
1194 
1195 	// walk through the team list, trying to match name or id
1196 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1197 		Team* team = it.Next();) {
1198 		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
1199 			|| team->id == (team_id)arg) {
1200 			_dump_team_info(team);
1201 			found = true;
1202 			break;
1203 		}
1204 	}
1205 
1206 	if (!found)
1207 		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
1208 	return 0;
1209 }
1210 
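/*!	Illustrative KDL invocations (assuming the command is registered as
	"team" elsewhere in this file):

	  team             - dumps the current thread's team
	  team 0x80123456  - interprets a kernel address as a Team pointer
	  team 123         - dumps the team with ID 123
	  team app_server  - dumps the team named "app_server"
*/
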
1211 
1212 static int
1213 dump_teams(int argc, char** argv)
1214 {
1215 	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
1216 		B_PRINTF_POINTER_WIDTH, "parent");
1217 
1218 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1219 		Team* team = it.Next();) {
1220 		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
1221 	}
1222 
1223 	return 0;
1224 }
1225 
1226 
1227 //	#pragma mark - Private functions
1228 
1229 
1230 /*! Get the parent of a given process.
1231 
1232 	Used in the implementation of getppid() (where a process may only query
1233 	its own parent) as well as in user_process_info, where the information is
1234 	available to anyone (making it possible to display a tree of running processes).
1235 */
1236 static pid_t
1237 _getppid(pid_t id)
1238 {
1239 	if (id < 0) {
1240 		errno = EINVAL;
1241 		return -1;
1242 	}
1243 
1244 	if (id == 0) {
1245 		Team* team = thread_get_current_thread()->team;
1246 		TeamLocker teamLocker(team);
1247 		if (team->parent == NULL) {
1248 			errno = EINVAL;
1249 			return -1;
1250 		}
1251 		return team->parent->id;
1252 	}
1253 
1254 	Team* team = Team::GetAndLock(id);
1255 	if (team == NULL) {
1256 		errno = ESRCH;
1257 		return -1;
1258 	}
1259 
1260 	pid_t parentID;
1261 
1262 	if (team->parent == NULL) {
1263 		errno = EINVAL;
1264 		parentID = -1;
1265 	} else
1266 		parentID = team->parent->id;
1267 
1268 	team->UnlockAndReleaseReference();
1269 
1270 	return parentID;
1271 }
1272 
1273 
1274 /*!	Inserts team \a team into the child list of team \a parent.
1275 
1276 	The caller must hold the lock of both \a parent and \a team.
1277 
1278 	\param parent The parent team.
1279 	\param team The team to be inserted into \a parent's child list.
1280 */
1281 static void
1282 insert_team_into_parent(Team* parent, Team* team)
1283 {
1284 	ASSERT(parent != NULL);
1285 
1286 	team->siblings_next = parent->children;
1287 	parent->children = team;
1288 	team->parent = parent;
1289 }
1290 
1291 
1292 /*!	Removes team \a team from the child list of team \a parent.
1293 
1294 	The caller must hold the lock of both \a parent and \a team.
1295 
1296 	\param parent The parent team.
1297 	\param team The team to be removed from \a parent's child list.
1298 */
1299 static void
1300 remove_team_from_parent(Team* parent, Team* team)
1301 {
1302 	Team* child;
1303 	Team* last = NULL;
1304 
1305 	for (child = parent->children; child != NULL;
1306 			child = child->siblings_next) {
1307 		if (child == team) {
1308 			if (last == NULL)
1309 				parent->children = child->siblings_next;
1310 			else
1311 				last->siblings_next = child->siblings_next;
1312 
1313 			team->parent = NULL;
1314 			break;
1315 		}
1316 		last = child;
1317 	}
1318 }
1319 
1320 
1321 /*!	Returns whether the given team is a session leader.
1322 	The caller must hold the team's lock or its process group's lock.
1323 */
1324 static bool
1325 is_session_leader(Team* team)
1326 {
1327 	return team->session_id == team->id;
1328 }
1329 
1330 
1331 /*!	Returns whether the given team is a process group leader.
1332 	The caller must hold the team's lock or its process group's lock.
1333 */
1334 static bool
1335 is_process_group_leader(Team* team)
1336 {
1337 	return team->group_id == team->id;
1338 }
1339 
1340 
1341 /*!	Inserts the given team into the given process group.
1342 	The caller must hold the process group's lock, the team's lock, and the
1343 	team's parent's lock.
1344 */
1345 static void
1346 insert_team_into_group(ProcessGroup* group, Team* team)
1347 {
1348 	team->group = group;
1349 	team->group_id = group->id;
1350 	team->session_id = group->Session()->id;
1351 
1352 	team->group_next = group->teams;
1353 	group->teams = team;
1354 	group->AcquireReference();
1355 }
1356 
1357 
1358 /*!	Removes the given team from its process group.
1359 
1360 	The caller must hold the process group's lock, the team's lock, and the
1361 	team's parent's lock. Interrupts must be enabled.
1362 
1363 	\param team The team that'll be removed from its process group.
1364 */
1365 static void
1366 remove_team_from_group(Team* team)
1367 {
1368 	ProcessGroup* group = team->group;
1369 	Team* current;
1370 	Team* last = NULL;
1371 
1372 	// this function has no effect if the team is not in a process group
1373 	if (group == NULL)
1374 		return;
1375 
1376 	for (current = group->teams; current != NULL;
1377 			current = current->group_next) {
1378 		if (current == team) {
1379 			if (last == NULL)
1380 				group->teams = current->group_next;
1381 			else
1382 				last->group_next = current->group_next;
1383 
1384 			break;
1385 		}
1386 		last = current;
1387 	}
1388 
1389 	team->group = NULL;
1390 	team->group_next = NULL;
1391 	team->group_id = -1;
1392 
1393 	group->ReleaseReference();
1394 }
1395 
1396 
1397 static status_t
1398 create_team_user_data(Team* team, void* exactAddress = NULL)
1399 {
1400 	void* address;
1401 	uint32 addressSpec;
1402 
1403 	if (exactAddress != NULL) {
1404 		address = exactAddress;
1405 		addressSpec = B_EXACT_ADDRESS;
1406 	} else {
1407 		address = (void*)KERNEL_USER_DATA_BASE;
1408 		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
1409 	}
1410 
1411 	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
1412 		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);
1413 
1414 	virtual_address_restrictions virtualRestrictions = {};
1415 	if (result == B_OK || exactAddress != NULL) {
1416 		if (exactAddress != NULL)
1417 			virtualRestrictions.address = exactAddress;
1418 		else
1419 			virtualRestrictions.address = address;
1420 		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
1421 	} else {
1422 		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
1423 		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
1424 	}
1425 
1426 	physical_address_restrictions physicalRestrictions = {};
1427 	team->user_data_area = create_area_etc(team->id, "user area",
1428 		kTeamUserDataInitialSize, B_FULL_LOCK,
1429 		B_READ_AREA | B_WRITE_AREA | B_KERNEL_AREA, 0, 0,
1430 		&virtualRestrictions, &physicalRestrictions, &address);
1431 	if (team->user_data_area < 0)
1432 		return team->user_data_area;
1433 
1434 	team->user_data = (addr_t)address;
1435 	team->used_user_data = 0;
1436 	team->user_data_size = kTeamUserDataInitialSize;
1437 	team->free_user_threads = NULL;
1438 
1439 	return B_OK;
1440 }
1441 
1442 
1443 static void
1444 delete_team_user_data(Team* team)
1445 {
1446 	if (team->user_data_area >= 0) {
1447 		vm_delete_area(team->id, team->user_data_area, true);
1448 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1449 			kTeamUserDataReservedSize);
1450 
1451 		team->user_data = 0;
1452 		team->used_user_data = 0;
1453 		team->user_data_size = 0;
1454 		team->user_data_area = -1;
1455 		while (free_user_thread* entry = team->free_user_threads) {
1456 			team->free_user_threads = entry->next;
1457 			free(entry);
1458 		}
1459 	}
1460 }
1461 
1462 
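/*!	Copies the flat arguments buffer from userland and verifies its layout.
	The buffer is expected to look as follows (as checked below):

	  flatArgs[0 .. argCount - 1]                   | argument pointers
	  flatArgs[argCount]                            | NULL terminator
	  flatArgs[argCount + 1 .. argCount + envCount] | environment pointers
	  flatArgs[argCount + envCount + 1]             | NULL terminator
	  strings                                       | the referenced strings

	Every non-NULL pointer must reference a NUL-terminated string within the
	string section of the same buffer; the pointers are relocated to the
	kernel copy.
*/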
1463 static status_t
1464 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1465 	int32 argCount, int32 envCount, char**& _flatArgs)
1466 {
1467 	if (argCount < 0 || envCount < 0)
1468 		return B_BAD_VALUE;
1469 
1470 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1471 		return B_TOO_MANY_ARGS;
1472 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1473 		return B_BAD_VALUE;
1474 
1475 	if (!IS_USER_ADDRESS(userFlatArgs))
1476 		return B_BAD_ADDRESS;
1477 
1478 	// allocate kernel memory
1479 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1480 	if (flatArgs == NULL)
1481 		return B_NO_MEMORY;
1482 
1483 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1484 		free(flatArgs);
1485 		return B_BAD_ADDRESS;
1486 	}
1487 
1488 	// check and relocate the array
1489 	status_t error = B_OK;
1490 	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
1491 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1492 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1493 		if (i == argCount || i == argCount + envCount + 1) {
1494 			// check array null termination
1495 			if (flatArgs[i] != NULL) {
1496 				error = B_BAD_VALUE;
1497 				break;
1498 			}
1499 		} else {
1500 			// check string
1501 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1502 			size_t maxLen = stringEnd - arg;
1503 			if (arg < stringBase || arg >= stringEnd
1504 					|| strnlen(arg, maxLen) == maxLen) {
1505 				error = B_BAD_VALUE;
1506 				break;
1507 			}
1508 
1509 			flatArgs[i] = arg;
1510 		}
1511 	}
1512 
1513 	if (error == B_OK)
1514 		_flatArgs = flatArgs;
1515 	else
1516 		free(flatArgs);
1517 
1518 	return error;
1519 }
1520 
1521 
1522 static void
1523 free_team_arg(struct team_arg* teamArg)
1524 {
1525 	if (teamArg != NULL) {
1526 		free(teamArg->flat_args);
1527 		free(teamArg->path);
1528 		free(teamArg);
1529 	}
1530 }
1531 
1532 
1533 static status_t
1534 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1535 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1536 	port_id port, uint32 token)
1537 {
1538 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1539 	if (teamArg == NULL)
1540 		return B_NO_MEMORY;
1541 
1542 	teamArg->path = strdup(path);
1543 	if (teamArg->path == NULL) {
1544 		free(teamArg);
1545 		return B_NO_MEMORY;
1546 	}
1547 
1548 	// copy the args over
1549 	teamArg->flat_args = flatArgs;
1550 	teamArg->flat_args_size = flatArgsSize;
1551 	teamArg->arg_count = argCount;
1552 	teamArg->env_count = envCount;
1553 	teamArg->flags = 0;
1554 	teamArg->umask = umask;
1555 	teamArg->error_port = port;
1556 	teamArg->error_token = token;
1557 
1558 	// determine the flags from the environment
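	// (e.g. launching a program as "DISABLE_ASLR=1 ./app" disables address
	// space randomization for the new team)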
1559 	const char* const* env = flatArgs + argCount + 1;
1560 	for (int32 i = 0; i < envCount; i++) {
1561 		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
1562 			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
1563 			break;
1564 		}
1565 	}
1566 
1567 	*_teamArg = teamArg;
1568 	return B_OK;
1569 }
1570 
1571 
1572 static status_t
1573 team_create_thread_start_internal(void* args)
1574 {
1575 	status_t err;
1576 	Thread* thread;
1577 	Team* team;
1578 	struct team_arg* teamArgs = (struct team_arg*)args;
1579 	const char* path;
1580 	addr_t entry;
1581 	char** userArgs;
1582 	char** userEnv;
1583 	struct user_space_program_args* programArgs;
1584 	uint32 argCount, envCount;
1585 
1586 	thread = thread_get_current_thread();
1587 	team = thread->team;
1588 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
1589 
1590 	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
1591 		thread->id));
1592 
1593 	// Main stack area layout is currently as follows (starting from 0):
1594 	//
1595 	// size								| usage
1596 	// ---------------------------------+--------------------------------
1597 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
1598 	// TLS_SIZE							| TLS data
1599 	// sizeof(user_space_program_args)	| argument structure for the runtime
1600 	//									| loader
1601 	// flat arguments size				| flat process arguments and environment
1602 
1603 	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
1604 	// the heap
1605 	// TODO: we could reserve the whole USER_STACK_REGION upfront...
1606 
1607 	argCount = teamArgs->arg_count;
1608 	envCount = teamArgs->env_count;
1609 
1610 	programArgs = (struct user_space_program_args*)(thread->user_stack_base
1611 		+ thread->user_stack_size + TLS_SIZE);
1612 
1613 	userArgs = (char**)(programArgs + 1);
1614 	userEnv = userArgs + argCount + 1;
1615 	path = teamArgs->path;
1616 
1617 	if (user_strlcpy(programArgs->program_path, path,
1618 				sizeof(programArgs->program_path)) < B_OK
1619 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1620 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
1621 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1622 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
1623 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1624 				sizeof(port_id)) < B_OK
1625 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1626 				sizeof(uint32)) < B_OK
1627 		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
1628 		|| user_memcpy(&programArgs->disable_user_addons,
1629 			&sDisableUserAddOns, sizeof(bool)) < B_OK
1630 		|| user_memcpy(userArgs, teamArgs->flat_args,
1631 				teamArgs->flat_args_size) < B_OK) {
1632 		// the team deletion process will clean up this mess
1633 		free_team_arg(teamArgs);
1634 		return B_BAD_ADDRESS;
1635 	}
1636 
1637 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1638 
1639 	// set team args and update state
1640 	team->Lock();
1641 	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
1642 	team->state = TEAM_STATE_NORMAL;
1643 	team->Unlock();
1644 
1645 	free_team_arg(teamArgs);
1646 		// the arguments are already on the user stack; we no longer need
1647 		// them in this form
1648 
1649 	// Clone commpage area
1650 	area_id commPageArea = clone_commpage_area(team->id,
1651 		&team->commpage_address);
1652 	if (commPageArea < B_OK) {
1653 		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
1654 			strerror(commPageArea)));
1655 		return commPageArea;
1656 	}
1657 
1658 	// Register commpage image
1659 	image_id commPageImage = get_commpage_image();
1660 	extended_image_info imageInfo;
1661 	err = get_image_info(commPageImage, &imageInfo.basic_info);
1662 	if (err != B_OK) {
1663 		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
1664 			strerror(err)));
1665 		return err;
1666 	}
1667 	imageInfo.basic_info.text = team->commpage_address;
1668 	imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
1669 	imageInfo.symbol_table = NULL;
1670 	imageInfo.symbol_hash = NULL;
1671 	imageInfo.string_table = NULL;
1672 	image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
1673 	if (image < 0) {
1674 		TRACE(("team_create_thread_start: register_image() failed: %s\n",
1675 			strerror(image)));
1676 		return image;
1677 	}
1678 
1679 	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
1680 	// automatic variables with function scope will never be destroyed.
1681 	{
1682 		// find runtime_loader path
1683 		KPath runtimeLoaderPath;
1684 		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
1685 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1686 		if (err < B_OK) {
1687 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1688 				strerror(err)));
1689 			return err;
1690 		}
1691 		runtimeLoaderPath.UnlockBuffer();
1692 		err = runtimeLoaderPath.Append("runtime_loader");
1693 
1694 		if (err == B_OK) {
1695 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
1696 				&entry);
1697 		}
1698 	}
1699 
1700 	if (err < B_OK) {
1701 		// Luckily, we don't have to clean up the mess we created - that's
1702 		// done for us by the normal team deletion process
1703 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1704 			"%s\n", strerror(err)));
1705 		return err;
1706 	}
1707 
1708 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1709 
1710 	// enter userspace -- returns only in case of error
1711 	return thread_enter_userspace_new_team(thread, (addr_t)entry,
1712 		programArgs, team->commpage_address);
1713 }
1714 
1715 
1716 static status_t
1717 team_create_thread_start(void* args)
1718 {
1719 	team_create_thread_start_internal(args);
1720 	team_init_exit_info_on_error(thread_get_current_thread()->team);
1721 	thread_exit();
1722 		// does not return
1723 	return B_OK;
1724 }
1725 
1726 
1727 static thread_id
1728 load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1729 	int32 envCount, int32 priority, team_id parentID, uint32 flags,
1730 	port_id errorPort, uint32 errorToken)
1731 {
1732 	char** flatArgs = _flatArgs;
1733 	thread_id thread;
1734 	status_t status;
1735 	struct team_arg* teamArgs;
1736 	struct team_loading_info loadingInfo;
1737 	ConditionVariableEntry loadingWaitEntry;
1738 	io_context* parentIOContext = NULL;
1739 	team_id teamID;
1740 	bool teamLimitReached = false;
1741 
1742 	if (flatArgs == NULL || argCount == 0)
1743 		return B_BAD_VALUE;
1744 
1745 	const char* path = flatArgs[0];
1746 
1747 	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
1748 		"\n", path, flatArgs, argCount));
1749 
1750 	// cut the path from the main thread name
1751 	const char* threadName = strrchr(path, '/');
1752 	if (threadName != NULL)
1753 		threadName++;
1754 	else
1755 		threadName = path;
1756 
1757 	// create the main thread object
1758 	Thread* mainThread;
1759 	status = Thread::Create(threadName, mainThread);
1760 	if (status != B_OK)
1761 		return status;
1762 	BReference<Thread> mainThreadReference(mainThread, true);
1763 
1764 	// create team object
1765 	Team* team = Team::Create(mainThread->id, path, false);
1766 	if (team == NULL)
1767 		return B_NO_MEMORY;
1768 	BReference<Team> teamReference(team, true);
1769 
1770 	BReference<Team> teamLoadingReference;
1771 	if ((flags & B_WAIT_TILL_LOADED) != 0) {
1772 		loadingInfo.condition.Init(team, "image load");
1773 		loadingInfo.condition.Add(&loadingWaitEntry);
1774 		loadingInfo.result = B_ERROR;
1775 		team->loading_info = &loadingInfo;
1776 		teamLoadingReference = teamReference;
1777 	}
1778 
1779 	// get the parent team
1780 	Team* parent = Team::Get(parentID);
1781 	if (parent == NULL)
1782 		return B_BAD_TEAM_ID;
1783 	BReference<Team> parentReference(parent, true);
1784 
1785 	parent->LockTeamAndProcessGroup();
1786 	team->Lock();
1787 
1788 	// inherit the parent's user/group
1789 	inherit_parent_user_and_group(team, parent);
1790 
1791 	// get a reference to the parent's I/O context -- we need it to create ours
1792 	parentIOContext = parent->io_context;
1793 	vfs_get_io_context(parentIOContext);
1794 
1795 	team->Unlock();
1796 	parent->UnlockTeamAndProcessGroup();
1797 
1798 	// check the executable's set-user/group-id permission
1799 	update_set_id_user_and_group(team, path);
1800 
1801 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1802 		envCount, (mode_t)-1, errorPort, errorToken);
1803 	if (status != B_OK)
1804 		goto err1;
1805 
1806 	_flatArgs = NULL;
1807 		// args are owned by the team_arg structure now
1808 
1809 	// create a new io_context for this team
1810 	team->io_context = vfs_new_io_context(parentIOContext, true);
1811 	if (!team->io_context) {
1812 		status = B_NO_MEMORY;
1813 		goto err2;
1814 	}
1815 
1816 	// We don't need the parent's I/O context any longer.
1817 	vfs_put_io_context(parentIOContext);
1818 	parentIOContext = NULL;
1819 
1820 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1821 	vfs_exec_io_context(team->io_context);
1822 
1823 	// create an address space for this team
1824 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
1825 		&team->address_space);
1826 	if (status != B_OK)
1827 		goto err2;
1828 
1829 	team->address_space->SetRandomizingEnabled(
1830 		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);
1831 
1832 	// create the user data area
1833 	status = create_team_user_data(team);
1834 	if (status != B_OK)
1835 		goto err4;
1836 
1837 	// insert the team into its parent and the teams hash
1838 	parent->LockTeamAndProcessGroup();
1839 	team->Lock();
1840 
1841 	{
1842 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
1843 
1844 		sTeamHash.Insert(team);
1845 		teamLimitReached = sUsedTeams >= sMaxTeams;
1846 		if (!teamLimitReached)
1847 			sUsedTeams++;
1848 	}
1849 
1850 	insert_team_into_parent(parent, team);
1851 	insert_team_into_group(parent->group, team);
1852 
1853 	team->Unlock();
1854 	parent->UnlockTeamAndProcessGroup();
1855 
1856 	// notify team listeners
1857 	sNotificationService.Notify(TEAM_ADDED, team);
1858 
1859 	if (teamLimitReached) {
1860 		status = B_NO_MORE_TEAMS;
1861 		goto err6;
1862 	}
1863 
1864 	// In case we start the main thread, we shouldn't access the team object
1865 	// afterwards, so cache the team's ID.
1866 	teamID = team->id;
1867 
1868 	// Create a kernel thread, but under the context of the new team
1869 	// The new thread will take over ownership of teamArgs.
1870 	{
1871 		ThreadCreationAttributes threadAttributes(team_create_thread_start,
1872 			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
1873 		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
1874 			+ teamArgs->flat_args_size;
1875 		thread = thread_create_thread(threadAttributes, false);
1876 		if (thread < 0) {
1877 			status = thread;
1878 			goto err6;
1879 		}
1880 	}
1881 
1882 	// The team has been created successfully, so we keep the reference. Or
1883 	// more precisely: It's owned by the team's main thread, now.
1884 	teamReference.Detach();
1885 
1886 	// wait for the loader of the new team to finish its work
1887 	if ((flags & B_WAIT_TILL_LOADED) != 0) {
1888 		if (mainThread != NULL) {
1889 			// resume the team's main thread
1890 			thread_continue(mainThread);
1891 		}
1892 
1893 		// Now wait until loading is finished. We will be woken either by the
1894 		// thread, when it finished or aborted loading, or when the team is
1895 		// going to die (e.g. is killed). In either case the one notifying is
1896 		// responsible for unsetting `loading_info` in the team structure.
1897 		loadingWaitEntry.Wait();
1898 
1899 		// We must synchronize with the thread that woke us up, to ensure
1900 		// there are no remaining consumers of the team_loading_info.
1901 		team->Lock();
1902 		if (team->loading_info != NULL)
1903 			panic("team loading wait complete, but loading_info != NULL");
1904 		team->Unlock();
1905 		teamLoadingReference.Unset();
1906 
1907 		if (loadingInfo.result < B_OK)
1908 			return loadingInfo.result;
1909 	}
1910 
1911 	// notify the debugger
1912 	user_debug_team_created(teamID);
1913 
1914 	return thread;
1915 
1916 err6:
1917 	// Remove the team structure from the process group, the parent team, and
1918 	// the team hash table and delete the team structure.
1919 	parent->LockTeamAndProcessGroup();
1920 	team->Lock();
1921 
1922 	remove_team_from_group(team);
1923 	remove_team_from_parent(team->parent, team);
1924 
1925 	team->Unlock();
1926 	parent->UnlockTeamAndProcessGroup();
1927 
1928 	{
1929 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
1930 		sTeamHash.Remove(team);
1931 		if (!teamLimitReached)
1932 			sUsedTeams--;
1933 	}
1934 
1935 	sNotificationService.Notify(TEAM_REMOVED, team);
1936 
1937 	delete_team_user_data(team);
1938 err4:
1939 	team->address_space->Put();
1940 err2:
1941 	free_team_arg(teamArgs);
1942 err1:
1943 	if (parentIOContext != NULL)
1944 		vfs_put_io_context(parentIOContext);
1945 
1946 	return status;
1947 }
1948 
1949 
1950 /*!	Almost shuts down the current team and loads a new image into it.
1951 	If successful, this function does not return and will take over ownership of
1952 	the arguments provided.
1953 	This function may only be called in a userland team (caused by one of the
1954 	exec*() syscalls).
1955 */
1956 static status_t
1957 exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
1958 	int32 argCount, int32 envCount, mode_t umask)
1959 {
1960 	// NOTE: Since this function normally doesn't return, don't use automatic
1961 	// variables that need destruction in the function scope.
1962 	char** flatArgs = _flatArgs;
1963 	Team* team = thread_get_current_thread()->team;
1964 	struct team_arg* teamArgs;
1965 	const char* threadName;
1966 	thread_id nubThreadID = -1;
1967 
1968 	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
1969 		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
1970 		team->id));
1971 
1972 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1973 
1974 	// switching the kernel at run time is probably not a good idea :)
1975 	if (team == team_get_kernel_team())
1976 		return B_NOT_ALLOWED;
1977 
1978 	// we currently need to be single threaded here
1979 	// TODO: maybe we should just kill all other threads and
1980 	//	make the current thread the team's main thread?
1981 	Thread* currentThread = thread_get_current_thread();
1982 	if (currentThread != team->main_thread)
1983 		return B_NOT_ALLOWED;
1984 
1985 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1986 	// We iterate through the thread list to make sure that there's no other
1987 	// thread.
1988 	TeamLocker teamLocker(team);
1989 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
1990 
1991 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1992 		nubThreadID = team->debug_info.nub_thread;
1993 
1994 	debugInfoLocker.Unlock();
1995 
1996 	for (Thread* thread = team->thread_list; thread != NULL;
1997 			thread = thread->team_next) {
1998 		if (thread != team->main_thread && thread->id != nubThreadID)
1999 			return B_NOT_ALLOWED;
2000 	}
2001 
2002 	team->DeleteUserTimers(true);
2003 	team->ResetSignalsOnExec();
2004 
2005 	teamLocker.Unlock();
2006 
2007 	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
2008 		argCount, envCount, umask, -1, 0);
2009 	if (status != B_OK)
2010 		return status;
2011 
2012 	_flatArgs = NULL;
2013 		// args are owned by the team_arg structure now
2014 
2015 	// TODO: remove team resources if there are any left
2016 	// thread_atkernel_exit() might not be called at all
2017 
2018 	thread_reset_for_exec();
2019 
2020 	user_debug_prepare_for_exec();
2021 
2022 	delete_team_user_data(team);
2023 	vm_delete_areas(team->address_space, false);
2024 	xsi_sem_undo(team);
2025 	delete_owned_ports(team);
2026 	sem_delete_owned_sems(team);
2027 	remove_images(team);
2028 	vfs_exec_io_context(team->io_context);
2029 	delete_realtime_sem_context(team->realtime_sem_context);
2030 	team->realtime_sem_context = NULL;
2031 
2032 	// update ASLR
2033 	team->address_space->SetRandomizingEnabled(
2034 		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);
2035 
2036 	status = create_team_user_data(team);
2037 	if (status != B_OK) {
2038 		// creating the user data failed -- we're toast
2039 		free_team_arg(teamArgs);
2040 		exit_thread(status);
2041 		return status;
2042 	}
2043 
2044 	user_debug_finish_after_exec();
2045 
2046 	// rename the team
2047 
2048 	team->Lock();
2049 	team->SetName(path);
2050 	team->Unlock();
2051 
2052 	// cut the path from the team name and rename the main thread, too
2053 	threadName = strrchr(path, '/');
2054 	if (threadName != NULL)
2055 		threadName++;
2056 	else
2057 		threadName = path;
2058 	rename_thread(thread_get_current_thread_id(), threadName);
2059 
2060 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
2061 
2062 	// Update user/group according to the executable's set-user/group-id
2063 	// permission.
2064 	update_set_id_user_and_group(team, path);
2065 
2066 	user_debug_team_exec();
2067 
2068 	// notify team listeners
2069 	sNotificationService.Notify(TEAM_EXEC, team);
2070 
2071 	// get a user thread for the thread
2072 	user_thread* userThread = team_allocate_user_thread(team);
2073 		// cannot fail (the allocation for the team would have failed already)
2074 	ThreadLocker currentThreadLocker(currentThread);
2075 	currentThread->user_thread = userThread;
2076 	currentThreadLocker.Unlock();
2077 
2078 	// create the user stack for the thread
2079 	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
2080 		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
2081 	if (status == B_OK) {
2082 		// prepare the stack, load the runtime loader, and enter userspace
2083 		team_create_thread_start(teamArgs);
			// never returns
2085 	} else
2086 		free_team_arg(teamArgs);
2087 
	// Sorry, we have to kill ourselves; there is no way out anymore
	// (without any areas left and all that).
2090 	exit_thread(status);
2091 
2092 	// We return a status here since the signal that is sent by the
2093 	// call above is not immediately handled.
2094 	return B_ERROR;
2095 }
2096 
2097 
2098 static thread_id
2099 fork_team(void)
2100 {
2101 	Thread* parentThread = thread_get_current_thread();
2102 	Team* parentTeam = parentThread->team;
2103 	Team* team;
2104 	arch_fork_arg* forkArgs;
2105 	struct area_info info;
2106 	thread_id threadID;
2107 	status_t status;
2108 	ssize_t areaCookie;
2109 	bool teamLimitReached = false;
2110 
2111 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2112 
2113 	if (parentTeam == team_get_kernel_team())
2114 		return B_NOT_ALLOWED;
2115 
2116 	// create a new team
2117 	// TODO: this is very similar to load_image_internal() - maybe we can do
2118 	// something about it :)
2119 
2120 	// create the main thread object
2121 	Thread* thread;
2122 	status = Thread::Create(parentThread->name, thread);
2123 	if (status != B_OK)
2124 		return status;
2125 	BReference<Thread> threadReference(thread, true);
2126 
2127 	// create the team object
2128 	team = Team::Create(thread->id, NULL, false);
2129 	if (team == NULL)
2130 		return B_NO_MEMORY;
2131 
2132 	parentTeam->LockTeamAndProcessGroup();
2133 	team->Lock();
2134 
2135 	team->SetName(parentTeam->Name());
2136 	team->SetArgs(parentTeam->Args());
2137 
2138 	team->commpage_address = parentTeam->commpage_address;
2139 
2140 	// Inherit the parent's user/group.
2141 	inherit_parent_user_and_group(team, parentTeam);
2142 
2143 	// inherit signal handlers
2144 	team->InheritSignalActions(parentTeam);
2145 
2146 	team->Unlock();
2147 	parentTeam->UnlockTeamAndProcessGroup();
2148 
2149 	// inherit some team debug flags
2150 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2151 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2152 
2153 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2154 	if (forkArgs == NULL) {
2155 		status = B_NO_MEMORY;
2156 		goto err1;
2157 	}
2158 
2159 	// create a new io_context for this team
2160 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2161 	if (!team->io_context) {
2162 		status = B_NO_MEMORY;
2163 		goto err2;
2164 	}
2165 
2166 	// duplicate the realtime sem context
2167 	if (parentTeam->realtime_sem_context) {
2168 		team->realtime_sem_context = clone_realtime_sem_context(
2169 			parentTeam->realtime_sem_context);
2170 		if (team->realtime_sem_context == NULL) {
2171 			status = B_NO_MEMORY;
2172 			goto err2;
2173 		}
2174 	}
2175 
2176 	// create an address space for this team
2177 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2178 		&team->address_space);
2179 	if (status < B_OK)
2180 		goto err3;
2181 
2182 	// copy all areas of the team
	// TODO: should be able to handle stack areas differently (i.e. don't
	// make them copy-on-write)
2185 
2186 	areaCookie = 0;
2187 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2188 		if (info.area == parentTeam->user_data_area) {
2189 			// don't clone the user area; just create a new one
2190 			status = create_team_user_data(team, info.address);
2191 			if (status != B_OK)
2192 				break;
2193 
2194 			thread->user_thread = team_allocate_user_thread(team);
2195 		} else {
2196 			void* address;
2197 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2198 				&address, B_CLONE_ADDRESS, info.area);
2199 			if (area < B_OK) {
2200 				status = area;
2201 				break;
2202 			}
2203 
2204 			if (info.area == parentThread->user_stack_area)
2205 				thread->user_stack_area = area;
2206 		}
2207 	}
2208 
2209 	if (status < B_OK)
2210 		goto err4;
2211 
2212 	if (thread->user_thread == NULL) {
2213 #if KDEBUG
2214 		panic("user data area not found, parent area is %" B_PRId32,
2215 			parentTeam->user_data_area);
2216 #endif
2217 		status = B_ERROR;
2218 		goto err4;
2219 	}
2220 
2221 	thread->user_stack_base = parentThread->user_stack_base;
2222 	thread->user_stack_size = parentThread->user_stack_size;
2223 	thread->user_local_storage = parentThread->user_local_storage;
2224 	thread->sig_block_mask = parentThread->sig_block_mask;
2225 	thread->signal_stack_base = parentThread->signal_stack_base;
2226 	thread->signal_stack_size = parentThread->signal_stack_size;
2227 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2228 
2229 	arch_store_fork_frame(forkArgs);
2230 
2231 	// copy image list
2232 	if (copy_images(parentTeam->id, team) != B_OK)
2233 		goto err5;
2234 
2235 	// insert the team into its parent and the teams hash
2236 	parentTeam->LockTeamAndProcessGroup();
2237 	team->Lock();
2238 
2239 	{
2240 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2241 
2242 		sTeamHash.Insert(team);
2243 		teamLimitReached = sUsedTeams >= sMaxTeams;
2244 		if (!teamLimitReached)
2245 			sUsedTeams++;
2246 	}
2247 
2248 	insert_team_into_parent(parentTeam, team);
2249 	insert_team_into_group(parentTeam->group, team);
2250 
2251 	team->Unlock();
2252 	parentTeam->UnlockTeamAndProcessGroup();
2253 
2254 	// notify team listeners
2255 	sNotificationService.Notify(TEAM_ADDED, team);
2256 
2257 	if (teamLimitReached) {
2258 		status = B_NO_MORE_TEAMS;
2259 		goto err6;
2260 	}
2261 
2262 	// create the main thread
2263 	{
2264 		ThreadCreationAttributes threadCreationAttributes(NULL,
2265 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2266 		threadCreationAttributes.forkArgs = forkArgs;
2267 		threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS;
2268 		threadID = thread_create_thread(threadCreationAttributes, false);
2269 		if (threadID < 0) {
2270 			status = threadID;
2271 			goto err6;
2272 		}
2273 	}
2274 
2275 	// notify the debugger
2276 	user_debug_team_created(team->id);
2277 
2278 	T(TeamForked(threadID));
2279 
2280 	resume_thread(threadID);
2281 	return threadID;
2282 
2283 err6:
2284 	// Remove the team structure from the process group, the parent team, and
2285 	// the team hash table and delete the team structure.
2286 	parentTeam->LockTeamAndProcessGroup();
2287 	team->Lock();
2288 
2289 	remove_team_from_group(team);
2290 	remove_team_from_parent(team->parent, team);
2291 
2292 	team->Unlock();
2293 	parentTeam->UnlockTeamAndProcessGroup();
2294 
2295 	{
2296 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2297 		sTeamHash.Remove(team);
2298 		if (!teamLimitReached)
2299 			sUsedTeams--;
2300 	}
2301 
2302 	sNotificationService.Notify(TEAM_REMOVED, team);
2303 err5:
2304 	remove_images(team);
2305 err4:
2306 	team->address_space->RemoveAndPut();
2307 err3:
2308 	delete_realtime_sem_context(team->realtime_sem_context);
2309 err2:
2310 	free(forkArgs);
2311 err1:
2312 	team->ReleaseReference();
2313 
2314 	return status;
2315 }
2316 
2317 
2318 /*!	Returns if the specified team \a parent has any children belonging to the
2319 	process group with the specified ID \a groupID.
2320 	The caller must hold \a parent's lock.
2321 */
2322 static bool
2323 has_children_in_group(Team* parent, pid_t groupID)
2324 {
2325 	for (Team* child = parent->children; child != NULL;
2326 			child = child->siblings_next) {
2327 		TeamLocker childLocker(child);
2328 		if (child->group_id == groupID)
2329 			return true;
2330 	}
2331 
2332 	return false;
2333 }
2334 
2335 
2336 /*!	Returns the first job control entry from \a children, which matches \a id.
2337 	\a id can be:
2338 	- \code > 0 \endcode: Matching an entry with that team ID.
2339 	- \code == -1 \endcode: Matching any entry.
2340 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2341 	\c 0 is an invalid value for \a id.
2342 
2343 	The caller must hold the lock of the team that \a children belongs to.
2344 
2345 	\param children The job control entry list to check.
2346 	\param id The match criterion.
2347 	\return The first matching entry or \c NULL, if none matches.
2348 */
2349 static job_control_entry*
2350 get_job_control_entry(team_job_control_children& children, pid_t id)
2351 {
2352 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2353 		 job_control_entry* entry = it.Next();) {
2354 
2355 		if (id > 0) {
2356 			if (entry->thread == id)
2357 				return entry;
2358 		} else if (id == -1) {
2359 			return entry;
2360 		} else {
2361 			pid_t processGroup
2362 				= (entry->team ? entry->team->group_id : entry->group_id);
2363 			if (processGroup == -id)
2364 				return entry;
2365 		}
2366 	}
2367 
2368 	return NULL;
2369 }
2370 
2371 
2372 /*!	Returns the first job control entry from one of team's dead, continued, or
2373 	stopped children which matches \a id.
2374 	\a id can be:
2375 	- \code > 0 \endcode: Matching an entry with that team ID.
2376 	- \code == -1 \endcode: Matching any entry.
2377 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2378 	\c 0 is an invalid value for \a id.
2379 
2380 	The caller must hold \a team's lock.
2381 
2382 	\param team The team whose dead, stopped, and continued child lists shall be
2383 		checked.
2384 	\param id The match criterion.
	\param flags Specifies which children shall be considered. Dead children
		are considered when \a flags contains \c WEXITED, stopped children
		when it contains \c WUNTRACED or \c WSTOPPED, and continued children
		when it contains \c WCONTINUED.
2390 	\return The first matching entry or \c NULL, if none matches.
2391 */
2392 static job_control_entry*
2393 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2394 {
2395 	job_control_entry* entry = NULL;
2396 
2397 	if ((flags & WEXITED) != 0)
2398 		entry = get_job_control_entry(team->dead_children, id);
2399 
2400 	if (entry == NULL && (flags & WCONTINUED) != 0)
2401 		entry = get_job_control_entry(team->continued_children, id);
2402 
2403 	if (entry == NULL && (flags & (WUNTRACED | WSTOPPED)) != 0)
2404 		entry = get_job_control_entry(team->stopped_children, id);
2405 
2406 	return entry;
2407 }
2408 
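
/*	Illustrative (non-compiled) sketch of the matching semantics, assuming
	`parent` is a Team whose lock the caller holds:

		// any dead child:
		job_control_entry* dead = get_job_control_entry(parent, -1, WEXITED);

		// a stopped or continued child with team ID 123:
		job_control_entry* byID = get_job_control_entry(parent, 123,
			WUNTRACED | WCONTINUED);

		// any dead child in process group 42:
		job_control_entry* inGroup = get_job_control_entry(parent, -42,
			WEXITED);
*/
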
2409 
2410 job_control_entry::job_control_entry()
2411 	:
2412 	has_group_ref(false)
2413 {
2414 }
2415 
2416 
2417 job_control_entry::~job_control_entry()
2418 {
2419 	if (has_group_ref) {
2420 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2421 
2422 		ProcessGroup* group = sGroupHash.Lookup(group_id);
2423 		if (group == NULL) {
2424 			panic("job_control_entry::~job_control_entry(): unknown group "
2425 				"ID: %" B_PRId32, group_id);
2426 			return;
2427 		}
2428 
2429 		groupHashLocker.Unlock();
2430 
2431 		group->ReleaseReference();
2432 	}
2433 }
2434 
2435 
2436 /*!	Invoked when the owning team is dying, initializing the entry according to
2437 	the dead state.
2438 
2439 	The caller must hold the owning team's lock and the scheduler lock.
2440 */
2441 void
2442 job_control_entry::InitDeadState()
2443 {
2444 	if (team != NULL) {
2445 		ASSERT(team->exit.initialized);
2446 
2447 		group_id = team->group_id;
2448 		team->group->AcquireReference();
2449 		has_group_ref = true;
2450 
2451 		thread = team->id;
2452 		status = team->exit.status;
2453 		reason = team->exit.reason;
2454 		signal = team->exit.signal;
2455 		signaling_user = team->exit.signaling_user;
2456 		user_time = team->dead_threads_user_time
2457 			+ team->dead_children.user_time;
2458 		kernel_time = team->dead_threads_kernel_time
2459 			+ team->dead_children.kernel_time;
2460 
2461 		team = NULL;
2462 	}
2463 }
2464 
2465 
2466 job_control_entry&
2467 job_control_entry::operator=(const job_control_entry& other)
2468 {
2469 	state = other.state;
2470 	thread = other.thread;
2471 	signal = other.signal;
2472 	has_group_ref = false;
2473 	signaling_user = other.signaling_user;
2474 	team = other.team;
2475 	group_id = other.group_id;
2476 	status = other.status;
2477 	reason = other.reason;
2478 	user_time = other.user_time;
2479 	kernel_time = other.kernel_time;
2480 
2481 	return *this;
2482 }
2483 
2484 
2485 /*! This is the kernel backend for waitid().
2486 */
2487 static thread_id
2488 wait_for_child(pid_t child, uint32 flags, siginfo_t& _info,
2489 	team_usage_info& _usage_info)
2490 {
2491 	Thread* thread = thread_get_current_thread();
2492 	Team* team = thread->team;
2493 	struct job_control_entry foundEntry;
2494 	struct job_control_entry* freeDeathEntry = NULL;
2495 	status_t status = B_OK;
2496 
2497 	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
2498 		child, flags));
2499 
2500 	T(WaitForChild(child, flags));
2501 
2502 	if ((flags & (WEXITED | WUNTRACED | WSTOPPED | WCONTINUED)) == 0) {
2503 		T(WaitForChildDone(B_BAD_VALUE));
2504 		return B_BAD_VALUE;
2505 	}
2506 
2507 	pid_t originalChild = child;
2508 
2509 	bool ignoreFoundEntries = false;
2510 	bool ignoreFoundEntriesChecked = false;
2511 
2512 	while (true) {
2513 		// lock the team
2514 		TeamLocker teamLocker(team);
2515 
2516 		// A 0 child argument means to wait for all children in the process
2517 		// group of the calling team.
2518 		child = originalChild == 0 ? -team->group_id : originalChild;
2519 
2520 		// check whether any condition holds
2521 		job_control_entry* entry = get_job_control_entry(team, child, flags);
2522 
		if (entry == NULL) {
			// No entry yet -- check whether there are any children complying
			// with the process group specification at all.
2528 			bool childrenExist = false;
2529 			if (child == -1) {
2530 				childrenExist = team->children != NULL;
2531 			} else if (child < -1) {
2532 				childrenExist = has_children_in_group(team, -child);
2533 			} else if (child != team->id) {
2534 				if (Team* childTeam = Team::Get(child)) {
2535 					BReference<Team> childTeamReference(childTeam, true);
2536 					TeamLocker childTeamLocker(childTeam);
2537 					childrenExist = childTeam->parent == team;
2538 				}
2539 			}
2540 
2541 			if (!childrenExist) {
2542 				// there is no child we could wait for
2543 				status = ECHILD;
2544 			} else {
2545 				// the children we're waiting for are still running
2546 				status = B_WOULD_BLOCK;
2547 			}
2548 		} else {
2549 			// got something
2550 			foundEntry = *entry;
2551 
2552 			// unless WNOWAIT has been specified, "consume" the wait state
2553 			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
2554 				if (entry->state == JOB_CONTROL_STATE_DEAD) {
2555 					// The child is dead. Reap its death entry.
2556 					freeDeathEntry = entry;
2557 					team->dead_children.entries.Remove(entry);
2558 					team->dead_children.count--;
2559 				} else {
2560 					// The child is well. Reset its job control state.
2561 					team_set_job_control_state(entry->team,
2562 						JOB_CONTROL_STATE_NONE, NULL);
2563 				}
2564 			}
2565 		}
2566 
2567 		// If we haven't got anything yet, prepare for waiting for the
2568 		// condition variable.
2569 		ConditionVariableEntry deadWaitEntry;
2570 
2571 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
2572 			team->dead_children.condition_variable.Add(&deadWaitEntry);
2573 
2574 		teamLocker.Unlock();
2575 
2576 		// we got our entry and can return to our caller
2577 		if (status == B_OK) {
2578 			if (ignoreFoundEntries) {
2579 				// ... unless we shall ignore found entries
2580 				delete freeDeathEntry;
2581 				freeDeathEntry = NULL;
2582 				continue;
2583 			}
2584 
2585 			break;
2586 		}
2587 
2588 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
2589 			T(WaitForChildDone(status));
2590 			return status;
2591 		}
2592 
2593 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
2594 		if (status == B_INTERRUPTED) {
2595 			T(WaitForChildDone(status));
2596 			return status;
2597 		}
2598 
2599 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
2600 		// all our children are dead and fail with ECHILD. We check the
2601 		// condition at this point.
2602 		if (!ignoreFoundEntriesChecked) {
2603 			teamLocker.Lock();
2604 
2605 			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
2606 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
2607 				|| handler.sa_handler == SIG_IGN) {
2608 				ignoreFoundEntries = true;
2609 			}
2610 
2611 			teamLocker.Unlock();
2612 
2613 			ignoreFoundEntriesChecked = true;
2614 		}
2615 	}
2616 
2617 	delete freeDeathEntry;
2618 
	// By the time we get here, we have a valid death entry and have already
	// been unregistered from the team or group. Fill in the returned info.
2621 	memset(&_info, 0, sizeof(_info));
2622 	_info.si_signo = SIGCHLD;
2623 	_info.si_pid = foundEntry.thread;
2624 	_info.si_uid = foundEntry.signaling_user;
2625 	// TODO: Fill in si_errno?
2626 
2627 	switch (foundEntry.state) {
2628 		case JOB_CONTROL_STATE_DEAD:
2629 			_info.si_code = foundEntry.reason;
2630 			_info.si_status = foundEntry.reason == CLD_EXITED
2631 				? foundEntry.status : foundEntry.signal;
2632 			_usage_info.user_time = foundEntry.user_time;
2633 			_usage_info.kernel_time = foundEntry.kernel_time;
2634 			break;
2635 		case JOB_CONTROL_STATE_STOPPED:
2636 			_info.si_code = CLD_STOPPED;
2637 			_info.si_status = foundEntry.signal;
2638 			break;
2639 		case JOB_CONTROL_STATE_CONTINUED:
2640 			_info.si_code = CLD_CONTINUED;
2641 			_info.si_status = 0;
2642 			break;
2643 		case JOB_CONTROL_STATE_NONE:
2644 			// can't happen
2645 			break;
2646 	}
2647 
	// If SIGCHLD is blocked, we clear pending SIGCHLDs if no other child
	// status is available.
2650 	TeamLocker teamLocker(team);
2651 	InterruptsSpinLocker signalLocker(team->signal_lock);
2652 	SpinLocker threadCreationLocker(gThreadCreationLock);
2653 
2654 	if (is_team_signal_blocked(team, SIGCHLD)) {
2655 		if (get_job_control_entry(team, child, flags) == NULL)
2656 			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
2657 	}
2658 
2659 	threadCreationLocker.Unlock();
2660 	signalLocker.Unlock();
2661 	teamLocker.Unlock();
2662 
	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller, we wait
	// until the thread is really gone.
2666 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
2667 		wait_for_thread(foundEntry.thread, NULL);
2668 
2669 	T(WaitForChildDone(foundEntry));
2670 
2671 	return foundEntry.thread;
2672 }
2673 
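
/*	Illustrative (non-compiled) call, as a wait4()-style backend might issue
	it: wait for any child, don't block, and reap the status if one is
	available:

		siginfo_t info;
		team_usage_info usage;
		pid_t child = wait_for_child(-1, WEXITED | WNOHANG, info, usage);
		if (child >= 0)
			dprintf("child %" B_PRId32 " exited\n", child);
		else if (child == B_WOULD_BLOCK)
			; // no child has changed state yet
*/
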
2674 
2675 /*! Fills the team_info structure with information from the specified team.
2676 	Interrupts must be enabled. The team must not be locked.
2677 */
2678 static status_t
2679 fill_team_info(Team* team, team_info* info, size_t size)
2680 {
2681 	if (size > sizeof(team_info))
2682 		return B_BAD_VALUE;
2683 
	// TODO: Set more information for team_info
2685 	memset(info, 0, size);
2686 
2687 	info->team = team->id;
2688 		// immutable
2689 	info->image_count = count_images(team);
2690 		// protected by sImageMutex
2691 
2692 	TeamLocker teamLocker(team);
2693 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2694 
2695 	info->thread_count = team->num_threads;
2696 	//info->area_count =
2697 	info->debugger_nub_thread = team->debug_info.nub_thread;
2698 	info->debugger_nub_port = team->debug_info.nub_port;
2699 	info->uid = team->effective_uid;
2700 	info->gid = team->effective_gid;
2701 
2702 	strlcpy(info->args, team->Args(), sizeof(info->args));
2703 	info->argc = 1;
2704 
2705 	if (size > offsetof(team_info, real_uid)) {
2706 		info->real_uid = team->real_uid;
2707 		info->real_gid = team->real_gid;
2708 		info->group_id = team->group_id;
2709 		info->session_id = team->session_id;
2710 
2711 		if (team->parent != NULL)
2712 			info->parent = team->parent->id;
2713 		else
2714 			info->parent = -1;
2715 
2716 		strlcpy(info->name, team->Name(), sizeof(info->name));
2717 		info->start_time = team->start_time;
2718 	}
2719 
2720 	return B_OK;
2721 }
2722 
2723 
2724 /*!	Returns whether the process group contains stopped processes.
2725 	The caller must hold the process group's lock.
2726 */
2727 static bool
2728 process_group_has_stopped_processes(ProcessGroup* group)
2729 {
2730 	Team* team = group->teams;
2731 	while (team != NULL) {
2732 		// the parent team's lock guards the job control entry -- acquire it
2733 		team->LockTeamAndParent(false);
2734 
2735 		if (team->job_control_entry != NULL
2736 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2737 			team->UnlockTeamAndParent();
2738 			return true;
2739 		}
2740 
2741 		team->UnlockTeamAndParent();
2742 
2743 		team = team->group_next;
2744 	}
2745 
2746 	return false;
2747 }
2748 
2749 
2750 /*!	Iterates through all process groups queued in team_remove_team() and signals
2751 	those that are orphaned and have stopped processes.
2752 	The caller must not hold any team or process group locks.
2753 */
2754 static void
2755 orphaned_process_group_check()
2756 {
2757 	// process as long as there are groups in the list
2758 	while (true) {
2759 		// remove the head from the list
2760 		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
2761 
2762 		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
2763 		if (group == NULL)
2764 			return;
2765 
2766 		group->UnsetOrphanedCheck();
2767 		BReference<ProcessGroup> groupReference(group);
2768 
2769 		orphanedCheckLocker.Unlock();
2770 
2771 		AutoLocker<ProcessGroup> groupLocker(group);
2772 
2773 		// If the group is orphaned and contains stopped processes, we're
2774 		// supposed to send SIGHUP + SIGCONT.
2775 		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
2776 			Thread* currentThread = thread_get_current_thread();
2777 
2778 			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
2779 			send_signal_to_process_group_locked(group, signal, 0);
2780 
2781 			signal.SetNumber(SIGCONT);
2782 			send_signal_to_process_group_locked(group, signal, 0);
2783 		}
2784 	}
2785 }
2786 
2787 
2788 static status_t
2789 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2790 	uint32 flags)
2791 {
2792 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2793 		return B_BAD_VALUE;
2794 
2795 	// get the team
2796 	Team* team = Team::GetAndLock(id);
2797 	if (team == NULL)
2798 		return B_BAD_TEAM_ID;
2799 	BReference<Team> teamReference(team, true);
2800 	TeamLocker teamLocker(team, true);
2801 
2802 	if ((flags & B_CHECK_PERMISSION) != 0) {
2803 		uid_t uid = geteuid();
2804 		if (uid != 0 && uid != team->effective_uid)
2805 			return B_NOT_ALLOWED;
2806 	}
2807 
2808 	bigtime_t kernelTime = 0;
2809 	bigtime_t userTime = 0;
2810 
2811 	switch (who) {
2812 		case B_TEAM_USAGE_SELF:
2813 		{
2814 			Thread* thread = team->thread_list;
2815 
2816 			for (; thread != NULL; thread = thread->team_next) {
2817 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2818 				kernelTime += thread->kernel_time;
2819 				userTime += thread->user_time;
2820 			}
2821 
2822 			kernelTime += team->dead_threads_kernel_time;
2823 			userTime += team->dead_threads_user_time;
2824 			break;
2825 		}
2826 
2827 		case B_TEAM_USAGE_CHILDREN:
2828 		{
2829 			Team* child = team->children;
2830 			for (; child != NULL; child = child->siblings_next) {
2831 				TeamLocker childLocker(child);
2832 
				Thread* thread = child->thread_list;
2834 
2835 				for (; thread != NULL; thread = thread->team_next) {
2836 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2837 					kernelTime += thread->kernel_time;
2838 					userTime += thread->user_time;
2839 				}
2840 
2841 				kernelTime += child->dead_threads_kernel_time;
2842 				userTime += child->dead_threads_user_time;
2843 			}
2844 
2845 			kernelTime += team->dead_children.kernel_time;
2846 			userTime += team->dead_children.user_time;
2847 			break;
2848 		}
2849 	}
2850 
2851 	info->kernel_time = kernelTime;
2852 	info->user_time = userTime;
2853 
2854 	return B_OK;
2855 }
2856 
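
/*	Illustrative use: fetching the calling team's own CPU usage without a
	permission check, as kernel-internal callers would:

		team_usage_info info;
		status_t error = common_get_team_usage_info(
			team_get_current_team_id(), B_TEAM_USAGE_SELF, &info, 0);
		if (error == B_OK) {
			dprintf("user: %" B_PRId64 ", kernel: %" B_PRId64 "\n",
				info.user_time, info.kernel_time);
		}
*/
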
2857 
2858 //	#pragma mark - Private kernel API
2859 
2860 
2861 status_t
2862 team_init(kernel_args* args)
2863 {
2864 	// create the team hash table
2865 	new(&sTeamHash) TeamTable;
2866 	if (sTeamHash.Init(64) != B_OK)
2867 		panic("Failed to init team hash table!");
2868 
2869 	new(&sGroupHash) ProcessGroupHashTable;
2870 	if (sGroupHash.Init() != B_OK)
2871 		panic("Failed to init process group hash table!");
2872 
2873 	// create initial session and process groups
2874 
2875 	ProcessSession* session = new(std::nothrow) ProcessSession(1);
2876 	if (session == NULL)
2877 		panic("Could not create initial session.\n");
2878 	BReference<ProcessSession> sessionReference(session, true);
2879 
2880 	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
2881 	if (group == NULL)
2882 		panic("Could not create initial process group.\n");
2883 	BReference<ProcessGroup> groupReference(group, true);
2884 
2885 	group->Publish(session);
2886 
2887 	// create the kernel team
2888 	sKernelTeam = Team::Create(1, "kernel_team", true);
2889 	if (sKernelTeam == NULL)
2890 		panic("could not create kernel team!\n");
2891 
2892 	sKernelTeam->address_space = VMAddressSpace::Kernel();
2893 	sKernelTeam->SetArgs(sKernelTeam->Name());
2894 	sKernelTeam->state = TEAM_STATE_NORMAL;
2895 
2896 	sKernelTeam->saved_set_uid = 0;
2897 	sKernelTeam->real_uid = 0;
2898 	sKernelTeam->effective_uid = 0;
2899 	sKernelTeam->saved_set_gid = 0;
2900 	sKernelTeam->real_gid = 0;
2901 	sKernelTeam->effective_gid = 0;
2902 	sKernelTeam->supplementary_groups = NULL;
2903 
2904 	insert_team_into_group(group, sKernelTeam);
2905 
2906 	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
2907 	if (sKernelTeam->io_context == NULL)
2908 		panic("could not create io_context for kernel team!\n");
2909 
2910 	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
2911 		dprintf("Failed to resize FD table for kernel team!\n");
2912 
2913 	// stick it in the team hash
2914 	sTeamHash.Insert(sKernelTeam);
2915 
2916 	// check safe mode settings
2917 	sDisableUserAddOns = get_safemode_boolean(B_SAFEMODE_DISABLE_USER_ADD_ONS,
2918 		false);
2919 
2920 	add_debugger_command_etc("team", &dump_team_info,
2921 		"Dump info about a particular team",
2922 		"[ <id> | <address> | <name> ]\n"
2923 		"Prints information about the specified team. If no argument is given\n"
2924 		"the current team is selected.\n"
2925 		"  <id>       - The ID of the team.\n"
2926 		"  <address>  - The address of the team structure.\n"
2927 		"  <name>     - The team's name.\n", 0);
2928 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2929 		"\n"
2930 		"Prints a list of all existing teams.\n", 0);
2931 
2932 	new(&sNotificationService) TeamNotificationService();
2933 
2934 	sNotificationService.Register();
2935 
2936 	return B_OK;
2937 }
2938 
2939 
2940 int32
2941 team_max_teams(void)
2942 {
2943 	return sMaxTeams;
2944 }
2945 
2946 
2947 int32
2948 team_used_teams(void)
2949 {
2950 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
2951 	return sUsedTeams;
2952 }
2953 
2954 
2955 /*! Returns a death entry of a child team specified by ID (if any).
2956 	The caller must hold the team's lock.
2957 
2958 	\param team The team whose dead children list to check.
	\param child The ID of the child whose death entry to look for. Must
		be > 0.
2960 	\param _deleteEntry Return variable, indicating whether the caller needs to
2961 		delete the returned entry.
2962 	\return The death entry of the matching team, or \c NULL, if no death entry
2963 		for the team was found.
2964 */
2965 job_control_entry*
2966 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2967 {
2968 	if (child <= 0)
2969 		return NULL;
2970 
2971 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2972 		child);
2973 	if (entry) {
		// remove the entry only if the caller is the parent of the found team
2975 		if (team_get_current_team_id() == entry->thread) {
2976 			team->dead_children.entries.Remove(entry);
2977 			team->dead_children.count--;
2978 			*_deleteEntry = true;
2979 		} else {
2980 			*_deleteEntry = false;
2981 		}
2982 	}
2983 
2984 	return entry;
2985 }
2986 
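
/*	Illustrative use by a caller that holds \a team's lock; `childID` is a
	hypothetical child team ID:

		bool deleteEntry;
		job_control_entry* entry = team_get_death_entry(team, childID,
			&deleteEntry);
		if (entry != NULL) {
			// evaluate entry->status, entry->reason, ...
			if (deleteEntry)
				delete entry;
		}
*/
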
2987 
2988 /*! Quick check to see if we have a valid team ID. */
2989 bool
2990 team_is_valid(team_id id)
2991 {
2992 	if (id <= 0)
2993 		return false;
2994 
2995 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
2996 	return team_get_team_struct_locked(id) != NULL;
2997 }
2998 
2999 
3000 Team*
3001 team_get_team_struct_locked(team_id id)
3002 {
3003 	return sTeamHash.Lookup(id);
3004 }
3005 
3006 
3007 void
3008 team_set_controlling_tty(void* tty)
3009 {
3010 	// lock the team, so its session won't change while we're playing with it
3011 	Team* team = thread_get_current_thread()->team;
3012 	TeamLocker teamLocker(team);
3013 
3014 	// get and lock the session
3015 	ProcessSession* session = team->group->Session();
3016 	AutoLocker<ProcessSession> sessionLocker(session);
3017 
3018 	// set the session's fields
3019 	session->controlling_tty = tty;
3020 	session->foreground_group = -1;
3021 }
3022 
3023 
3024 void*
3025 team_get_controlling_tty()
3026 {
3027 	// lock the team, so its session won't change while we're playing with it
3028 	Team* team = thread_get_current_thread()->team;
3029 	TeamLocker teamLocker(team);
3030 
3031 	// get and lock the session
3032 	ProcessSession* session = team->group->Session();
3033 	AutoLocker<ProcessSession> sessionLocker(session);
3034 
3035 	// get the session's field
3036 	return session->controlling_tty;
3037 }
3038 
3039 
3040 status_t
3041 team_set_foreground_process_group(void* tty, pid_t processGroupID)
3042 {
3043 	// lock the team, so its session won't change while we're playing with it
3044 	Thread* thread = thread_get_current_thread();
3045 	Team* team = thread->team;
3046 	TeamLocker teamLocker(team);
3047 
3048 	// get and lock the session
3049 	ProcessSession* session = team->group->Session();
3050 	AutoLocker<ProcessSession> sessionLocker(session);
3051 
3052 	// check given TTY -- must be the controlling tty of the calling process
3053 	if (session->controlling_tty != tty)
3054 		return ENOTTY;
3055 
3056 	// check given process group -- must belong to our session
3057 	{
3058 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
3059 		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
3060 		if (group == NULL || group->Session() != session)
3061 			return B_BAD_VALUE;
3062 	}
3063 
	// If we are a background group, we can set the foreground group unharmed
	// only when we ignore or block SIGTTOU. Otherwise our group gets a
	// SIGTTOU.
3066 	if (session->foreground_group != -1
3067 		&& session->foreground_group != team->group_id
3068 		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
3069 		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
3070 		InterruptsSpinLocker signalLocker(team->signal_lock);
3071 
3072 		if (!is_team_signal_blocked(team, SIGTTOU)) {
3073 			pid_t groupID = team->group_id;
3074 
3075 			signalLocker.Unlock();
3076 			sessionLocker.Unlock();
3077 			teamLocker.Unlock();
3078 
3079 			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
3080 			send_signal_to_process_group(groupID, signal, 0);
3081 			return B_INTERRUPTED;
3082 		}
3083 	}
3084 
3085 	session->foreground_group = processGroupID;
3086 
3087 	return B_OK;
3088 }
3089 
3090 
3091 uid_t
3092 team_geteuid(team_id id)
3093 {
3094 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3095 	Team* team = team_get_team_struct_locked(id);
3096 	if (team == NULL)
3097 		return (uid_t)-1;
3098 	return team->effective_uid;
3099 }
3100 
3101 
3102 /*!	Removes the specified team from the global team hash, from its process
3103 	group, and from its parent.
3104 	It also moves all of its children to the kernel team.
3105 
3106 	The caller must hold the following locks:
3107 	- \a team's process group's lock,
3108 	- the kernel team's lock,
3109 	- \a team's parent team's lock (might be the kernel team), and
3110 	- \a team's lock.
3111 */
3112 void
3113 team_remove_team(Team* team, pid_t& _signalGroup)
3114 {
3115 	Team* parent = team->parent;
3116 
3117 	// remember how long this team lasted
3118 	parent->dead_children.kernel_time += team->dead_threads_kernel_time
3119 		+ team->dead_children.kernel_time;
3120 	parent->dead_children.user_time += team->dead_threads_user_time
3121 		+ team->dead_children.user_time;
3122 
3123 	// remove the team from the hash table
3124 	InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
3125 	sTeamHash.Remove(team);
3126 	sUsedTeams--;
3127 	teamsLocker.Unlock();
3128 
3129 	// The team can no longer be accessed by ID. Navigation to it is still
3130 	// possible from its process group and its parent and children, but that
3131 	// will be rectified shortly.
3132 	team->state = TEAM_STATE_DEATH;
3133 
	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signaling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine whom to signal.
3138 	_signalGroup = -1;
3139 	bool isSessionLeader = false;
3140 	if (team->session_id == team->id
3141 		&& team->group->Session()->controlling_tty != NULL) {
3142 		isSessionLeader = true;
3143 
3144 		ProcessSession* session = team->group->Session();
3145 
3146 		AutoLocker<ProcessSession> sessionLocker(session);
3147 
3148 		session->controlling_tty = NULL;
3149 		_signalGroup = session->foreground_group;
3150 	}
3151 
3152 	// remove us from our process group
3153 	remove_team_from_group(team);
3154 
3155 	// move the team's children to the kernel team
3156 	while (Team* child = team->children) {
3157 		// remove the child from the current team and add it to the kernel team
3158 		TeamLocker childLocker(child);
3159 
3160 		remove_team_from_parent(team, child);
3161 		insert_team_into_parent(sKernelTeam, child);
3162 
3163 		// move job control entries too
3164 		sKernelTeam->stopped_children.entries.MoveFrom(
3165 			&team->stopped_children.entries);
3166 		sKernelTeam->continued_children.entries.MoveFrom(
3167 			&team->continued_children.entries);
3168 
3169 		// If the team was a session leader with controlling terminal,
3170 		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
3171 		// groups with stopped processes. Due to locking complications we can't
3172 		// do that here, so we only check whether we were a reason for the
3173 		// child's process group not being an orphan and, if so, schedule a
3174 		// later check (cf. orphaned_process_group_check()).
3175 		if (isSessionLeader) {
3176 			ProcessGroup* childGroup = child->group;
3177 			if (childGroup->Session()->id == team->session_id
3178 				&& childGroup->id != team->group_id) {
3179 				childGroup->ScheduleOrphanedCheck();
3180 			}
3181 		}
3182 
3183 		// Note, we don't move the dead children entries. Those will be deleted
3184 		// when the team structure is deleted.
3185 	}
3186 
3187 	// remove us from our parent
3188 	remove_team_from_parent(parent, team);
3189 }
3190 
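
/*	Sketch (simplified and hypothetical; the real call site is the team exit
	path) of the lock set a caller assembles before calling team_remove_team():

		team->group->Lock();
		sKernelTeam->Lock();
		if (team->parent != sKernelTeam)
			team->parent->Lock();
		team->Lock();

		pid_t signalGroup;
		team_remove_team(team, signalGroup);
*/
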
3191 
3192 /*!	Kills all threads but the main thread of the team and shuts down user
3193 	debugging for it.
	To be called on exit of the team's main thread. The caller must not hold
	any locks.
3195 
3196 	\param team The team in question.
3197 	\return The port of the debugger for the team, -1 if none. To be passed to
3198 		team_delete_team().
3199 */
3200 port_id
3201 team_shutdown_team(Team* team)
3202 {
3203 	ASSERT(thread_get_current_thread() == team->main_thread);
3204 
3205 	TeamLocker teamLocker(team);
3206 
3207 	// Make sure debugging changes won't happen anymore.
3208 	port_id debuggerPort = -1;
3209 	while (true) {
3210 		// If a debugger change is in progress for the team, we'll have to
3211 		// wait until it is done.
3212 		ConditionVariableEntry waitForDebuggerEntry;
3213 		bool waitForDebugger = false;
3214 
3215 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
3216 
3217 		if (team->debug_info.debugger_changed_condition != NULL) {
3218 			team->debug_info.debugger_changed_condition->Add(
3219 				&waitForDebuggerEntry);
3220 			waitForDebugger = true;
3221 		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
3222 			// The team is being debugged. That will stop with the termination
3223 			// of the nub thread. Since we set the team state to death, no one
3224 			// can install a debugger anymore. We fetch the debugger's port to
3225 			// send it a message at the bitter end.
3226 			debuggerPort = team->debug_info.debugger_port;
3227 		}
3228 
3229 		debugInfoLocker.Unlock();
3230 
3231 		if (!waitForDebugger)
3232 			break;
3233 
3234 		// wait for the debugger change to be finished
3235 		teamLocker.Unlock();
3236 
3237 		waitForDebuggerEntry.Wait();
3238 
3239 		teamLocker.Lock();
3240 	}
3241 
3242 	// Mark the team as shutting down. That will prevent new threads from being
3243 	// created and debugger changes from taking place.
3244 	team->state = TEAM_STATE_SHUTDOWN;
3245 
3246 	// delete all timers
3247 	team->DeleteUserTimers(false);
3248 
3249 	// deactivate CPU time user timers for the team
3250 	InterruptsSpinLocker timeLocker(team->time_lock);
3251 
3252 	if (team->HasActiveCPUTimeUserTimers())
3253 		team->DeactivateCPUTimeUserTimers();
3254 
3255 	timeLocker.Unlock();
3256 
3257 	// kill all threads but the main thread
3258 	team_death_entry deathEntry;
3259 	deathEntry.condition.Init(team, "team death");
3260 
3261 	while (true) {
3262 		team->death_entry = &deathEntry;
3263 		deathEntry.remaining_threads = 0;
3264 
3265 		Thread* thread = team->thread_list;
3266 		while (thread != NULL) {
3267 			if (thread != team->main_thread) {
3268 				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
3269 				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
3270 				deathEntry.remaining_threads++;
3271 			}
3272 
3273 			thread = thread->team_next;
3274 		}
3275 
3276 		if (deathEntry.remaining_threads == 0)
3277 			break;
3278 
3279 		// there are threads to wait for
3280 		ConditionVariableEntry entry;
3281 		deathEntry.condition.Add(&entry);
3282 
3283 		teamLocker.Unlock();
3284 
3285 		entry.Wait();
3286 
3287 		teamLocker.Lock();
3288 	}
3289 
3290 	team->death_entry = NULL;
3291 
3292 	return debuggerPort;
3293 }
3294 
3295 
3296 /*!	Called on team exit to notify threads waiting on the team and free most
3297 	resources associated with it.
3298 	The caller shouldn't hold any locks.
3299 */
3300 void
3301 team_delete_team(Team* team, port_id debuggerPort)
3302 {
3303 	// Not quite in our job description, but work that has been left by
3304 	// team_remove_team() and that can be done now that we're not holding any
3305 	// locks.
3306 	orphaned_process_group_check();
3307 
3308 	team_id teamID = team->id;
3309 
3310 	ASSERT(team->num_threads == 0);
3311 
3312 	// If someone is waiting for this team to be loaded, but it dies
3313 	// unexpectedly before being done, we need to notify the waiting
3314 	// thread now.
3315 
3316 	TeamLocker teamLocker(team);
3317 
3318 	if (team->loading_info != NULL) {
3319 		// there's indeed someone waiting
3320 		team->loading_info->result = B_ERROR;
3321 
3322 		// wake up the waiting thread
3323 		team->loading_info->condition.NotifyAll();
3324 		team->loading_info = NULL;
3325 	}
3326 
3327 	// notify team watchers
3328 
3329 	{
3330 		// we're not reachable from anyone anymore at this point, so we
3331 		// can safely access the list without any locking
3332 		struct team_watcher* watcher;
3333 		while ((watcher = (struct team_watcher*)list_remove_head_item(
3334 				&team->watcher_list)) != NULL) {
3335 			watcher->hook(teamID, watcher->data);
3336 			free(watcher);
3337 		}
3338 	}
3339 
3340 	teamLocker.Unlock();
3341 
3342 	sNotificationService.Notify(TEAM_REMOVED, team);
3343 
3344 	// free team resources
3345 
3346 	delete_realtime_sem_context(team->realtime_sem_context);
3347 	xsi_sem_undo(team);
3348 	remove_images(team);
3349 	team->address_space->RemoveAndPut();
3350 
3351 	team->ReleaseReference();
3352 
	// notify the debugger that the team is gone
3354 	user_debug_team_deleted(teamID, debuggerPort);
3355 }
3356 
3357 
3358 Team*
3359 team_get_kernel_team(void)
3360 {
3361 	return sKernelTeam;
3362 }
3363 
3364 
3365 team_id
3366 team_get_kernel_team_id(void)
3367 {
3368 	if (!sKernelTeam)
3369 		return 0;
3370 
3371 	return sKernelTeam->id;
3372 }
3373 
3374 
3375 team_id
3376 team_get_current_team_id(void)
3377 {
3378 	return thread_get_current_thread()->team->id;
3379 }
3380 
3381 
3382 status_t
3383 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3384 {
3385 	if (id == sKernelTeam->id) {
3386 		// we're the kernel team, so we don't have to go through all
3387 		// the hassle (locking and hash lookup)
3388 		*_addressSpace = VMAddressSpace::GetKernel();
3389 		return B_OK;
3390 	}
3391 
3392 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3393 
3394 	Team* team = team_get_team_struct_locked(id);
3395 	if (team == NULL)
3396 		return B_BAD_VALUE;
3397 
3398 	team->address_space->Get();
3399 	*_addressSpace = team->address_space;
3400 	return B_OK;
3401 }
3402 
3403 
3404 /*!	Sets the team's job control state.
3405 	The caller must hold the parent team's lock. Interrupts are allowed to be
3406 	enabled or disabled.
3407 	\a team The team whose job control state shall be set.
3408 	\a newState The new state to be set.
	\a signal The signal the new state was caused by. Can be \c NULL, if none.
		Then the caller is responsible for filling in the following fields of
		the entry before releasing the parent team's lock, unless the new state
		is \c JOB_CONTROL_STATE_NONE:
3413 		- \c signal: The number of the signal causing the state change.
3414 		- \c signaling_user: The real UID of the user sending the signal.
3415 */
3416 void
3417 team_set_job_control_state(Team* team, job_control_state newState,
3418 	Signal* signal)
3419 {
3420 	if (team == NULL || team->job_control_entry == NULL)
3421 		return;
3422 
	// don't touch anything if the state stays the same or the team is
	// already dead
3425 	job_control_entry* entry = team->job_control_entry;
3426 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
3427 		return;
3428 
3429 	T(SetJobControlState(team->id, newState, signal));
3430 
3431 	// remove from the old list
3432 	switch (entry->state) {
3433 		case JOB_CONTROL_STATE_NONE:
3434 			// entry is in no list ATM
3435 			break;
3436 		case JOB_CONTROL_STATE_DEAD:
3437 			// can't get here
3438 			break;
3439 		case JOB_CONTROL_STATE_STOPPED:
3440 			team->parent->stopped_children.entries.Remove(entry);
3441 			break;
3442 		case JOB_CONTROL_STATE_CONTINUED:
3443 			team->parent->continued_children.entries.Remove(entry);
3444 			break;
3445 	}
3446 
3447 	entry->state = newState;
3448 
3449 	if (signal != NULL) {
3450 		entry->signal = signal->Number();
3451 		entry->signaling_user = signal->SendingUser();
3452 	}
3453 
3454 	// add to new list
3455 	team_job_control_children* childList = NULL;
3456 	switch (entry->state) {
3457 		case JOB_CONTROL_STATE_NONE:
3458 			// entry doesn't get into any list
3459 			break;
3460 		case JOB_CONTROL_STATE_DEAD:
3461 			childList = &team->parent->dead_children;
3462 			team->parent->dead_children.count++;
3463 			break;
3464 		case JOB_CONTROL_STATE_STOPPED:
3465 			childList = &team->parent->stopped_children;
3466 			break;
3467 		case JOB_CONTROL_STATE_CONTINUED:
3468 			childList = &team->parent->continued_children;
3469 			break;
3470 	}
3471 
3472 	if (childList != NULL) {
3473 		childList->entries.Add(entry);
3474 		team->parent->dead_children.condition_variable.NotifyAll();
3475 	}
3476 }
3477 
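
/*	Illustrative call when a child has been stopped by SIGSTOP (hypothetical;
	the real call sites live in the signal handling code). `childTeam` and
	`senderTeamID` are placeholders; the caller holds the child's parent
	team's lock:

		Signal signal(SIGSTOP, SI_USER, B_OK, senderTeamID);
		team_set_job_control_state(childTeam, JOB_CONTROL_STATE_STOPPED,
			&signal);
*/
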
3478 
3479 /*!	Inits the given team's exit information, if not yet initialized, to some
3480 	generic "killed" status.
3481 	The caller must not hold the team's lock. Interrupts must be enabled.
3482 
3483 	\param team The team whose exit info shall be initialized.
3484 */
3485 void
3486 team_init_exit_info_on_error(Team* team)
3487 {
3488 	TeamLocker teamLocker(team);
3489 
3490 	if (!team->exit.initialized) {
3491 		team->exit.reason = CLD_KILLED;
3492 		team->exit.signal = SIGKILL;
3493 		team->exit.signaling_user = geteuid();
3494 		team->exit.status = 0;
3495 		team->exit.initialized = true;
3496 	}
3497 }
3498 
3499 
3500 /*! Adds a hook to the team that is called as soon as this team goes away.
	This call might become public in the future.
3502 */
3503 status_t
3504 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3505 {
3506 	if (hook == NULL || teamID < B_OK)
3507 		return B_BAD_VALUE;
3508 
3509 	// create the watcher object
3510 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3511 	if (watcher == NULL)
3512 		return B_NO_MEMORY;
3513 
3514 	watcher->hook = hook;
3515 	watcher->data = data;
3516 
3517 	// add watcher, if the team isn't already dying
3518 	// get the team
3519 	Team* team = Team::GetAndLock(teamID);
3520 	if (team == NULL) {
3521 		free(watcher);
3522 		return B_BAD_TEAM_ID;
3523 	}
3524 
3525 	list_add_item(&team->watcher_list, watcher);
3526 
3527 	team->UnlockAndReleaseReference();
3528 
3529 	return B_OK;
3530 }
3531 
3532 
3533 status_t
3534 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3535 {
3536 	if (hook == NULL || teamID < 0)
3537 		return B_BAD_VALUE;
3538 
3539 	// get team and remove watcher (if present)
3540 	Team* team = Team::GetAndLock(teamID);
3541 	if (team == NULL)
3542 		return B_BAD_TEAM_ID;
3543 
3544 	// search for watcher
3545 	team_watcher* watcher = NULL;
3546 	while ((watcher = (team_watcher*)list_get_next_item(
3547 			&team->watcher_list, watcher)) != NULL) {
3548 		if (watcher->hook == hook && watcher->data == data) {
3549 			// got it!
3550 			list_remove_item(&team->watcher_list, watcher);
3551 			break;
3552 		}
3553 	}
3554 
3555 	team->UnlockAndReleaseReference();
3556 
3557 	if (watcher == NULL)
3558 		return B_ENTRY_NOT_FOUND;
3559 
3560 	free(watcher);
3561 	return B_OK;
3562 }
3563 
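
/*	Illustrative watcher pair (hypothetical hook and team ID):

		static void
		my_team_gone(team_id team, void* data)
		{
			dprintf("team %" B_PRId32 " went away\n", team);
		}

		status_t error = start_watching_team(teamID, &my_team_gone, NULL);
		...
		// balance the call if the hook hasn't fired yet
		if (error == B_OK)
			stop_watching_team(teamID, &my_team_gone, NULL);
*/
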
3564 
3565 /*!	Allocates a user_thread structure from the team.
3566 	The team lock must be held, unless the function is called for the team's
3567 	main thread. Interrupts must be enabled.
3568 */
3569 struct user_thread*
3570 team_allocate_user_thread(Team* team)
3571 {
3572 	if (team->user_data == 0)
3573 		return NULL;
3574 
3575 	// take an entry from the free list, if any
3576 	if (struct free_user_thread* entry = team->free_user_threads) {
3577 		user_thread* thread = entry->thread;
3578 		team->free_user_threads = entry->next;
3579 		free(entry);
3580 		return thread;
3581 	}
3582 
3583 	while (true) {
3584 		// enough space left?
3585 		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
3586 		if (team->user_data_size - team->used_user_data < needed) {
3587 			// try to resize the area
3588 			if (resize_area(team->user_data_area,
3589 					team->user_data_size + B_PAGE_SIZE) != B_OK) {
3590 				return NULL;
3591 			}
3592 
3593 			// resized user area successfully -- try to allocate the user_thread
3594 			// again
3595 			team->user_data_size += B_PAGE_SIZE;
3596 			continue;
3597 		}
3598 
3599 		// allocate the user_thread
3600 		user_thread* thread
3601 			= (user_thread*)(team->user_data + team->used_user_data);
3602 		team->used_user_data += needed;
3603 
3604 		return thread;
3605 	}
3606 }
3607 
3608 
3609 /*!	Frees the given user_thread structure.
3610 	The team's lock must not be held. Interrupts must be enabled.
3611 	\param team The team the user thread was allocated from.
3612 	\param userThread The user thread to free.
3613 */
3614 void
3615 team_free_user_thread(Team* team, struct user_thread* userThread)
3616 {
3617 	if (userThread == NULL)
3618 		return;
3619 
3620 	// create a free list entry
3621 	free_user_thread* entry
3622 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3623 	if (entry == NULL) {
3624 		// we have to leak the user thread :-/
3625 		return;
3626 	}
3627 
3628 	// add to free list
3629 	TeamLocker teamLocker(team);
3630 
3631 	entry->thread = userThread;
3632 	entry->next = team->free_user_threads;
3633 	team->free_user_threads = entry;
3634 }
3635 
3636 
3637 //	#pragma mark - Associated data interface
3638 
3639 
3640 AssociatedData::AssociatedData()
3641 	:
3642 	fOwner(NULL)
3643 {
3644 }
3645 
3646 
3647 AssociatedData::~AssociatedData()
3648 {
3649 }
3650 
3651 
3652 void
3653 AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
3654 {
3655 }
3656 
3657 
3658 AssociatedDataOwner::AssociatedDataOwner()
3659 {
3660 	mutex_init(&fLock, "associated data owner");
3661 }
3662 
3663 
3664 AssociatedDataOwner::~AssociatedDataOwner()
3665 {
3666 	mutex_destroy(&fLock);
3667 }
3668 
3669 
3670 bool
3671 AssociatedDataOwner::AddData(AssociatedData* data)
3672 {
3673 	MutexLocker locker(fLock);
3674 
3675 	if (data->Owner() != NULL)
3676 		return false;
3677 
3678 	data->AcquireReference();
3679 	fList.Add(data);
3680 	data->SetOwner(this);
3681 
3682 	return true;
3683 }
3684 
3685 
3686 bool
3687 AssociatedDataOwner::RemoveData(AssociatedData* data)
3688 {
3689 	MutexLocker locker(fLock);
3690 
3691 	if (data->Owner() != this)
3692 		return false;
3693 
3694 	data->SetOwner(NULL);
3695 	fList.Remove(data);
3696 
3697 	locker.Unlock();
3698 
3699 	data->ReleaseReference();
3700 
3701 	return true;
3702 }
3703 
3704 
3705 void
3706 AssociatedDataOwner::PrepareForDeletion()
3707 {
3708 	MutexLocker locker(fLock);
3709 
3710 	// move all data to a temporary list and unset the owner
3711 	DataList list;
3712 	list.MoveFrom(&fList);
3713 
3714 	for (DataList::Iterator it = list.GetIterator();
3715 		AssociatedData* data = it.Next();) {
3716 		data->SetOwner(NULL);
3717 	}
3718 
3719 	locker.Unlock();
3720 
3721 	// call the notification hooks and release our references
3722 	while (AssociatedData* data = list.RemoveHead()) {
3723 		data->OwnerDeleted(this);
3724 		data->ReleaseReference();
3725 	}
3726 }
3727 
3728 
3729 /*!	Associates data with the current team.
3730 	When the team is deleted, the data object is notified.
3731 	The team acquires a reference to the object.
3732 
3733 	\param data The data object.
3734 	\return \c true on success, \c false otherwise. Fails only when the supplied
3735 		data object is already associated with another owner.
3736 */
3737 bool
3738 team_associate_data(AssociatedData* data)
3739 {
3740 	return thread_get_current_thread()->team->AddData(data);
3741 }
3742 
3743 
3744 /*!	Dissociates data from the current team.
3745 	Balances an earlier call to team_associate_data().
3746 
3747 	\param data The data object.
3748 	\return \c true on success, \c false otherwise. Fails only when the data
3749 		object is not associated with the current team.
3750 */
3751 bool
3752 team_dissociate_data(AssociatedData* data)
3753 {
3754 	return thread_get_current_thread()->team->RemoveData(data);
3755 }
3756 
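
/*	Illustrative AssociatedData subclass (hypothetical) that wants to be
	notified when its team goes away:

		struct MyData : AssociatedData {
			virtual void OwnerDeleted(AssociatedDataOwner* owner)
			{
				// the owning team is being deleted -- drop back references
			}
		};

		MyData* data = new(std::nothrow) MyData;
		if (data != NULL) {
			team_associate_data(data);
			data->ReleaseReference();
				// on success the team now holds its own reference; on
				// failure this deletes the object again
		}
*/
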
3757 
3758 //	#pragma mark - Public kernel API
3759 
3760 
3761 thread_id
3762 load_image(int32 argCount, const char** args, const char** env)
3763 {
3764 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
3765 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
3766 }
3767 
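
/*	Illustrative use (hypothetical command): start "/bin/sh -c ls". Since
	B_WAIT_TILL_LOADED is passed, the returned main thread is suspended and
	has to be resumed explicitly:

		const char* args[] = { "/bin/sh", "-c", "ls" };
		thread_id thread = load_image(3, args, NULL);
		if (thread >= 0)
			resume_thread(thread);
*/
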
3768 
3769 thread_id
3770 load_image_etc(int32 argCount, const char* const* args,
3771 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3772 {
3773 	// we need to flatten the args and environment
3774 
3775 	if (args == NULL)
3776 		return B_BAD_VALUE;
3777 
3778 	// determine total needed size
3779 	int32 argSize = 0;
3780 	for (int32 i = 0; i < argCount; i++)
3781 		argSize += strlen(args[i]) + 1;
3782 
3783 	int32 envCount = 0;
3784 	int32 envSize = 0;
3785 	while (env != NULL && env[envCount] != NULL)
3786 		envSize += strlen(env[envCount++]) + 1;
3787 
3788 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3789 	if (size > MAX_PROCESS_ARGS_SIZE)
3790 		return B_TOO_MANY_ARGS;
3791 
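	// The flat block built below is laid out as: argCount argument pointers,
	// a NULL, envCount environment pointers, another NULL, and then the
	// string data that all pointers point into. For example (illustrative),
	// args {"sh", "-c"} and env {"A=1"} yield:
	//   [&"sh"][&"-c"][NULL][&"A=1"][NULL]"sh\0-c\0A=1\0"
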
3792 	// allocate space
3793 	char** flatArgs = (char**)malloc(size);
3794 	if (flatArgs == NULL)
3795 		return B_NO_MEMORY;
3796 
3797 	char** slot = flatArgs;
3798 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3799 
3800 	// copy arguments and environment
3801 	for (int32 i = 0; i < argCount; i++) {
3802 		int32 argSize = strlen(args[i]) + 1;
3803 		memcpy(stringSpace, args[i], argSize);
3804 		*slot++ = stringSpace;
3805 		stringSpace += argSize;
3806 	}
3807 
3808 	*slot++ = NULL;
3809 
3810 	for (int32 i = 0; i < envCount; i++) {
3811 		int32 envSize = strlen(env[i]) + 1;
3812 		memcpy(stringSpace, env[i], envSize);
3813 		*slot++ = stringSpace;
3814 		stringSpace += envSize;
3815 	}
3816 
3817 	*slot++ = NULL;
3818 
	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
		priority, parentID, flags, -1, 0);
3821 
3822 	free(flatArgs);
3823 		// load_image_internal() unset our variable if it took over ownership
3824 
3825 	return thread;
3826 }
3827 
3828 
3829 status_t
3830 wait_for_team(team_id id, status_t* _returnCode)
3831 {
3832 	// check whether the team exists
3833 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3834 
3835 	Team* team = team_get_team_struct_locked(id);
3836 	if (team == NULL)
3837 		return B_BAD_TEAM_ID;
3838 
3839 	id = team->id;
3840 
3841 	teamsLocker.Unlock();
3842 
3843 	// wait for the main thread (it has the same ID as the team)
3844 	return wait_for_thread(id, _returnCode);
3845 }
3846 
3847 
3848 status_t
3849 kill_team(team_id id)
3850 {
3851 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3852 
3853 	Team* team = team_get_team_struct_locked(id);
3854 	if (team == NULL)
3855 		return B_BAD_TEAM_ID;
3856 
3857 	id = team->id;
3858 
3859 	teamsLocker.Unlock();
3860 
3861 	if (team == sKernelTeam)
3862 		return B_NOT_ALLOWED;
3863 
	// Just kill the team's main thread (it has the same ID as the team).
	// The cleanup code there will take care of the team.
3866 	return kill_thread(id);
3867 }
3868 
3869 
3870 status_t
3871 _get_team_info(team_id id, team_info* info, size_t size)
3872 {
3873 	// get the team
3874 	Team* team = Team::Get(id);
3875 	if (team == NULL)
3876 		return B_BAD_TEAM_ID;
3877 	BReference<Team> teamReference(team, true);
3878 
3879 	// fill in the info
3880 	return fill_team_info(team, info, size);
3881 }
3882 
3883 
3884 status_t
3885 _get_next_team_info(int32* cookie, team_info* info, size_t size)
3886 {
3887 	int32 slot = *cookie;
3888 	if (slot < 1)
3889 		slot = 1;
3890 
3891 	InterruptsReadSpinLocker locker(sTeamHashLock);
3892 
3893 	team_id lastTeamID = peek_next_thread_id();
3894 		// TODO: This is broken, since the id can wrap around!
3895 
3896 	// get next valid team
3897 	Team* team = NULL;
3898 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3899 		slot++;
3900 
3901 	if (team == NULL)
3902 		return B_BAD_TEAM_ID;
3903 
3904 	// get a reference to the team and unlock
3905 	BReference<Team> teamReference(team);
3906 	locker.Unlock();
3907 
3908 	// fill in the info
3909 	*cookie = ++slot;
3910 	return fill_team_info(team, info, size);
3911 }
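
// Enumeration sketch for the cookie-based iterator above (kernel-side,
// mirroring the loop a caller of the public get_next_team_info() would use):
//
//	int32 cookie = 0;
//	team_info info;
//	while (_get_next_team_info(&cookie, &info, sizeof(info)) == B_OK)
//		dprintf("team %" B_PRId32 ": %s\n", info.team, info.args);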
3912 
3913 
3914 status_t
3915 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3916 {
3917 	if (size != sizeof(team_usage_info))
3918 		return B_BAD_VALUE;
3919 
3920 	return common_get_team_usage_info(id, who, info, 0);
3921 }
3922 
3923 
3924 pid_t
3925 getpid(void)
3926 {
3927 	return thread_get_current_thread()->team->id;
3928 }
3929 
3930 
3931 pid_t
getppid(void)
3933 {
3934 	return _getppid(0);
3935 }
3936 
3937 
3938 pid_t
3939 getpgid(pid_t id)
3940 {
3941 	if (id < 0) {
3942 		errno = EINVAL;
3943 		return -1;
3944 	}
3945 
3946 	if (id == 0) {
3947 		// get process group of the calling process
3948 		Team* team = thread_get_current_thread()->team;
3949 		TeamLocker teamLocker(team);
3950 		return team->group_id;
3951 	}
3952 
3953 	// get the team
3954 	Team* team = Team::GetAndLock(id);
3955 	if (team == NULL) {
3956 		errno = ESRCH;
3957 		return -1;
3958 	}
3959 
3960 	// get the team's process group ID
3961 	pid_t groupID = team->group_id;
3962 
3963 	team->UnlockAndReleaseReference();
3964 
3965 	return groupID;
3966 }
3967 
3968 
3969 pid_t
3970 getsid(pid_t id)
3971 {
3972 	if (id < 0) {
3973 		errno = EINVAL;
3974 		return -1;
3975 	}
3976 
3977 	if (id == 0) {
3978 		// get session of the calling process
3979 		Team* team = thread_get_current_thread()->team;
3980 		TeamLocker teamLocker(team);
3981 		return team->session_id;
3982 	}
3983 
3984 	// get the team
3985 	Team* team = Team::GetAndLock(id);
3986 	if (team == NULL) {
3987 		errno = ESRCH;
3988 		return -1;
3989 	}
3990 
3991 	// get the team's session ID
3992 	pid_t sessionID = team->session_id;
3993 
3994 	team->UnlockAndReleaseReference();
3995 
3996 	return sessionID;
3997 }
3998 
3999 
4000 //	#pragma mark - User syscalls
4001 
4002 
4003 status_t
4004 _user_exec(const char* userPath, const char* const* userFlatArgs,
4005 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
4006 {
4007 	// NOTE: Since this function normally doesn't return, don't use automatic
4008 	// variables that need destruction in the function scope.
4009 	char path[B_PATH_NAME_LENGTH];
4010 
4011 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
4012 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
4013 		return B_BAD_ADDRESS;
4014 
4015 	// copy and relocate the flat arguments
4016 	char** flatArgs;
4017 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
4018 		argCount, envCount, flatArgs);
4019 
4020 	if (error == B_OK) {
4021 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
4022 			envCount, umask);
4023 			// this one only returns in case of error
4024 	}
4025 
4026 	free(flatArgs);
4027 	return error;
4028 }
4029 
4030 
4031 thread_id
4032 _user_fork(void)
4033 {
4034 	return fork_team();
4035 }
4036 
4037 
4038 pid_t
4039 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo,
4040 	team_usage_info* usageInfo)
4041 {
4042 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
4043 		return B_BAD_ADDRESS;
4044 	if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo))
4045 		return B_BAD_ADDRESS;
4046 
4047 	siginfo_t info;
4048 	team_usage_info usage_info;
4049 	pid_t foundChild = wait_for_child(child, flags, info, usage_info);
4050 	if (foundChild < 0)
4051 		return syscall_restart_handle_post(foundChild);
4052 
4053 	// copy info back to userland
4054 	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
4055 		return B_BAD_ADDRESS;
4056 	// copy usage_info back to userland
4057 	if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info,
4058 		sizeof(usage_info)) != B_OK) {
4059 		return B_BAD_ADDRESS;
4060 	}
4061 
4062 	return foundChild;
4063 }
4064 
4065 
4066 pid_t
4067 _user_process_info(pid_t process, int32 which)
4068 {
4069 	pid_t result;
4070 	switch (which) {
4071 		case SESSION_ID:
4072 			result = getsid(process);
4073 			break;
4074 		case GROUP_ID:
4075 			result = getpgid(process);
4076 			break;
4077 		case PARENT_ID:
4078 			result = _getppid(process);
4079 			break;
4080 		default:
4081 			return B_BAD_VALUE;
4082 	}
4083 
4084 	return result >= 0 ? result : errno;
4085 }
4086 
4087 
4088 pid_t
4089 _user_setpgid(pid_t processID, pid_t groupID)
4090 {
4091 	// setpgid() can be called either by the parent of the target process or
4092 	// by the process itself to do one of two things:
4093 	// * Create a new process group with the target process' ID and the target
4094 	//   process as group leader.
4095 	// * Set the target process' process group to an already existing one in the
4096 	//   same session.
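	//
	// A userland sketch of both cases (hypothetical PIDs; this syscall backs
	// the POSIX setpgid() wrapper):
	//
	//	pid_t child = fork();
	//	if (child == 0) {
	//		// child: make it the leader of a new group with its own ID
	//		setpgid(0, 0);
	//	} else if (child > 0) {
	//		// parent: do the same from this side, too, to avoid racing the
	//		// child's exec*(); moving the child into an existing group of
	//		// the same session would be setpgid(child, existingGroupID)
	//		setpgid(child, child);
	//	}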
4097 
4098 	if (groupID < 0)
4099 		return B_BAD_VALUE;
4100 
4101 	Team* currentTeam = thread_get_current_thread()->team;
4102 	if (processID == 0)
4103 		processID = currentTeam->id;
4104 
4105 	// if the group ID is not specified, use the target process' ID
4106 	if (groupID == 0)
4107 		groupID = processID;
4108 
4109 	// We loop when running into the following race condition: We create a new
4110 	// process group, because there isn't one with that ID yet, but later when
4111 	// trying to publish it, we find that someone else created and published
4112 	// a group with that ID in the meantime. In that case we just restart the
4113 	// whole action.
4114 	while (true) {
4115 		// Look up the process group by ID. If it doesn't exist yet and we are
4116 		// allowed to create a new one, do that.
4117 		ProcessGroup* group = ProcessGroup::Get(groupID);
4118 		bool newGroup = false;
4119 		if (group == NULL) {
4120 			if (groupID != processID)
4121 				return B_NOT_ALLOWED;
4122 
4123 			group = new(std::nothrow) ProcessGroup(groupID);
4124 			if (group == NULL)
4125 				return B_NO_MEMORY;
4126 
4127 			newGroup = true;
4128 		}
4129 		BReference<ProcessGroup> groupReference(group, true);
4130 
4131 		// get the target team
4132 		Team* team = Team::Get(processID);
4133 		if (team == NULL)
4134 			return ESRCH;
4135 		BReference<Team> teamReference(team, true);
4136 
4137 		// lock the new process group and the team's current process group
4138 		while (true) {
4139 			// lock the team's current process group
4140 			team->LockProcessGroup();
4141 
4142 			ProcessGroup* oldGroup = team->group;
4143 			if (oldGroup == NULL) {
4144 				// This can only happen if the team is exiting.
4145 				ASSERT(team->state >= TEAM_STATE_SHUTDOWN);
4146 				return ESRCH;
4147 			}
4148 
4149 			if (oldGroup == group) {
4150 				// it's the same as the target group, so just bail out
4151 				oldGroup->Unlock();
4152 				return group->id;
4153 			}
4154 
4155 			oldGroup->AcquireReference();
4156 
4157 			// lock the target process group, if locking order allows it
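			// (Lock order for process groups is ascending by ID; a newly
			// created group isn't published yet, so nobody else can contend
			// for its lock.)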
4158 			if (newGroup || group->id > oldGroup->id) {
4159 				group->Lock();
4160 				break;
4161 			}
4162 
4163 			// try to lock
4164 			if (group->TryLock())
4165 				break;
4166 
4167 			// no dice -- unlock the team's current process group and relock in
4168 			// the correct order
4169 			oldGroup->Unlock();
4170 
4171 			group->Lock();
4172 			oldGroup->Lock();
4173 
4174 			// check whether things are still the same
4175 			TeamLocker teamLocker(team);
4176 			if (team->group == oldGroup)
4177 				break;
4178 
4179 			// something changed -- unlock everything and retry
4180 			teamLocker.Unlock();
4181 			oldGroup->Unlock();
4182 			group->Unlock();
4183 			oldGroup->ReleaseReference();
4184 		}
4185 
		// we now hold references and locks for both the new and the old
		// process group
4187 		BReference<ProcessGroup> oldGroupReference(team->group, true);
4188 		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
4189 		AutoLocker<ProcessGroup> groupLocker(group, true);
4190 
4191 		// also lock the target team and its parent
4192 		team->LockTeamAndParent(false);
4193 		TeamLocker parentLocker(team->parent, true);
4194 		TeamLocker teamLocker(team, true);
4195 
4196 		// perform the checks
4197 		if (team == currentTeam) {
4198 			// we set our own group
4199 
4200 			// we must not change our process group ID if we're a session leader
4201 			if (is_session_leader(currentTeam))
4202 				return B_NOT_ALLOWED;
4203 		} else {
4204 			// Calling team != target team. The target team must be a child of
4205 			// the calling team and in the same session. (If that's the case it
4206 			// isn't a session leader either.)
4207 			if (team->parent != currentTeam
4208 				|| team->session_id != currentTeam->session_id) {
4209 				return B_NOT_ALLOWED;
4210 			}
4211 
4212 			// The call is also supposed to fail on a child, when the child has
4213 			// already executed exec*() [EACCES].
4214 			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
4215 				return EACCES;
4216 		}
4217 
4218 		// If we created a new process group, publish it now.
4219 		if (newGroup) {
4220 			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
4221 			if (sGroupHash.Lookup(groupID)) {
4222 				// A group with the group ID appeared since we first checked.
4223 				// Back to square one.
4224 				continue;
4225 			}
4226 
4227 			group->PublishLocked(team->group->Session());
4228 		} else if (group->Session()->id != team->session_id) {
4229 			// The existing target process group belongs to a different session.
4230 			// That's not allowed.
4231 			return B_NOT_ALLOWED;
4232 		}
4233 
4234 		// Everything is ready -- set the group.
4235 		remove_team_from_group(team);
4236 		insert_team_into_group(group, team);
4237 
4238 		// Changing the process group might have changed the situation for a
4239 		// parent waiting in wait_for_child(). Hence we notify it.
4240 		team->parent->dead_children.condition_variable.NotifyAll();
4241 
4242 		return group->id;
4243 	}
4244 }
4245 
4246 
4247 pid_t
4248 _user_setsid(void)
4249 {
4250 	Team* team = thread_get_current_thread()->team;
4251 
4252 	// create a new process group and session
4253 	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
4254 	if (group == NULL)
4255 		return B_NO_MEMORY;
4256 	BReference<ProcessGroup> groupReference(group, true);
4257 	AutoLocker<ProcessGroup> groupLocker(group);
4258 
4259 	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
4260 	if (session == NULL)
4261 		return B_NO_MEMORY;
4262 	BReference<ProcessSession> sessionReference(session, true);
4263 
4264 	// lock the team's current process group, parent, and the team itself
4265 	team->LockTeamParentAndProcessGroup();
4266 	BReference<ProcessGroup> oldGroupReference(team->group);
4267 	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
4268 	TeamLocker parentLocker(team->parent, true);
4269 	TeamLocker teamLocker(team, true);
4270 
4271 	// the team must not already be a process group leader
4272 	if (is_process_group_leader(team))
4273 		return B_NOT_ALLOWED;
4274 
4275 	// remove the team from the old and add it to the new process group
4276 	remove_team_from_group(team);
4277 	group->Publish(session);
4278 	insert_team_into_group(group, team);
4279 
4280 	// Changing the process group might have changed the situation for a
4281 	// parent waiting in wait_for_child(). Hence we notify it.
4282 	team->parent->dead_children.condition_variable.NotifyAll();
4283 
4284 	return group->id;
4285 }
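
// The syscall above backs POSIX setsid(). The classic userland pattern is
// daemonization (illustrative sketch): since a process group leader may not
// call setsid(), the daemon first fork()s and lets the parent exit:
//
//	pid_t child = fork();
//	if (child < 0)
//		return errno;
//	if (child > 0)
//		exit(0);
//	// the child is not a group leader, so this succeeds
//	if (setsid() < 0) {
//		// EPERM -- maps to B_NOT_ALLOWED above
//	}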
4286 
4287 
4288 status_t
4289 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4290 {
4291 	status_t returnCode;
4292 	status_t status;
4293 
4294 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4295 		return B_BAD_ADDRESS;
4296 
4297 	status = wait_for_team(id, &returnCode);
4298 	if (status >= B_OK && _userReturnCode != NULL) {
4299 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4300 				!= B_OK)
4301 			return B_BAD_ADDRESS;
4302 		return B_OK;
4303 	}
4304 
4305 	return syscall_restart_handle_post(status);
4306 }
4307 
4308 
4309 thread_id
4310 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
4311 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
4312 	port_id errorPort, uint32 errorToken)
4313 {
4314 	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));
4315 
4316 	if (argCount < 1)
4317 		return B_BAD_VALUE;
4318 
4319 	// copy and relocate the flat arguments
4320 	char** flatArgs;
4321 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
4322 		argCount, envCount, flatArgs);
4323 	if (error != B_OK)
4324 		return error;
4325 
4326 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
4327 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
4328 		errorToken);
4329 
4330 	free(flatArgs);
		// load_image_internal() has set flatArgs to NULL if it took over
		// ownership, so this free() is safe in either case
4332 
4333 	return thread;
4334 }
4335 
4336 
4337 void
4338 _user_exit_team(status_t returnValue)
4339 {
4340 	Thread* thread = thread_get_current_thread();
4341 	Team* team = thread->team;
4342 
4343 	// set this thread's exit status
4344 	thread->exit.status = returnValue;
4345 
4346 	// set the team exit status
4347 	TeamLocker teamLocker(team);
4348 
4349 	if (!team->exit.initialized) {
4350 		team->exit.reason = CLD_EXITED;
4351 		team->exit.signal = 0;
4352 		team->exit.signaling_user = 0;
4353 		team->exit.status = returnValue;
4354 		team->exit.initialized = true;
4355 	}
4356 
4357 	teamLocker.Unlock();
4358 
4359 	// Stop the thread, if the team is being debugged and that has been
4360 	// requested.
4361 	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
4362 		user_debug_stop_thread();
4363 
4364 	// Send this thread a SIGKILL. This makes sure the thread will not return to
4365 	// userland. The signal handling code forwards the signal to the main
4366 	// thread (if that's not already this one), which will take the team down.
4367 	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
4368 	send_signal_to_thread(thread, signal, 0);
4369 }
4370 
4371 
4372 status_t
4373 _user_kill_team(team_id team)
4374 {
4375 	return kill_team(team);
4376 }
4377 
4378 
4379 status_t
4380 _user_get_team_info(team_id id, team_info* userInfo, size_t size)
4381 {
4382 	status_t status;
4383 	team_info info;
4384 
4385 	if (size > sizeof(team_info))
4386 		return B_BAD_VALUE;
4387 
4388 	if (!IS_USER_ADDRESS(userInfo))
4389 		return B_BAD_ADDRESS;
4390 
4391 	status = _get_team_info(id, &info, size);
4392 	if (status == B_OK) {
4393 		if (user_memcpy(userInfo, &info, size) < B_OK)
4394 			return B_BAD_ADDRESS;
4395 	}
4396 
4397 	return status;
4398 }
4399 
4400 
4401 status_t
4402 _user_get_next_team_info(int32* userCookie, team_info* userInfo, size_t size)
4403 {
4404 	status_t status;
4405 	team_info info;
4406 	int32 cookie;
4407 
4408 	if (size > sizeof(team_info))
4409 		return B_BAD_VALUE;
4410 
4411 	if (!IS_USER_ADDRESS(userCookie)
4412 		|| !IS_USER_ADDRESS(userInfo)
4413 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4414 		return B_BAD_ADDRESS;
4415 
4416 	status = _get_next_team_info(&cookie, &info, size);
4417 	if (status != B_OK)
4418 		return status;
4419 
4420 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4421 		|| user_memcpy(userInfo, &info, size) < B_OK)
4422 		return B_BAD_ADDRESS;
4423 
4424 	return status;
4425 }
4426 
4427 
4428 team_id
4429 _user_get_current_team(void)
4430 {
4431 	return team_get_current_team_id();
4432 }
4433 
4434 
4435 status_t
4436 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4437 	size_t size)
4438 {
4439 	if (size != sizeof(team_usage_info))
4440 		return B_BAD_VALUE;
4441 
	team_usage_info info;
	status_t status = common_get_team_usage_info(team, who, &info,
		B_CHECK_PERMISSION);
	if (status != B_OK)
		return status;
			// bail out early, so that uninitialized stack data is never
			// copied back to userland

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(userInfo, &info, size) != B_OK) {
		return B_BAD_ADDRESS;
	}

	return B_OK;
4452 }
4453 
4454 
4455 status_t
4456 _user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
4457 	size_t size, size_t* _sizeNeeded)
4458 {
4459 	// check parameters
4460 	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
4461 		|| (buffer == NULL && size > 0)
4462 		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
4463 		return B_BAD_ADDRESS;
4464 	}
4465 
4466 	KMessage info;
4467 
4468 	if ((flags & B_TEAM_INFO_BASIC) != 0) {
4469 		// allocate memory for a copy of the needed team data
4470 		struct ExtendedTeamData {
4471 			team_id	id;
4472 			pid_t	group_id;
4473 			pid_t	session_id;
4474 			uid_t	real_uid;
4475 			gid_t	real_gid;
4476 			uid_t	effective_uid;
4477 			gid_t	effective_gid;
4478 			char	name[B_OS_NAME_LENGTH];
4479 		} teamClone;
4480 
4481 		io_context* ioContext;
4482 		{
4483 			// get the team structure
4484 			Team* team = Team::GetAndLock(teamID);
4485 			if (team == NULL)
4486 				return B_BAD_TEAM_ID;
4487 			BReference<Team> teamReference(team, true);
4488 			TeamLocker teamLocker(team, true);
4489 
4490 			// copy the data
4491 			teamClone.id = team->id;
4492 			strlcpy(teamClone.name, team->Name(), sizeof(teamClone.name));
4493 			teamClone.group_id = team->group_id;
4494 			teamClone.session_id = team->session_id;
4495 			teamClone.real_uid = team->real_uid;
4496 			teamClone.real_gid = team->real_gid;
4497 			teamClone.effective_uid = team->effective_uid;
4498 			teamClone.effective_gid = team->effective_gid;
4499 
4500 			// also fetch a reference to the I/O context
4501 			ioContext = team->io_context;
4502 			vfs_get_io_context(ioContext);
4503 		}
4504 		CObjectDeleter<io_context, void, vfs_put_io_context>
4505 			ioContextPutter(ioContext);
4506 
4507 		// add the basic data to the info message
4508 		if (info.AddInt32("id", teamClone.id) != B_OK
4509 			|| info.AddString("name", teamClone.name) != B_OK
4510 			|| info.AddInt32("process group", teamClone.group_id) != B_OK
4511 			|| info.AddInt32("session", teamClone.session_id) != B_OK
4512 			|| info.AddInt32("uid", teamClone.real_uid) != B_OK
4513 			|| info.AddInt32("gid", teamClone.real_gid) != B_OK
4514 			|| info.AddInt32("euid", teamClone.effective_uid) != B_OK
4515 			|| info.AddInt32("egid", teamClone.effective_gid) != B_OK) {
4516 			return B_NO_MEMORY;
4517 		}
4518 
4519 		// get the current working directory from the I/O context
4520 		dev_t cwdDevice;
4521 		ino_t cwdDirectory;
4522 		{
4523 			MutexLocker ioContextLocker(ioContext->io_mutex);
4524 			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
4525 		}
4526 
4527 		if (info.AddInt32("cwd device", cwdDevice) != B_OK
4528 			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
4529 			return B_NO_MEMORY;
4530 		}
4531 	}
4532 
4533 	// TODO: Support the other flags!
4534 
4535 	// copy the needed size and, if it fits, the message back to userland
4536 	size_t sizeNeeded = info.ContentSize();
4537 	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
4538 		return B_BAD_ADDRESS;
4539 
4540 	if (sizeNeeded > size)
4541 		return B_BUFFER_OVERFLOW;
4542 
4543 	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
4544 		return B_BAD_ADDRESS;
4545 
4546 	return B_OK;
4547 }
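
// Usage sketch for the syscall above. _kern_get_extended_team_info() is the
// assumed userland stub name (following the usual _user_/_kern_ pairing; it
// isn't declared in this file). The needed size is queried first, then the
// flattened KMessage is fetched:
//
//	size_t sizeNeeded;
//	status_t error = _kern_get_extended_team_info(teamID, B_TEAM_INFO_BASIC,
//		NULL, 0, &sizeNeeded);
//	if (error != B_OK && error != B_BUFFER_OVERFLOW)
//		return error;
//
//	void* buffer = malloc(sizeNeeded);
//	if (buffer == NULL)
//		return B_NO_MEMORY;
//
//	error = _kern_get_extended_team_info(teamID, B_TEAM_INFO_BASIC, buffer,
//		sizeNeeded, &sizeNeeded);
//	// on success the buffer holds a flattened KMessage with the fields added
//	// above: "id", "name", "process group", "session", "uid", "gid", "euid",
//	// "egid", "cwd device" and "cwd directory"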
4548