xref: /haiku/src/system/kernel/team.cpp (revision 984f843b917a1c4e077915c5961a6ef1cf8dabc7)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 /*!	Team functions */
13 
14 
15 #include <team.h>
16 
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22 
23 #include <OS.h>
24 
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27 
28 #include <extended_system_info_defs.h>
29 
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <safemode.h>
47 #include <sem.h>
48 #include <syscall_process_info.h>
49 #include <syscall_load_image.h>
50 #include <syscall_restart.h>
51 #include <syscalls.h>
52 #include <tls.h>
53 #include <tracing.h>
54 #include <user_mutex.h>
55 #include <user_runtime.h>
56 #include <user_thread.h>
57 #include <usergroup.h>
58 #include <vfs.h>
59 #include <vm/vm.h>
60 #include <vm/VMAddressSpace.h>
61 #include <util/AutoLock.h>
62 #include <util/ThreadAutoLock.h>
63 
64 #include "TeamThreadTables.h"
65 
66 
67 //#define TRACE_TEAM
68 #ifdef TRACE_TEAM
69 #	define TRACE(x) dprintf x
70 #else
71 #	define TRACE(x) ;
72 #endif
73 
74 
// Key type for the team hash table: just the team's ID.
struct team_key {
	team_id id;
};

// Bundle of everything needed to start a new userland team (used by the
// exec/load_image paths): the resolved image path, the flattened
// argument/environment block, and the port the loader reports errors to.
struct team_arg {
	char	*path;				// path of the image to execute
	char	**flat_args;		// argv + envp flattened into one allocation
	size_t	flat_args_size;		// total size of the flat_args block
	uint32	arg_count;			// number of entries in argv
	uint32	env_count;			// number of entries in envp
	mode_t	umask;
	uint32	flags;				// TEAM_ARGS_FLAG_* bits, see below
	port_id	error_port;			// port to which load errors are reported
	uint32	error_token;		// token identifying the request on error_port
};

// disables address space layout randomization for the new team
#define TEAM_ARGS_FLAG_NO_ASLR	0x01
92 
93 
namespace {


// Notification service that publishes team events to registered
// listeners (see TeamNotificationService::Notify() below).
class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};


// #pragma mark - TeamTable


// the team_id -> Team hash table type
typedef BKernel::TeamThreadTable<Team> TeamTable;


// #pragma mark - ProcessGroupHashDefinition


// BOpenHashTable definition mapping a process group ID (pid_t) to its
// ProcessGroup object.
struct ProcessGroupHashDefinition {
	typedef pid_t			KeyType;
	typedef	ProcessGroup	ValueType;

	size_t HashKey(pid_t key) const
	{
		// the group ID itself serves as the hash value
		return key;
	}

	size_t Hash(ProcessGroup* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(pid_t key, ProcessGroup* value) const
	{
		return value->id == key;
	}

	ProcessGroup*& GetLink(ProcessGroup* value) const
	{
		// intrusive chaining link stored in the ProcessGroup itself
		return value->next;
	}
};

typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;


}	// unnamed namespace
143 
144 
145 // #pragma mark -
146 
147 
// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static rw_spinlock sTeamHashLock = B_RW_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

// the one and only kernel team (set during initialization, outside this view)
static Team* sKernelTeam = NULL;
static bool sDisableUserAddOns = false;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;	// starts at 1 -- presumably the kernel team

static TeamNotificationService sNotificationService;

// reserved address range and initially committed size of a team's user data
static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
174 
175 
176 // #pragma mark - TeamListIterator
177 
178 
/*!	Registers this iterator with the global team table, so iteration
	remains valid while teams come and go.
*/
TeamListIterator::TeamListIterator()
{
	// queue the entry
	InterruptsWriteSpinLocker locker(sTeamHashLock);
	sTeamHash.InsertIteratorEntry(&fEntry);
}
185 
186 
/*!	Unregisters this iterator from the global team table. */
TeamListIterator::~TeamListIterator()
{
	// remove the entry
	InterruptsWriteSpinLocker locker(sTeamHashLock);
	sTeamHash.RemoveIteratorEntry(&fEntry);
}
193 
194 
/*!	Returns the next team in the table, with a reference acquired for the
	caller, or \c NULL when the iteration is exhausted.
*/
Team*
TeamListIterator::Next()
{
	// get the next team -- if there is one, get reference for it
	InterruptsWriteSpinLocker locker(sTeamHashLock);
		// write lock needed, since NextElement() advances the iterator entry
	Team* team = sTeamHash.NextElement(&fEntry);
	if (team != NULL)
		team->AcquireReference();

	return team;
}
206 
207 
208 // #pragma mark - Tracing
209 
210 
211 #if TEAM_TRACING
212 namespace TeamTracing {
213 
// Trace entry recording a fork(), noting the main thread of the new team.
class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
	}

private:
	thread_id			fForkedThread;
};
231 
232 
233 class ExecTeam : public AbstractTraceEntry {
234 public:
235 	ExecTeam(const char* path, int32 argCount, const char* const* args,
236 			int32 envCount, const char* const* env)
237 		:
238 		fArgCount(argCount),
239 		fArgs(NULL)
240 	{
241 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
242 			false);
243 
244 		// determine the buffer size we need for the args
245 		size_t argBufferSize = 0;
246 		for (int32 i = 0; i < argCount; i++)
247 			argBufferSize += strlen(args[i]) + 1;
248 
249 		// allocate a buffer
250 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
251 		if (fArgs) {
252 			char* buffer = fArgs;
253 			for (int32 i = 0; i < argCount; i++) {
254 				size_t argSize = strlen(args[i]) + 1;
255 				memcpy(buffer, args[i], argSize);
256 				buffer += argSize;
257 			}
258 		}
259 
260 		// ignore env for the time being
261 		(void)envCount;
262 		(void)env;
263 
264 		Initialized();
265 	}
266 
267 	virtual void AddDump(TraceOutput& out)
268 	{
269 		out.Print("team exec, \"%p\", args:", fPath);
270 
271 		if (fArgs != NULL) {
272 			char* args = fArgs;
273 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
274 				out.Print(" \"%s\"", args);
275 				args += strlen(args) + 1;
276 			}
277 		} else
278 			out.Print(" <too long>");
279 	}
280 
281 private:
282 	char*	fPath;
283 	int32	fArgCount;
284 	char*	fArgs;
285 };
286 
287 
288 static const char*
289 job_control_state_name(job_control_state state)
290 {
291 	switch (state) {
292 		case JOB_CONTROL_STATE_NONE:
293 			return "none";
294 		case JOB_CONTROL_STATE_STOPPED:
295 			return "stopped";
296 		case JOB_CONTROL_STATE_CONTINUED:
297 			return "continued";
298 		case JOB_CONTROL_STATE_DEAD:
299 			return "dead";
300 		default:
301 			return "invalid";
302 	}
303 }
304 
305 
// Trace entry recording a job control state transition of a team,
// including the signal (if any) that triggered it.
class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %" B_PRId32 ", "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;	// 0 when no signal was involved
};
329 
330 
// Trace entry recording entry into wait_for_child() with the requested
// child ID and wait flags.
class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %" B_PRId32 ", "
			"flags: %#" B_PRIx32, fChild, fFlags);
	}

private:
	pid_t	fChild;
	uint32	fFlags;
};
351 
352 
353 class WaitForChildDone : public AbstractTraceEntry {
354 public:
355 	WaitForChildDone(const job_control_entry& entry)
356 		:
357 		fState(entry.state),
358 		fTeam(entry.thread),
359 		fStatus(entry.status),
360 		fReason(entry.reason),
361 		fSignal(entry.signal)
362 	{
363 		Initialized();
364 	}
365 
366 	WaitForChildDone(status_t error)
367 		:
368 		fTeam(error)
369 	{
370 		Initialized();
371 	}
372 
373 	virtual void AddDump(TraceOutput& out)
374 	{
375 		if (fTeam >= 0) {
376 			out.Print("team wait for child done, team: %" B_PRId32 ", "
377 				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
378 				fTeam, job_control_state_name(fState), fStatus, fReason,
379 				fSignal);
380 		} else {
381 			out.Print("team wait for child failed, error: "
382 				"%#" B_PRIx32 ", ", fTeam);
383 		}
384 	}
385 
386 private:
387 	job_control_state	fState;
388 	team_id				fTeam;
389 	status_t			fStatus;
390 	uint16				fReason;
391 	uint16				fSignal;
392 };
393 
394 }	// namespace TeamTracing
395 
396 #	define T(x) new(std::nothrow) TeamTracing::x;
397 #else
398 #	define T(x) ;
399 #endif
400 
401 
402 //	#pragma mark - TeamNotificationService
403 
404 
/*!	Registers the service under the name "teams". */
TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}
409 
410 
/*!	Sends a TEAM_MONITOR notification for \a team to all listeners
	interested in \a eventCode. The message carries the event code, the
	team's ID, and a raw pointer to the Team object.
*/
void
TeamNotificationService::Notify(uint32 eventCode, Team* team)
{
	// build the message on the stack -- 128 bytes suffice for three fields
	char eventBuffer[128];
	KMessage event;
	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
	event.AddInt32("event", eventCode);
	event.AddInt32("team", team->id);
	event.AddPointer("teamStruct", team);

	DefaultNotificationService::Notify(event, eventCode);
}
423 
424 
425 //	#pragma mark - Team
426 
427 
/*!	Constructs a team object with the given \a id.
	All subsystem state is initialized to empty/NULL defaults here. The two
	allocations done in the body (job_control_entry and
	fQueuedSignalsCounter) may fail; their results are checked later by
	Team::Create(), not here.

	\param id The ID the new team gets.
	\param kernel \c true, if this is the kernel team (affects lock naming
		and disables the queued-signals limit).
*/
Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;

	hash_next = siblings_next = parent = children = group_next = NULL;
	serial_number = -1;

	group_id = session_id = -1;
	group = NULL;

	num_threads = 0;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	io_context = NULL;
	user_mutex_context = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	death_entry = NULL;
	list_init(&dead_threads);

	dead_children.condition_variable.Init(&dead_children, "team children");
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// pre-allocate the entry our parent will use to reap us; Create()
	// treats a NULL result as a construction failure
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	address_space = NULL;
	main_thread = NULL;
	thread_list = NULL;
	loading_info = NULL;

	list_init(&image_list);
	list_init(&watcher_list);
	list_init(&sem_list);
	list_init_etc(&port_list, port_team_link_offset());

	user_data = 0;
	user_data_area = -1;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	commpage_address = NULL;

	clear_team_debug_info(&debug_info, true);

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;
	B_INITIALIZE_SPINLOCK(&time_lock);

	// invalid user/group IDs until set by the caller
	saved_set_uid = real_uid = effective_uid = -1;
	saved_set_gid = real_gid = effective_gid = -1;

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	B_INITIALIZE_SPINLOCK(&signal_lock);

	// init mutex
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	fName[0] = '\0';
	fArgs[0] = '\0';

	// the kernel team has no limit on queued signals (-1)
	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));
	fUserDefinedTimerCount = 0;

	fCoreDumpCondition = NULL;
}
514 
515 
/*!	Destroys the team, releasing all associated kernel resources: the I/O
	context, owned ports and semaphores, user timers, queued/pending
	signals, and the bookkeeping entries for dead threads and children.
*/
Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	if (io_context != NULL)
		vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	// delete all user timers, not just the user-defined ones
	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	// free the death entries of our dead threads
	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	// delete the job control entries of dead children not yet waited for
	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}
551 
552 
/*!	Creates and fully initializes a new Team object.

	\param id The ID the new team gets.
	\param name The team's name, or \c NULL to leave it empty.
	\param kernel \c true, if the kernel team is being created.
	\return The new team with a reference for the caller, or \c NULL if any
		part of construction or initialization failed.
*/
/*static*/ Team*
Team::Create(team_id id, const char* name, bool kernel)
{
	// create the team object
	Team* team = new(std::nothrow) Team(id, kernel);
	if (team == NULL)
		return NULL;
	ObjectDeleter<Team> teamDeleter(team);
		// deletes the team on any early return below

	if (name != NULL)
		team->SetName(name);

	// check initialization -- these allocations happen in the constructor,
	// which cannot report failure itself
	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
		return NULL;

	// finish initialization (arch specifics)
	if (arch_team_init_team_struct(team, kernel) != B_OK)
		return NULL;

	if (!kernel) {
		status_t error = user_timer_create_team_timers(team);
		if (error != B_OK)
			return NULL;
	}

	team->start_time = system_time();

	// everything went fine
	return teamDeleter.Detach();
}
584 
585 
/*!	\brief Returns the team with the given ID.
	Returns a reference to the team.
	Team and thread spinlock must not be held.

	\param id The team ID, or \c B_CURRENT_TEAM for the calling thread's
		own team.
	\return The team with a reference acquired, or \c NULL if no team with
		the given ID exists.
*/
/*static*/ Team*
Team::Get(team_id id)
{
	if (id == B_CURRENT_TEAM) {
		// the current team always exists -- no hash lookup needed
		Team* team = thread_get_current_thread()->team;
		team->AcquireReference();
		return team;
	}

	InterruptsReadSpinLocker locker(sTeamHashLock);
	Team* team = sTeamHash.Lookup(id);
	if (team != NULL)
		team->AcquireReference();
	return team;
}
605 
606 
/*!	\brief Returns the team with the given ID in a locked state.
	Returns a reference to the team.
	Team and thread spinlock must not be held.

	\param id The team ID.
	\return The team, locked and with a reference acquired, or \c NULL if
		the team doesn't exist or is already shutting down.
*/
/*static*/ Team*
Team::GetAndLock(team_id id)
{
	// get the team
	Team* team = Get(id);
	if (team == NULL)
		return NULL;

	// lock it
	team->Lock();

	// only return the team, when it isn't already dying
	if (team->state >= TEAM_STATE_SHUTDOWN) {
		team->Unlock();
		team->ReleaseReference();
		return NULL;
	}

	return team;
}
631 
632 
/*!	Locks the team and its parent team (if any).
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked. If the
	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
	only the team itself is locked.

	\param dontLockParentIfKernel If \c true, the team's parent team is only
		locked, if it is not the kernel team.
*/
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent.This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);
			// keeps the parent alive while we hold neither lock

		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}
675 
676 
/*!	Unlocks the team and its parent team (if any).
	Counterpart to LockTeamAndParent(); unlocks in reverse (child-first)
	order.
*/
void
Team::UnlockTeamAndParent()
{
	if (parent != NULL)
		parent->Unlock();

	Unlock();
}
687 
688 
/*!	Locks the team, its parent team (if any), and the team's process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked.
*/
void
Team::LockTeamParentAndProcessGroup()
{
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's lock.
	// If we have a parent, try to lock it.
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest of
	// the job.
	Unlock();
	LockTeamAndParent(false);
		// re-locks this team, too, honoring the parent -> child lock order
}
709 
710 
/*!	Unlocks the team, its parent team (if any), and the team's process group.
	Counterpart to LockTeamParentAndProcessGroup().
*/
void
Team::UnlockTeamParentAndProcessGroup()
{
	group->Unlock();

	if (parent != NULL)
		parent->Unlock();

	Unlock();
}
723 
724 
/*!	Locks the team and its process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
*/
void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group == NULL)
			return;

		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);
			// keeps the group alive while we hold neither lock

		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}
759 
760 
/*!	Unlocks the team and its process group.
	Counterpart to LockTeamAndProcessGroup().
*/
void
Team::UnlockTeamAndProcessGroup()
{
	group->Unlock();
	Unlock();
}
767 
768 
769 void
770 Team::SetName(const char* name)
771 {
772 	if (const char* lastSlash = strrchr(name, '/'))
773 		name = lastSlash + 1;
774 
775 	strlcpy(fName, name, B_OS_NAME_LENGTH);
776 }
777 
778 
/*!	Sets the team's argument string to \a args, truncated to the size of
	the internal buffer.
*/
void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}
784 
785 
786 void
787 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
788 {
789 	fArgs[0] = '\0';
790 	strlcpy(fArgs, path, sizeof(fArgs));
791 	for (int i = 0; i < otherArgCount; i++) {
792 		strlcat(fArgs, " ", sizeof(fArgs));
793 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
794 	}
795 }
796 
797 
798 void
799 Team::ResetSignalsOnExec()
800 {
801 	// We are supposed to keep pending signals. Signal actions shall be reset
802 	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
803 	// (for SIGCHLD it's implementation-defined). Others shall be reset to
804 	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
805 	// flags, but since there aren't any handlers, they make little sense, so
806 	// we clear them.
807 
808 	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
809 		struct sigaction& action = SignalActionFor(i);
810 		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
811 			action.sa_handler = SIG_DFL;
812 
813 		action.sa_mask = 0;
814 		action.sa_flags = 0;
815 		action.sa_userdata = NULL;
816 	}
817 }
818 
819 
/*!	Copies all signal actions from \a parent to this team (used on fork). */
void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}
825 
826 
/*!	Adds the given user timer to the team and, if user-defined, assigns it an
	ID.

	The caller must hold the team's lock.

	\param timer The timer to be added. If it doesn't have an ID yet, it is
		considered user-defined and will be assigned an ID.
	\return \c B_OK, if the timer was added successfully,
		\c B_BAD_TEAM_ID if the team is already shutting down,
		\c EAGAIN if the per-team user-defined timer limit was reached,
		another error code otherwise.
*/
status_t
Team::AddUserTimer(UserTimer* timer)
{
	// don't allow addition of timers when already shutting the team down
	if (state >= TEAM_STATE_SHUTDOWN)
		return B_BAD_TEAM_ID;

	// If the timer is user-defined, check timer limit and increment
	// user-defined count.
	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
		return EAGAIN;

	fUserTimers.AddTimer(timer);

	return B_OK;
}
853 
854 
/*!	Removes the given user timer from the team.

	The caller must hold the team's lock.

	\param timer The timer to be removed.
*/
void
Team::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	// only user-defined timers count against the per-team limit
	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		UserDefinedTimersRemoved(1);
}
870 
871 
/*!	Deletes all (or all user-defined) user timers of the team.

	Timer's belonging to the team's threads are not affected.
	The caller must hold the team's lock.

	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
		otherwise all timers are deleted.
*/
void
Team::DeleteUserTimers(bool userDefinedOnly)
{
	// DeleteTimers() returns how many user-defined timers were deleted, so
	// the count can be credited back against the per-team limit
	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
	UserDefinedTimersRemoved(count);
}
886 
887 
888 /*!	If not at the limit yet, increments the team's user-defined timer count.
889 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
890 */
891 bool
892 Team::CheckAddUserDefinedTimer()
893 {
894 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
895 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
896 		atomic_add(&fUserDefinedTimerCount, -1);
897 		return false;
898 	}
899 
900 	return true;
901 }
902 
903 
/*!	Subtracts the given count for the team's user-defined timer count.
	\param count The count to subtract.
*/
void
Team::UserDefinedTimersRemoved(int32 count)
{
	atomic_add(&fUserDefinedTimerCount, -count);
}
912 
913 
/*!	Deactivates all of the team's CPU-time and user-time user timers.
	NOTE(review): callers presumably hold the appropriate team/timer lock --
	confirm against the call sites (outside this view).
*/
void
Team::DeactivateCPUTimeUserTimers()
{
	// Deactivate() removes the timer from the list, so looping on Head()
	// terminates
	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();

	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
		timer->Deactivate();
}
923 
924 
/*!	Returns the team's current total CPU time (kernel + user + offset).

	The caller must hold \c time_lock.

	\param ignoreCurrentRun If \c true and the current thread is one team's
		threads, don't add the time since the last time \c last_time was
		updated. Should be used in "thread unscheduled" scheduler callbacks,
		since although the thread is still running at that time, its time has
		already been stopped.
	\param lockedThread A thread of this team whose \c time_lock the caller
		already holds, or \c NULL. That thread's lock is not re-acquired.
	\return The team's current total CPU time.
*/
bigtime_t
Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
{
	// start with the time accumulated by already-dead threads
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		bool alreadyLocked = thread == lockedThread;
		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
		time += thread->kernel_time + thread->user_time;

		// add the still-running portion since the last bookkeeping update
		if (thread->last_time != 0) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}

		if (alreadyLocked)
			threadTimeLocker.Detach();
				// don't unlock what the caller locked
	}

	return time;
}
962 
963 
/*!	Returns the team's current user CPU time.

	The caller must hold \c time_lock.

	\return The team's current user CPU time.
*/
bigtime_t
Team::UserCPUTime() const
{
	bigtime_t time = dead_threads_user_time;

	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->user_time;

		// a thread currently in the kernel is not accruing user time
		if (thread->last_time != 0 && !thread->in_kernel)
			time += now - thread->last_time;
	}

	return time;
}
988 
989 
990 //	#pragma mark - ProcessGroup
991 
992 
/*!	Constructs a process group with the given \a id. The group starts out
	empty and not yet associated with a session; Publish()/PublishLocked()
	make it publicly accessible.
*/
ProcessGroup::ProcessGroup(pid_t id)
	:
	id(id),
	teams(NULL),
	fSession(NULL),
	fInOrphanedCheckList(false)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}
1004 
1005 
/*!	Destroys the process group: removes it from the orphaned-check list (if
	queued), unpublishes it from the group hash table, and drops its session
	reference.
*/
ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session
	if (fSession != NULL) {
		// fSession != NULL implies the group was published (see
		// PublishLocked()), so it is in the hash table
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}
1029 
1030 
/*!	\brief Returns the process group with the given ID.
	Returns a reference to the group, or \c NULL if it doesn't exist.
	The caller must not hold the process group hash lock.
*/
/*static*/ ProcessGroup*
ProcessGroup::Get(pid_t id)
{
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	ProcessGroup* group = sGroupHash.Lookup(id);
	if (group != NULL)
		group->AcquireReference();
	return group;
}
1040 
1041 
/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must not hold the process group hash lock.
*/
void
ProcessGroup::Publish(ProcessSession* session)
{
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	PublishLocked(session);
}
1051 
1052 
/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must hold the process group hash lock.
*/
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	// a group with this ID must not be published yet
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	fSession = session;
	fSession->AcquireReference();

	sGroupHash.InsertUnchecked(this);
}
1066 
1067 
/*!	Checks whether the process group is orphaned.
	The caller must hold the group's lock.
	\return \c true, if the group is orphaned, \c false otherwise.
*/
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	// walk the group's member list (linked via group_next) until a member
	// with a parent in the same session but a different group is found
	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}
1097 
1098 
/*!	Queues the group in the global list of process groups to be checked for
	having become orphaned. Queuing is idempotent -- a group already in the
	list is not added again.
*/
void
ProcessGroup::ScheduleOrphanedCheck()
{
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (!fInOrphanedCheckList) {
		sOrphanedCheckProcessGroups.Add(this);
		fInOrphanedCheckList = true;
	}
}
1109 
1110 
/*!	Clears the group's orphaned-check flag. Note that, unlike the
	destructor, this does not remove the group from
	sOrphanedCheckProcessGroups -- presumably the caller already did so
	while holding sOrphanedCheckLock (confirm against call sites).
*/
void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}
1116 
1117 
1118 //	#pragma mark - ProcessSession
1119 
1120 
/*!	Constructs a session with the given \a id, with no controlling TTY and
	no foreground process group (-1) yet.
*/
ProcessSession::ProcessSession(pid_t id)
	:
	id(id),
	controlling_tty(NULL),
	foreground_group(-1)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}
1131 
1132 
/*!	Destroys the session's lock. */
ProcessSession::~ProcessSession()
{
	mutex_destroy(&fLock);
}
1137 
1138 
1139 //	#pragma mark - KDL functions
1140 
1141 
1142 static void
1143 _dump_team_info(Team* team)
1144 {
1145 	kprintf("TEAM: %p\n", team);
1146 	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
1147 		team->id);
1148 	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
1149 	kprintf("name:             '%s'\n", team->Name());
1150 	kprintf("args:             '%s'\n", team->Args());
1151 	kprintf("hash_next:        %p\n", team->hash_next);
1152 	kprintf("parent:           %p", team->parent);
1153 	if (team->parent != NULL) {
1154 		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
1155 	} else
1156 		kprintf("\n");
1157 
1158 	kprintf("children:         %p\n", team->children);
1159 	kprintf("num_threads:      %d\n", team->num_threads);
1160 	kprintf("state:            %d\n", team->state);
1161 	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
1162 	kprintf("io_context:       %p\n", team->io_context);
1163 	if (team->address_space)
1164 		kprintf("address_space:    %p\n", team->address_space);
1165 	kprintf("user data:        %p (area %" B_PRId32 ")\n",
1166 		(void*)team->user_data, team->user_data_area);
1167 	kprintf("free user thread: %p\n", team->free_user_threads);
1168 	kprintf("main_thread:      %p\n", team->main_thread);
1169 	kprintf("thread_list:      %p\n", team->thread_list);
1170 	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
1171 	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
1172 }
1173 
1174 
/*!	KDL command "team": dumps info about a single team.
	With no argument, dumps the current team. The argument may be a kernel
	address of a Team object, a team ID, or a team name.
*/
static int
dump_team_info(int argc, char** argv)
{
	ulong arg;
	bool found = false;

	if (argc < 2) {
		// no argument -- dump the current thread's team
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	arg = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(arg)) {
		// semi-hack
		// interpret a kernel address directly as a Team pointer
		_dump_team_info((Team*)arg);
		return 0;
	}

	// walk through the thread list, trying to match name or id
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == (team_id)arg) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found)
		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
	return 0;
}
1212 
1213 
1214 static int
1215 dump_teams(int argc, char** argv)
1216 {
1217 	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
1218 		B_PRINTF_POINTER_WIDTH, "parent");
1219 
1220 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1221 		Team* team = it.Next();) {
1222 		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
1223 	}
1224 
1225 	return 0;
1226 }
1227 
1228 
1229 //	#pragma mark - Private functions
1230 
1231 
1232 /*! Get the parent of a given process.
1233 
1234 	Used in the implementation of getppid (where a process can get its own
1235 	parent, only) as well as in user_process_info where the information is
1236 	available to anyone (allowing to display a tree of running processes)
1237 */
1238 static pid_t
1239 _getppid(pid_t id)
1240 {
1241 	if (id < 0) {
1242 		errno = EINVAL;
1243 		return -1;
1244 	}
1245 
1246 	if (id == 0) {
1247 		Team* team = thread_get_current_thread()->team;
1248 		TeamLocker teamLocker(team);
1249 		if (team->parent == NULL) {
1250 			errno = EINVAL;
1251 			return -1;
1252 		}
1253 		return team->parent->id;
1254 	}
1255 
1256 	Team* team = Team::GetAndLock(id);
1257 	if (team == NULL) {
1258 		errno = ESRCH;
1259 		return -1;
1260 	}
1261 
1262 	pid_t parentID;
1263 
1264 	if (team->parent == NULL) {
1265 		errno = EINVAL;
1266 		parentID = -1;
1267 	} else
1268 		parentID = team->parent->id;
1269 
1270 	team->UnlockAndReleaseReference();
1271 
1272 	return parentID;
1273 }
1274 
1275 
1276 /*!	Inserts team \a team into the child list of team \a parent.
1277 
1278 	The caller must hold the lock of both \a parent and \a team.
1279 
1280 	\param parent The parent team.
1281 	\param team The team to be inserted into \a parent's child list.
1282 */
1283 static void
1284 insert_team_into_parent(Team* parent, Team* team)
1285 {
1286 	ASSERT(parent != NULL);
1287 
1288 	team->siblings_next = parent->children;
1289 	parent->children = team;
1290 	team->parent = parent;
1291 }
1292 
1293 
1294 /*!	Removes team \a team from the child list of team \a parent.
1295 
1296 	The caller must hold the lock of both \a parent and \a team.
1297 
1298 	\param parent The parent team.
1299 	\param team The team to be removed from \a parent's child list.
1300 */
1301 static void
1302 remove_team_from_parent(Team* parent, Team* team)
1303 {
1304 	Team* child;
1305 	Team* last = NULL;
1306 
1307 	for (child = parent->children; child != NULL;
1308 			child = child->siblings_next) {
1309 		if (child == team) {
1310 			if (last == NULL)
1311 				parent->children = child->siblings_next;
1312 			else
1313 				last->siblings_next = child->siblings_next;
1314 
1315 			team->parent = NULL;
1316 			break;
1317 		}
1318 		last = child;
1319 	}
1320 }
1321 
1322 
1323 /*!	Returns whether the given team is a session leader.
1324 	The caller must hold the team's lock or its process group's lock.
1325 */
1326 static bool
1327 is_session_leader(Team* team)
1328 {
1329 	return team->session_id == team->id;
1330 }
1331 
1332 
1333 /*!	Returns whether the given team is a process group leader.
1334 	The caller must hold the team's lock or its process group's lock.
1335 */
1336 static bool
1337 is_process_group_leader(Team* team)
1338 {
1339 	return team->group_id == team->id;
1340 }
1341 
1342 
1343 /*!	Inserts the given team into the given process group.
1344 	The caller must hold the process group's lock, the team's lock, and the
1345 	team's parent's lock.
1346 */
1347 static void
1348 insert_team_into_group(ProcessGroup* group, Team* team)
1349 {
1350 	team->group = group;
1351 	team->group_id = group->id;
1352 	team->session_id = group->Session()->id;
1353 
1354 	team->group_next = group->teams;
1355 	group->teams = team;
1356 	group->AcquireReference();
1357 }
1358 
1359 
1360 /*!	Removes the given team from its process group.
1361 
1362 	The caller must hold the process group's lock, the team's lock, and the
1363 	team's parent's lock. Interrupts must be enabled.
1364 
1365 	\param team The team that'll be removed from its process group.
1366 */
1367 static void
1368 remove_team_from_group(Team* team)
1369 {
1370 	ProcessGroup* group = team->group;
1371 	Team* current;
1372 	Team* last = NULL;
1373 
1374 	// the team must be in a process group to let this function have any effect
1375 	if (group == NULL)
1376 		return;
1377 
1378 	for (current = group->teams; current != NULL;
1379 			current = current->group_next) {
1380 		if (current == team) {
1381 			if (last == NULL)
1382 				group->teams = current->group_next;
1383 			else
1384 				last->group_next = current->group_next;
1385 
1386 			break;
1387 		}
1388 		last = current;
1389 	}
1390 
1391 	team->group = NULL;
1392 	team->group_next = NULL;
1393 	team->group_id = -1;
1394 
1395 	group->ReleaseReference();
1396 }
1397 
1398 
/*!	Creates the team's "user data" area, which backs the per-thread
	user_thread structures handed out by team_allocate_user_thread().

	\param team The team to create the area for.
	\param exactAddress If not \c NULL, the exact address at which to create
		the area (used by fork(), so the child's area matches the parent's).
		Otherwise a randomized address based at KERNEL_USER_DATA_BASE is
		chosen.
	\return \c B_OK on success, the create_area_etc() error otherwise.
*/
static status_t
create_team_user_data(Team* team, void* exactAddress = NULL)
{
	void* address;
	uint32 addressSpec;

	// Pick the desired placement: the caller-requested exact address, or a
	// randomized one.
	if (exactAddress != NULL) {
		address = exactAddress;
		addressSpec = B_EXACT_ADDRESS;
	} else {
		address = (void*)KERNEL_USER_DATA_BASE;
		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
	}

	// Reserve the full range up front, so the area (created with only
	// kTeamUserDataInitialSize) can later grow up to
	// kTeamUserDataReservedSize without other allocations getting in the way.
	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);

	virtual_address_restrictions virtualRestrictions = {};
	if (result == B_OK || exactAddress != NULL) {
		// Reservation succeeded (or an exact address is mandatory anyway):
		// create the area exactly at the chosen address.
		if (exactAddress != NULL)
			virtualRestrictions.address = exactAddress;
		else
			virtualRestrictions.address = address;
		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	} else {
		// Reservation failed -- fall back to letting create_area_etc() pick
		// a randomized address itself, without a reservation.
		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
	}

	physical_address_restrictions physicalRestrictions = {};
	// B_KERNEL_AREA keeps userland from deleting/resizing the area itself.
	team->user_data_area = create_area_etc(team->id, "user area",
		kTeamUserDataInitialSize, B_FULL_LOCK,
		B_READ_AREA | B_WRITE_AREA | B_KERNEL_AREA, 0, 0,
		&virtualRestrictions, &physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	// Initialize the team's user data allocation bookkeeping.
	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = kTeamUserDataInitialSize;
	team->free_user_threads = NULL;

	return B_OK;
}
1443 
1444 
1445 static void
1446 delete_team_user_data(Team* team)
1447 {
1448 	if (team->user_data_area >= 0) {
1449 		vm_delete_area(team->id, team->user_data_area, true);
1450 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1451 			kTeamUserDataReservedSize);
1452 
1453 		team->user_data = 0;
1454 		team->used_user_data = 0;
1455 		team->user_data_size = 0;
1456 		team->user_data_area = -1;
1457 		while (free_user_thread* entry = team->free_user_threads) {
1458 			team->free_user_threads = entry->next;
1459 			free(entry);
1460 		}
1461 	}
1462 }
1463 
1464 
1465 static status_t
1466 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1467 	int32 argCount, int32 envCount, char**& _flatArgs)
1468 {
1469 	if (argCount < 0 || envCount < 0)
1470 		return B_BAD_VALUE;
1471 
1472 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1473 		return B_TOO_MANY_ARGS;
1474 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1475 		return B_BAD_VALUE;
1476 
1477 	if (!IS_USER_ADDRESS(userFlatArgs))
1478 		return B_BAD_ADDRESS;
1479 
1480 	// allocate kernel memory
1481 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1482 	if (flatArgs == NULL)
1483 		return B_NO_MEMORY;
1484 
1485 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1486 		free(flatArgs);
1487 		return B_BAD_ADDRESS;
1488 	}
1489 
1490 	// check and relocate the array
1491 	status_t error = B_OK;
1492 	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
1493 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1494 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1495 		if (i == argCount || i == argCount + envCount + 1) {
1496 			// check array null termination
1497 			if (flatArgs[i] != NULL) {
1498 				error = B_BAD_VALUE;
1499 				break;
1500 			}
1501 		} else {
1502 			// check string
1503 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1504 			size_t maxLen = stringEnd - arg;
1505 			if (arg < stringBase || arg >= stringEnd
1506 					|| strnlen(arg, maxLen) == maxLen) {
1507 				error = B_BAD_VALUE;
1508 				break;
1509 			}
1510 
1511 			flatArgs[i] = arg;
1512 		}
1513 	}
1514 
1515 	if (error == B_OK)
1516 		_flatArgs = flatArgs;
1517 	else
1518 		free(flatArgs);
1519 
1520 	return error;
1521 }
1522 
1523 
1524 static void
1525 free_team_arg(struct team_arg* teamArg)
1526 {
1527 	if (teamArg != NULL) {
1528 		free(teamArg->flat_args);
1529 		free(teamArg->path);
1530 		free(teamArg);
1531 	}
1532 }
1533 
1534 
1535 static status_t
1536 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1537 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1538 	port_id port, uint32 token)
1539 {
1540 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1541 	if (teamArg == NULL)
1542 		return B_NO_MEMORY;
1543 
1544 	teamArg->path = strdup(path);
1545 	if (teamArg->path == NULL) {
1546 		free(teamArg);
1547 		return B_NO_MEMORY;
1548 	}
1549 
1550 	// copy the args over
1551 	teamArg->flat_args = flatArgs;
1552 	teamArg->flat_args_size = flatArgsSize;
1553 	teamArg->arg_count = argCount;
1554 	teamArg->env_count = envCount;
1555 	teamArg->flags = 0;
1556 	teamArg->umask = umask;
1557 	teamArg->error_port = port;
1558 	teamArg->error_token = token;
1559 
1560 	// determine the flags from the environment
1561 	const char* const* env = flatArgs + argCount + 1;
1562 	for (int32 i = 0; i < envCount; i++) {
1563 		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
1564 			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
1565 			break;
1566 		}
1567 	}
1568 
1569 	*_teamArg = teamArg;
1570 	return B_OK;
1571 }
1572 
1573 
/*!	Does the actual work for the entry function of a new (userland) team's
	main thread: places the program arguments/environment on the user stack,
	clones and registers the commpage, loads the runtime_loader, and enters
	userspace.
	On success this function does not return (thread_enter_userspace_new_team()
	takes over).
	\param args The team_arg structure; ownership is taken over and it is
		freed once the arguments have been copied to the user stack.
	\return An error code, if any of the setup steps failed.
*/
static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
		thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size								| usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
	// TLS_SIZE							| TLS data
	// sizeof(user_space_program_args)	| argument structure for the runtime
	//									| loader
	// flat arguments size				| flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	// The program args structure sits above the actual stack and the TLS
	// block (see the layout above).
	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	// The flat arguments follow directly after the args structure.
	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	// Copy path, counts, pointers, and the flat arguments onto the user
	// stack. Any failure indicates a broken user stack -- bail out.
	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(&programArgs->disable_user_addons,
			&sDisableUserAddOns, sizeof(bool)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// update state
	team->Lock();
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	// Clone commpage area
	area_id commPageArea = clone_commpage_area(team->id,
		&team->commpage_address);
	if (commPageArea  < B_OK) {
		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
			strerror(commPageArea)));
		return commPageArea;
	}

	// Register commpage image
	image_id commPageImage = get_commpage_image();
	extended_image_info imageInfo;
	err = get_image_info(commPageImage, &imageInfo.basic_info);
	if (err != B_OK) {
		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
			strerror(err)));
		return err;
	}
	// The commpage's text lives at the cloned address in this team.
	imageInfo.basic_info.text = team->commpage_address;
	imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
	imageInfo.symbol_table = NULL;
	imageInfo.symbol_hash = NULL;
	imageInfo.string_table = NULL;
	image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
	if (image < 0) {
		TRACE(("team_create_thread_start: register_image() failed: %s\n",
			strerror(image)));
		return image;
	}

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	// enter userspace -- returns only in case of error
	return thread_enter_userspace_new_team(thread, (addr_t)entry,
		programArgs, team->commpage_address);
}
1715 
1716 
1717 static status_t
1718 team_create_thread_start(void* args)
1719 {
1720 	team_create_thread_start_internal(args);
1721 	team_init_exit_info_on_error(thread_get_current_thread()->team);
1722 	thread_exit();
1723 		// does not return
1724 	return B_OK;
1725 }
1726 
1727 
/*!	Creates a new (userland) team, loads the given executable into it, and
	starts its main thread.

	\param _flatArgs The flat program arguments and environment (kernel
		copy); on success ownership is transferred to the new team (the
		pointer is set to \c NULL).
	\param flatArgsSize Size of the flat arguments.
	\param argCount Number of program arguments (flatArgs[0] is the program
		path).
	\param envCount Number of environment variables.
	\param priority Unused here -- the main thread is created with
		\c B_NORMAL_PRIORITY below.
	\param parentID ID of the team that shall become the new team's parent.
	\param flags \c B_WAIT_TILL_LOADED: wait until the runtime loader has
		finished (or aborted) loading before returning.
	\param errorPort Port to which the runtime loader reports load errors.
	\param errorToken Token identifying the load error report.
	\return The ID of the new team's main thread, or an error code.
*/
static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	thread_id thread;
	status_t status;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	ConditionVariableEntry loadingWaitEntry;
	io_context* parentIOContext = NULL;
	team_id teamID;
	bool teamLimitReached = false;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
		"\n", path, flatArgs, argCount));

	// cut the path from the main thread name
	const char* threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the main thread object
	Thread* mainThread;
	status = Thread::Create(threadName, mainThread);
	if (status != B_OK)
		return status;
	BReference<Thread> mainThreadReference(mainThread, true);

	// create team object
	Team* team = Team::Create(mainThread->id, path, false);
	if (team == NULL)
		return B_NO_MEMORY;
	BReference<Team> teamReference(team, true);

	// If asked to wait, set up the loading info the loader thread will
	// notify, and keep an extra team reference for that wait.
	BReference<Team> teamLoadingReference;
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		loadingInfo.condition.Init(team, "image load");
		loadingInfo.condition.Add(&loadingWaitEntry);
		loadingInfo.result = B_ERROR;
		team->loading_info = &loadingInfo;
		teamLoadingReference = teamReference;
	}

	// get the parent team
	Team* parent = Team::Get(parentID);
	if (parent == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> parentReference(parent, true);

	parent->LockTeamAndProcessGroup();
	team->Lock();

	// inherit the parent's user/group
	inherit_parent_user_and_group(team, parent);

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err2;

	// honor a DISABLE_ASLR=1 environment variable (see create_team_arg())
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// insert the team into its parent and the teams hash
	parent->LockTeamAndProcessGroup();
	team->Lock();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);

		sTeamHash.Insert(team);
		teamLimitReached = sUsedTeams >= sMaxTeams;
		if (!teamLimitReached)
			sUsedTeams++;
	}

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// The team limit check is deferred until after the team was inserted, so
	// the error path can uniformly remove it again.
	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err6;
	}

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err6;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		if (mainThread != NULL) {
			// resume the team's main thread
			thread_continue(mainThread);
		}

		// Now wait until loading is finished. We will be woken either by the
		// thread, when it finished or aborted loading, or when the team is
		// going to die (e.g. is killed). In either case the one notifying is
		// responsible for unsetting `loading_info` in the team structure.
		loadingWaitEntry.Wait();

		// We must synchronize with the thread that woke us up, to ensure
		// there are no remaining consumers of the team_loading_info.
		team->Lock();
		if (team->loading_info != NULL)
			panic("team loading wait complete, but loading_info != NULL");
		team->Unlock();
		teamLoadingReference.Unset();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(teamID);

	return thread;

err6:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
		sTeamHash.Remove(team);
		if (!teamLimitReached)
			sUsedTeams--;
	}

	sNotificationService.Notify(TEAM_REMOVED, team);

	delete_team_user_data(team);
err4:
	team->address_space->Put();
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	return status;
}
1951 
1952 
1953 /*!	Almost shuts down the current team and loads a new image into it.
1954 	If successful, this function does not return and will takeover ownership of
1955 	the arguments provided.
1956 	This function may only be called in a userland team (caused by one of the
1957 	exec*() syscalls).
1958 */
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
		team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	// Fail if any thread other than the main thread (and the nub thread)
	// still exists.
	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	// Copy the arguments before tearing down the old image; -1/-0 means no
	// error port/token (unlike load_image_internal()).
	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	// Tear down the old userland image: all areas, semaphores, ports,
	// images, CLOEXEC fds, and POSIX sem/mutex contexts.
	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_user_mutex_context(team->user_mutex_context);
	team->user_mutex_context = NULL;
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	// update ASLR
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
			// does never return
	} else
		free_team_arg(teamArgs);

	// Sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that).
	exit_thread(status);

	// We return a status here since the signal that is sent by the
	// call above is not immediately handled.
	return B_ERROR;
}
2103 
2104 
2105 static thread_id
2106 fork_team(void)
2107 {
2108 	Thread* parentThread = thread_get_current_thread();
2109 	Team* parentTeam = parentThread->team;
2110 	Team* team;
2111 	arch_fork_arg* forkArgs;
2112 	struct area_info info;
2113 	thread_id threadID;
2114 	status_t status;
2115 	ssize_t areaCookie;
2116 	bool teamLimitReached = false;
2117 
2118 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2119 
2120 	if (parentTeam == team_get_kernel_team())
2121 		return B_NOT_ALLOWED;
2122 
2123 	// create a new team
2124 	// TODO: this is very similar to load_image_internal() - maybe we can do
2125 	// something about it :)
2126 
2127 	// create the main thread object
2128 	Thread* thread;
2129 	status = Thread::Create(parentThread->name, thread);
2130 	if (status != B_OK)
2131 		return status;
2132 	BReference<Thread> threadReference(thread, true);
2133 
2134 	// create the team object
2135 	team = Team::Create(thread->id, NULL, false);
2136 	if (team == NULL)
2137 		return B_NO_MEMORY;
2138 
2139 	parentTeam->LockTeamAndProcessGroup();
2140 	team->Lock();
2141 
2142 	team->SetName(parentTeam->Name());
2143 	team->SetArgs(parentTeam->Args());
2144 
2145 	team->commpage_address = parentTeam->commpage_address;
2146 
2147 	// Inherit the parent's user/group.
2148 	inherit_parent_user_and_group(team, parentTeam);
2149 
2150 	// inherit signal handlers
2151 	team->InheritSignalActions(parentTeam);
2152 
2153 	team->Unlock();
2154 	parentTeam->UnlockTeamAndProcessGroup();
2155 
2156 	// inherit some team debug flags
2157 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2158 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2159 
2160 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2161 	if (forkArgs == NULL) {
2162 		status = B_NO_MEMORY;
2163 		goto err1;
2164 	}
2165 
2166 	// create a new io_context for this team
2167 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2168 	if (!team->io_context) {
2169 		status = B_NO_MEMORY;
2170 		goto err2;
2171 	}
2172 
2173 	// duplicate the realtime sem context
2174 	if (parentTeam->realtime_sem_context) {
2175 		team->realtime_sem_context = clone_realtime_sem_context(
2176 			parentTeam->realtime_sem_context);
2177 		if (team->realtime_sem_context == NULL) {
2178 			status = B_NO_MEMORY;
2179 			goto err2;
2180 		}
2181 	}
2182 
2183 	// create an address space for this team
2184 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2185 		&team->address_space);
2186 	if (status < B_OK)
2187 		goto err3;
2188 
2189 	// copy all areas of the team
2190 	// TODO: should be able to handle stack areas differently (ie. don't have
2191 	// them copy-on-write)
2192 
2193 	areaCookie = 0;
2194 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2195 		if (info.area == parentTeam->user_data_area) {
2196 			// don't clone the user area; just create a new one
2197 			status = create_team_user_data(team, info.address);
2198 			if (status != B_OK)
2199 				break;
2200 
2201 			thread->user_thread = team_allocate_user_thread(team);
2202 		} else {
2203 			void* address;
2204 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2205 				&address, B_CLONE_ADDRESS, info.area);
2206 			if (area < B_OK) {
2207 				status = area;
2208 				break;
2209 			}
2210 
2211 			if (info.area == parentThread->user_stack_area)
2212 				thread->user_stack_area = area;
2213 		}
2214 	}
2215 
2216 	if (status < B_OK)
2217 		goto err4;
2218 
2219 	if (thread->user_thread == NULL) {
2220 #if KDEBUG
2221 		panic("user data area not found, parent area is %" B_PRId32,
2222 			parentTeam->user_data_area);
2223 #endif
2224 		status = B_ERROR;
2225 		goto err4;
2226 	}
2227 
2228 	thread->user_stack_base = parentThread->user_stack_base;
2229 	thread->user_stack_size = parentThread->user_stack_size;
2230 	thread->user_local_storage = parentThread->user_local_storage;
2231 	thread->sig_block_mask = parentThread->sig_block_mask;
2232 	thread->signal_stack_base = parentThread->signal_stack_base;
2233 	thread->signal_stack_size = parentThread->signal_stack_size;
2234 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2235 
2236 	arch_store_fork_frame(forkArgs);
2237 
2238 	// copy image list
2239 	if (copy_images(parentTeam->id, team) != B_OK)
2240 		goto err5;
2241 
2242 	// insert the team into its parent and the teams hash
2243 	parentTeam->LockTeamAndProcessGroup();
2244 	team->Lock();
2245 
2246 	{
2247 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2248 
2249 		sTeamHash.Insert(team);
2250 		teamLimitReached = sUsedTeams >= sMaxTeams;
2251 		if (!teamLimitReached)
2252 			sUsedTeams++;
2253 	}
2254 
2255 	insert_team_into_parent(parentTeam, team);
2256 	insert_team_into_group(parentTeam->group, team);
2257 
2258 	team->Unlock();
2259 	parentTeam->UnlockTeamAndProcessGroup();
2260 
2261 	// notify team listeners
2262 	sNotificationService.Notify(TEAM_ADDED, team);
2263 
2264 	if (teamLimitReached) {
2265 		status = B_NO_MORE_TEAMS;
2266 		goto err6;
2267 	}
2268 
2269 	// create the main thread
2270 	{
2271 		ThreadCreationAttributes threadCreationAttributes(NULL,
2272 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2273 		threadCreationAttributes.forkArgs = forkArgs;
2274 		threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS;
2275 		threadID = thread_create_thread(threadCreationAttributes, false);
2276 		if (threadID < 0) {
2277 			status = threadID;
2278 			goto err6;
2279 		}
2280 	}
2281 
2282 	// notify the debugger
2283 	user_debug_team_created(team->id);
2284 
2285 	T(TeamForked(threadID));
2286 
2287 	resume_thread(threadID);
2288 	return threadID;
2289 
2290 err6:
2291 	// Remove the team structure from the process group, the parent team, and
2292 	// the team hash table and delete the team structure.
2293 	parentTeam->LockTeamAndProcessGroup();
2294 	team->Lock();
2295 
2296 	remove_team_from_group(team);
2297 	remove_team_from_parent(team->parent, team);
2298 
2299 	team->Unlock();
2300 	parentTeam->UnlockTeamAndProcessGroup();
2301 
2302 	{
2303 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2304 		sTeamHash.Remove(team);
2305 		if (!teamLimitReached)
2306 			sUsedTeams--;
2307 	}
2308 
2309 	sNotificationService.Notify(TEAM_REMOVED, team);
2310 err5:
2311 	remove_images(team);
2312 err4:
2313 	team->address_space->RemoveAndPut();
2314 err3:
2315 	delete_realtime_sem_context(team->realtime_sem_context);
2316 err2:
2317 	free(forkArgs);
2318 err1:
2319 	team->ReleaseReference();
2320 
2321 	return status;
2322 }
2323 
2324 
2325 /*!	Returns if the specified team \a parent has any children belonging to the
2326 	process group with the specified ID \a groupID.
2327 	The caller must hold \a parent's lock.
2328 */
2329 static bool
2330 has_children_in_group(Team* parent, pid_t groupID)
2331 {
2332 	for (Team* child = parent->children; child != NULL;
2333 			child = child->siblings_next) {
2334 		TeamLocker childLocker(child);
2335 		if (child->group_id == groupID)
2336 			return true;
2337 	}
2338 
2339 	return false;
2340 }
2341 
2342 
2343 /*!	Returns the first job control entry from \a children, which matches \a id.
2344 	\a id can be:
2345 	- \code > 0 \endcode: Matching an entry with that team ID.
2346 	- \code == -1 \endcode: Matching any entry.
2347 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2348 	\c 0 is an invalid value for \a id.
2349 
2350 	The caller must hold the lock of the team that \a children belongs to.
2351 
2352 	\param children The job control entry list to check.
2353 	\param id The match criterion.
2354 	\return The first matching entry or \c NULL, if none matches.
2355 */
2356 static job_control_entry*
2357 get_job_control_entry(team_job_control_children& children, pid_t id)
2358 {
2359 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2360 		 job_control_entry* entry = it.Next();) {
2361 
2362 		if (id > 0) {
2363 			if (entry->thread == id)
2364 				return entry;
2365 		} else if (id == -1) {
2366 			return entry;
2367 		} else {
2368 			pid_t processGroup
2369 				= (entry->team ? entry->team->group_id : entry->group_id);
2370 			if (processGroup == -id)
2371 				return entry;
2372 		}
2373 	}
2374 
2375 	return NULL;
2376 }
2377 
2378 
2379 /*!	Returns the first job control entry from one of team's dead, continued, or
2380 	stopped children which matches \a id.
2381 	\a id can be:
2382 	- \code > 0 \endcode: Matching an entry with that team ID.
2383 	- \code == -1 \endcode: Matching any entry.
2384 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2385 	\c 0 is an invalid value for \a id.
2386 
2387 	The caller must hold \a team's lock.
2388 
2389 	\param team The team whose dead, stopped, and continued child lists shall be
2390 		checked.
2391 	\param id The match criterion.
2392 	\param flags Specifies which children shall be considered. Dead children
2393 		are considered when \a flags is ORed bitwise with \c WEXITED, stopped
2394 		children are considered when \a flags is ORed bitwise with \c WUNTRACED
2395 		or \c WSTOPPED, continued children when \a flags is ORed bitwise with
2396 		\c WCONTINUED.
2397 	\return The first matching entry or \c NULL, if none matches.
2398 */
2399 static job_control_entry*
2400 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2401 {
2402 	job_control_entry* entry = NULL;
2403 
2404 	if ((flags & WEXITED) != 0)
2405 		entry = get_job_control_entry(team->dead_children, id);
2406 
2407 	if (entry == NULL && (flags & WCONTINUED) != 0)
2408 		entry = get_job_control_entry(team->continued_children, id);
2409 
2410 	if (entry == NULL && (flags & (WUNTRACED | WSTOPPED)) != 0)
2411 		entry = get_job_control_entry(team->stopped_children, id);
2412 
2413 	return entry;
2414 }
2415 
2416 
/*!	Creates a fresh entry that does not yet hold a process group reference.
	The remaining fields are filled in by the owner (cf. InitDeadState() and
	operator=()).
*/
job_control_entry::job_control_entry()
	:
	has_group_ref(false)
{
}
2422 
2423 
/*!	Releases the process group reference acquired in InitDeadState(), if any.
*/
job_control_entry::~job_control_entry()
{
	if (has_group_ref) {
		// look the group up by the ID we stored in InitDeadState()
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);

		ProcessGroup* group = sGroupHash.Lookup(group_id);
		if (group == NULL) {
			// we hold a reference, so the group must still be registered
			panic("job_control_entry::~job_control_entry(): unknown group "
				"ID: %" B_PRId32, group_id);
			return;
		}

		// drop the hash lock first -- releasing the reference may destroy
		// the group
		groupHashLocker.Unlock();

		group->ReleaseReference();
	}
}
2441 
2442 
/*!	Invoked when the owning team is dying, initializing the entry according to
	the dead state.

	The caller must hold the owning team's lock and the scheduler lock.
*/
void
job_control_entry::InitDeadState()
{
	if (team != NULL) {
		ASSERT(team->exit.initialized);

		// Take over the data of interest from the dying team, so the entry
		// remains meaningful after the team object is gone. We keep the
		// process group alive via a reference (released in the destructor).
		group_id = team->group_id;
		team->group->AcquireReference();
		has_group_ref = true;

		// the waiting parent identifies the child via the main thread's ID
		thread = team->id;
		status = team->exit.status;
		reason = team->exit.reason;
		signal = team->exit.signal;
		signaling_user = team->exit.signaling_user;
		user_time = team->dead_threads_user_time
			+ team->dead_children.user_time;
		kernel_time = team->dead_threads_kernel_time
			+ team->dead_children.kernel_time;

		// detach from the team -- it must no longer be accessed through us
		team = NULL;
	}
}
2471 
2472 
/*!	Copies the entry's data fields.
	Note that \c has_group_ref is deliberately set to \c false instead of
	being copied: the copy does not take over or acquire a process group
	reference -- the reference (if any) stays with the source entry.
*/
job_control_entry&
job_control_entry::operator=(const job_control_entry& other)
{
	state = other.state;
	thread = other.thread;
	signal = other.signal;
	has_group_ref = false;
	signaling_user = other.signaling_user;
	team = other.team;
	group_id = other.group_id;
	status = other.status;
	reason = other.reason;
	user_time = other.user_time;
	kernel_time = other.kernel_time;

	return *this;
}
2490 
2491 
/*!	This is the kernel backend for waitid().

	Waits for a matching child of the calling team to change state (exit,
	stop, or continue), as selected by \a flags.

	\param child > 0: wait for the team with that ID; == -1: wait for any
		child; == 0: wait for any child in the caller's process group;
		< -1: wait for any child in the process group \c -child.
	\param flags \c WEXITED, \c WUNTRACED/\c WSTOPPED, and/or \c WCONTINUED
		select the state changes to wait for; \c WNOHANG makes the call
		non-blocking, \c WNOWAIT leaves the state change unconsumed.
	\param _info On success filled in with SIGCHLD style siginfo describing
		the state change.
	\param _usage_info Filled in with the child's CPU times (dead children
		only).
	\return The ID of the child, or an error code.
*/
static thread_id
wait_for_child(pid_t child, uint32 flags, siginfo_t& _info,
	team_usage_info& _usage_info)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	struct job_control_entry foundEntry;
		// local copy of the matched entry, valid after the team is unlocked
	struct job_control_entry* freeDeathEntry = NULL;
		// death entry removed from the team that we have to delete
	status_t status = B_OK;

	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
		child, flags));

	T(WaitForChild(child, flags));

	// at least one state change must be selected
	if ((flags & (WEXITED | WUNTRACED | WSTOPPED | WCONTINUED)) == 0) {
		T(WaitForChildDone(B_BAD_VALUE));
		return B_BAD_VALUE;
	}

	pid_t originalChild = child;

	bool ignoreFoundEntries = false;
	bool ignoreFoundEntriesChecked = false;

	while (true) {
		// lock the team
		TeamLocker teamLocker(team);

		// A 0 child argument means to wait for all children in the process
		// group of the calling team.
		child = originalChild == 0 ? -team->group_id : originalChild;

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		if (entry == NULL) {
			// No success yet -- check whether there are any children complying
			// to the process group specification at all.
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else if (child != team->id) {
				if (Team* childTeam = Team::Get(child)) {
					BReference<Team> childTeamReference(childTeam, true);
					TeamLocker childTeamLocker(childTeam);
					childrenExist = childTeam->parent == team;
				}
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;

			// unless WNOWAIT has been specified, "consume" the wait state
			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
				if (entry->state == JOB_CONTROL_STATE_DEAD) {
					// The child is dead. Reap its death entry.
					freeDeathEntry = entry;
					team->dead_children.entries.Remove(entry);
					team->dead_children.count--;
				} else {
					// The child is well. Reset its job control state.
					team_set_job_control_state(entry->team,
						JOB_CONTROL_STATE_NONE, NULL);
				}
			}
		}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children.condition_variable.Add(&deadWaitEntry);

		teamLocker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			teamLocker.Lock();

			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			teamLocker.Unlock();

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// When we got here, we have a valid death entry, and already got
	// unregistered from the team or group. Fill in the returned info.
	memset(&_info, 0, sizeof(_info));
	_info.si_signo = SIGCHLD;
	_info.si_pid = foundEntry.thread;
	_info.si_uid = foundEntry.signaling_user;
	// TODO: Fill in si_errno?

	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			_info.si_code = foundEntry.reason;
			_info.si_status = foundEntry.reason == CLD_EXITED
				? foundEntry.status : foundEntry.signal;
			_usage_info.user_time = foundEntry.user_time;
			_usage_info.kernel_time = foundEntry.kernel_time;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			_info.si_code = CLD_STOPPED;
			_info.si_status = foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			_info.si_code = CLD_CONTINUED;
			_info.si_status = 0;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
	// status is available.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker signalLocker(team->signal_lock);
	SpinLocker threadCreationLocker(gThreadCreationLock);

	if (is_team_signal_blocked(team, SIGCHLD)) {
		if (get_job_control_entry(team, child, flags) == NULL)
			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
	}

	threadCreationLocker.Unlock();
	signalLocker.Unlock();
	teamLocker.Unlock();

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}
2680 
2681 
2682 /*! Fills the team_info structure with information from the specified team.
2683 	Interrupts must be enabled. The team must not be locked.
2684 */
2685 static status_t
2686 fill_team_info(Team* team, team_info* info, size_t size)
2687 {
2688 	if (size > sizeof(team_info))
2689 		return B_BAD_VALUE;
2690 
2691 	// TODO: Set more informations for team_info
2692 	memset(info, 0, size);
2693 
2694 	info->team = team->id;
2695 		// immutable
2696 	info->image_count = count_images(team);
2697 		// protected by sImageMutex
2698 
2699 	TeamLocker teamLocker(team);
2700 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2701 
2702 	info->thread_count = team->num_threads;
2703 	//info->area_count =
2704 	info->debugger_nub_thread = team->debug_info.nub_thread;
2705 	info->debugger_nub_port = team->debug_info.nub_port;
2706 	info->uid = team->effective_uid;
2707 	info->gid = team->effective_gid;
2708 
2709 	strlcpy(info->args, team->Args(), sizeof(info->args));
2710 	info->argc = 1;
2711 
2712 	if (size > offsetof(team_info, real_uid)) {
2713 		info->real_uid = team->real_uid;
2714 		info->real_gid = team->real_gid;
2715 		info->group_id = team->group_id;
2716 		info->session_id = team->session_id;
2717 
2718 		if (team->parent != NULL)
2719 			info->parent = team->parent->id;
2720 		else
2721 			info->parent = -1;
2722 
2723 		strlcpy(info->name, team->Name(), sizeof(info->name));
2724 		info->start_time = team->start_time;
2725 	}
2726 
2727 	return B_OK;
2728 }
2729 
2730 
2731 /*!	Returns whether the process group contains stopped processes.
2732 	The caller must hold the process group's lock.
2733 */
2734 static bool
2735 process_group_has_stopped_processes(ProcessGroup* group)
2736 {
2737 	Team* team = group->teams;
2738 	while (team != NULL) {
2739 		// the parent team's lock guards the job control entry -- acquire it
2740 		team->LockTeamAndParent(false);
2741 
2742 		if (team->job_control_entry != NULL
2743 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2744 			team->UnlockTeamAndParent();
2745 			return true;
2746 		}
2747 
2748 		team->UnlockTeamAndParent();
2749 
2750 		team = team->group_next;
2751 	}
2752 
2753 	return false;
2754 }
2755 
2756 
/*!	Iterates through all process groups queued in team_remove_team() and signals
	those that are orphaned and have stopped processes.
	The caller must not hold any team or process group locks.
*/
static void
orphaned_process_group_check()
{
	// process as long as there are groups in the list
	while (true) {
		// remove the head from the list
		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
		if (group == NULL)
			return;

		group->UnsetOrphanedCheck();
		BReference<ProcessGroup> groupReference(group);
			// presumably takes over the reference held on behalf of the
			// list -- cf. ScheduleOrphanedCheck(); TODO confirm

		// drop the list lock before taking the group's own lock
		orphanedCheckLocker.Unlock();

		AutoLocker<ProcessGroup> groupLocker(group);

		// If the group is orphaned and contains stopped processes, we're
		// supposed to send SIGHUP + SIGCONT.
		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
			Thread* currentThread = thread_get_current_thread();

			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
			send_signal_to_process_group_locked(group, signal, 0);

			// reuse the signal object for the follow-up SIGCONT
			signal.SetNumber(SIGCONT);
			send_signal_to_process_group_locked(group, signal, 0);
		}
	}
}
2793 
2794 
2795 static status_t
2796 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2797 	uint32 flags)
2798 {
2799 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2800 		return B_BAD_VALUE;
2801 
2802 	// get the team
2803 	Team* team = Team::GetAndLock(id);
2804 	if (team == NULL)
2805 		return B_BAD_TEAM_ID;
2806 	BReference<Team> teamReference(team, true);
2807 	TeamLocker teamLocker(team, true);
2808 
2809 	if ((flags & B_CHECK_PERMISSION) != 0) {
2810 		uid_t uid = geteuid();
2811 		if (uid != 0 && uid != team->effective_uid)
2812 			return B_NOT_ALLOWED;
2813 	}
2814 
2815 	bigtime_t kernelTime = 0;
2816 	bigtime_t userTime = 0;
2817 
2818 	switch (who) {
2819 		case B_TEAM_USAGE_SELF:
2820 		{
2821 			Thread* thread = team->thread_list;
2822 
2823 			for (; thread != NULL; thread = thread->team_next) {
2824 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2825 				kernelTime += thread->kernel_time;
2826 				userTime += thread->user_time;
2827 			}
2828 
2829 			kernelTime += team->dead_threads_kernel_time;
2830 			userTime += team->dead_threads_user_time;
2831 			break;
2832 		}
2833 
2834 		case B_TEAM_USAGE_CHILDREN:
2835 		{
2836 			Team* child = team->children;
2837 			for (; child != NULL; child = child->siblings_next) {
2838 				TeamLocker childLocker(child);
2839 
2840 				Thread* thread = team->thread_list;
2841 
2842 				for (; thread != NULL; thread = thread->team_next) {
2843 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2844 					kernelTime += thread->kernel_time;
2845 					userTime += thread->user_time;
2846 				}
2847 
2848 				kernelTime += child->dead_threads_kernel_time;
2849 				userTime += child->dead_threads_user_time;
2850 			}
2851 
2852 			kernelTime += team->dead_children.kernel_time;
2853 			userTime += team->dead_children.user_time;
2854 			break;
2855 		}
2856 	}
2857 
2858 	info->kernel_time = kernelTime;
2859 	info->user_time = userTime;
2860 
2861 	return B_OK;
2862 }
2863 
2864 
2865 //	#pragma mark - Private kernel API
2866 
2867 
/*!	Initializes the team subsystem.
	Creates the team and process group hash tables, the initial session and
	process group, and the kernel team (ID 1), and registers the team-related
	debugger commands. Panics on unrecoverable failure.
*/
status_t
team_init(kernel_args* args)
{
	// create the team hash table
	new(&sTeamHash) TeamTable;
	if (sTeamHash.Init(64) != B_OK)
		panic("Failed to init team hash table!");

	new(&sGroupHash) ProcessGroupHashTable;
	if (sGroupHash.Init() != B_OK)
		panic("Failed to init process group hash table!");

	// create initial session and process groups

	ProcessSession* session = new(std::nothrow) ProcessSession(1);
	if (session == NULL)
		panic("Could not create initial session.\n");
	BReference<ProcessSession> sessionReference(session, true);

	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");
	BReference<ProcessGroup> groupReference(group, true);

	group->Publish(session);

	// create the kernel team
	sKernelTeam = Team::Create(1, "kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");

	sKernelTeam->address_space = VMAddressSpace::Kernel();
	sKernelTeam->SetArgs(sKernelTeam->Name());
	sKernelTeam->state = TEAM_STATE_NORMAL;

	// the kernel team runs with root credentials
	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;

	insert_team_into_group(group, sKernelTeam);

	// the kernel team gets its own I/O context (no parent context to inherit)
	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	// a missing FD table resize is not fatal, only logged
	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
		dprintf("Failed to resize FD table for kernel team!\n");

	// stick it in the team hash
	sTeamHash.Insert(sKernelTeam);

	// check safe mode settings
	sDisableUserAddOns = get_safemode_boolean(B_SAFEMODE_DISABLE_USER_ADD_ONS,
		false);

	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	sNotificationService.Register();

	return B_OK;
}
2945 
2946 
/*!	Returns the maximum number of teams the system supports. */
int32
team_max_teams(void)
{
	return sMaxTeams;
}
2952 
2953 
/*!	Returns the number of teams currently in use.
	Acquires (and releases) the team hash lock to read the counter.
*/
int32
team_used_teams(void)
{
	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
	return sUsedTeams;
}
2960 
2961 
2962 /*! Returns a death entry of a child team specified by ID (if any).
2963 	The caller must hold the team's lock.
2964 
2965 	\param team The team whose dead children list to check.
2966 	\param child The ID of the child for whose death entry to lock. Must be > 0.
2967 	\param _deleteEntry Return variable, indicating whether the caller needs to
2968 		delete the returned entry.
2969 	\return The death entry of the matching team, or \c NULL, if no death entry
2970 		for the team was found.
2971 */
2972 job_control_entry*
2973 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2974 {
2975 	if (child <= 0)
2976 		return NULL;
2977 
2978 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2979 		child);
2980 	if (entry) {
2981 		// remove the entry only, if the caller is the parent of the found team
2982 		if (team_get_current_team_id() == entry->thread) {
2983 			team->dead_children.entries.Remove(entry);
2984 			team->dead_children.count--;
2985 			*_deleteEntry = true;
2986 		} else {
2987 			*_deleteEntry = false;
2988 		}
2989 	}
2990 
2991 	return entry;
2992 }
2993 
2994 
2995 /*! Quick check to see if we have a valid team ID. */
2996 bool
2997 team_is_valid(team_id id)
2998 {
2999 	if (id <= 0)
3000 		return false;
3001 
3002 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3003 	return team_get_team_struct_locked(id) != NULL;
3004 }
3005 
3006 
/*!	Looks up the team with the given ID in the global team hash.
	The caller must hold \c sTeamHashLock.
	\return The team, or \c NULL if there is no team with that ID.
*/
Team*
team_get_team_struct_locked(team_id id)
{
	return sTeamHash.Lookup(id);
}
3012 
3013 
3014 void
3015 team_set_controlling_tty(void* tty)
3016 {
3017 	// lock the team, so its session won't change while we're playing with it
3018 	Team* team = thread_get_current_thread()->team;
3019 	TeamLocker teamLocker(team);
3020 
3021 	// get and lock the session
3022 	ProcessSession* session = team->group->Session();
3023 	AutoLocker<ProcessSession> sessionLocker(session);
3024 
3025 	// set the session's fields
3026 	session->controlling_tty = tty;
3027 	session->foreground_group = -1;
3028 }
3029 
3030 
3031 void*
3032 team_get_controlling_tty()
3033 {
3034 	// lock the team, so its session won't change while we're playing with it
3035 	Team* team = thread_get_current_thread()->team;
3036 	TeamLocker teamLocker(team);
3037 
3038 	// get and lock the session
3039 	ProcessSession* session = team->group->Session();
3040 	AutoLocker<ProcessSession> sessionLocker(session);
3041 
3042 	// get the session's field
3043 	return session->controlling_tty;
3044 }
3045 
3046 
/*!	Sets the foreground process group of the session the calling team belongs
	to, provided \a tty is that session's controlling terminal.

	\param tty The controlling terminal of the calling process's session.
	\param processGroupID The ID of the new foreground process group; it must
		belong to the same session.
	\return \c B_OK on success, \c ENOTTY if \a tty is not the session's
		controlling terminal, \c B_BAD_VALUE if the group doesn't exist or
		belongs to another session, or \c B_INTERRUPTED if the calling
		(background) group was sent a SIGTTOU instead.
*/
status_t
team_set_foreground_process_group(void* tty, pid_t processGroupID)
{
	// lock the team, so its session won't change while we're playing with it
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// check given TTY -- must be the controlling tty of the calling process
	if (session->controlling_tty != tty)
		return ENOTTY;

	// check given process group -- must belong to our session
	{
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
		if (group == NULL || group->Session() != session)
			return B_BAD_VALUE;
	}

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
		InterruptsSpinLocker signalLocker(team->signal_lock);

		if (!is_team_signal_blocked(team, SIGTTOU)) {
			pid_t groupID = team->group_id;

			// drop all our locks before sending the signal to the group
			signalLocker.Unlock();
			sessionLocker.Unlock();
			teamLocker.Unlock();

			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
			send_signal_to_process_group(groupID, signal, 0);
			return B_INTERRUPTED;
		}
	}

	session->foreground_group = processGroupID;

	return B_OK;
}
3096 
3097 
3098 uid_t
3099 team_geteuid(team_id id)
3100 {
3101 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3102 	Team* team = team_get_team_struct_locked(id);
3103 	if (team == NULL)
3104 		return (uid_t)-1;
3105 	return team->effective_uid;
3106 }
3107 
3108 
/*!	Removes the specified team from the global team hash, from its process
	group, and from its parent.
	It also moves all of its children to the kernel team.

	The caller must hold the following locks:
	- \a team's process group's lock,
	- the kernel team's lock,
	- \a team's parent team's lock (might be the kernel team), and
	- \a team's lock.

	\param team The team to remove. Its state is set to \c TEAM_STATE_DEATH.
	\param _signalGroup Set to the ID of the process group the caller is
		supposed to signal (the session's foreground group, if \a team was a
		session leader with controlling terminal), or -1 otherwise.
*/
void
team_remove_team(Team* team, pid_t& _signalGroup)
{
	Team* parent = team->parent;

	// remember how long this team lasted
	parent->dead_children.kernel_time += team->dead_threads_kernel_time
		+ team->dead_children.kernel_time;
	parent->dead_children.user_time += team->dead_threads_user_time
		+ team->dead_children.user_time;

	// remove the team from the hash table
	InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
	sTeamHash.Remove(team);
	sUsedTeams--;
	teamsLocker.Unlock();

	// The team can no longer be accessed by ID. Navigation to it is still
	// possible from its process group and its parent and children, but that
	// will be rectified shortly.
	team->state = TEAM_STATE_DEATH;

	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signalling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine, whom to signal.
	_signalGroup = -1;
	bool isSessionLeader = false;
	if (team->session_id == team->id
		&& team->group->Session()->controlling_tty != NULL) {
		isSessionLeader = true;

		ProcessSession* session = team->group->Session();

		AutoLocker<ProcessSession> sessionLocker(session);

		// the session loses its controlling terminal
		session->controlling_tty = NULL;
		_signalGroup = session->foreground_group;
	}

	// remove us from our process group
	remove_team_from_group(team);

	// move the team's children to the kernel team
	while (Team* child = team->children) {
		// remove the child from the current team and add it to the kernel team
		TeamLocker childLocker(child);

		remove_team_from_parent(team, child);
		insert_team_into_parent(sKernelTeam, child);

		// move job control entries too
		// (these calls move all of the team's entries; iterations after the
		// first one effectively find the lists empty already)
		sKernelTeam->stopped_children.entries.MoveFrom(
			&team->stopped_children.entries);
		sKernelTeam->continued_children.entries.MoveFrom(
			&team->continued_children.entries);

		// If the team was a session leader with controlling terminal,
		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
		// groups with stopped processes. Due to locking complications we can't
		// do that here, so we only check whether we were a reason for the
		// child's process group not being an orphan and, if so, schedule a
		// later check (cf. orphaned_process_group_check()).
		if (isSessionLeader) {
			ProcessGroup* childGroup = child->group;
			if (childGroup->Session()->id == team->session_id
				&& childGroup->id != team->group_id) {
				childGroup->ScheduleOrphanedCheck();
			}
		}

		// Note, we don't move the dead children entries. Those will be deleted
		// when the team structure is deleted.
	}

	// remove us from our parent
	remove_team_from_parent(parent, team);
}
3197 
3198 
/*!	Kills all threads but the main thread of the team and shuts down user
	debugging for it.
	To be called on exit of the team's main thread. No locks must be held.

	\param team The team in question.
	\return The port of the debugger for the team, -1 if none. To be passed to
		team_delete_team().
*/
port_id
team_shutdown_team(Team* team)
{
	ASSERT(thread_get_current_thread() == team->main_thread);

	TeamLocker teamLocker(team);

	// Make sure debugging changes won't happen anymore.
	port_id debuggerPort = -1;
	while (true) {
		// If a debugger change is in progress for the team, we'll have to
		// wait until it is done.
		ConditionVariableEntry waitForDebuggerEntry;
		bool waitForDebugger = false;

		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition != NULL) {
			// a change is in progress -- register for notification
			team->debug_info.debugger_changed_condition->Add(
				&waitForDebuggerEntry);
			waitForDebugger = true;
		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
			// The team is being debugged. That will stop with the termination
			// of the nub thread. Since we set the team state to death, no one
			// can install a debugger anymore. We fetch the debugger's port to
			// send it a message at the bitter end.
			debuggerPort = team->debug_info.debugger_port;
		}

		debugInfoLocker.Unlock();

		if (!waitForDebugger)
			break;

		// wait for the debugger change to be finished, then re-check
		teamLocker.Unlock();

		waitForDebuggerEntry.Wait();

		teamLocker.Lock();
	}

	// Mark the team as shutting down. That will prevent new threads from being
	// created and debugger changes from taking place.
	team->state = TEAM_STATE_SHUTDOWN;

	// delete all timers
	team->DeleteUserTimers(false);

	// deactivate CPU time user timers for the team
	InterruptsSpinLocker timeLocker(team->time_lock);

	if (team->HasActiveCPUTimeUserTimers())
		team->DeactivateCPUTimeUserTimers();

	timeLocker.Unlock();

	// kill all threads but the main thread
	team_death_entry deathEntry;
	deathEntry.condition.Init(team, "team death");

	while (true) {
		// re-scan the thread list each iteration until only the main thread
		// is left
		team->death_entry = &deathEntry;
		deathEntry.remaining_threads = 0;

		Thread* thread = team->thread_list;
		while (thread != NULL) {
			if (thread != team->main_thread) {
				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
				deathEntry.remaining_threads++;
			}

			thread = thread->team_next;
		}

		if (deathEntry.remaining_threads == 0)
			break;

		// there are threads to wait for
		ConditionVariableEntry entry;
		deathEntry.condition.Add(&entry);

		teamLocker.Unlock();

		entry.Wait();

		teamLocker.Lock();
	}

	team->death_entry = NULL;

	return debuggerPort;
}
3301 
3302 
/*!	Called on team exit to notify threads waiting on the team and free most
	resources associated with it.
	The caller shouldn't hold any locks.

	\param team The team to delete. The reference held by the caller is
		released here; \a team must not be accessed afterwards.
	\param debuggerPort The debugger port as returned by team_shutdown_team(),
		-1 if the team was not being debugged.
*/
void
team_delete_team(Team* team, port_id debuggerPort)
{
	// Not quite in our job description, but work that has been left by
	// team_remove_team() and that can be done now that we're not holding any
	// locks.
	orphaned_process_group_check();

	team_id teamID = team->id;
		// cached, since the Team object may be gone before the final
		// notification below

	ASSERT(team->num_threads == 0);

	// If someone is waiting for this team to be loaded, but it dies
	// unexpectedly before being done, we need to notify the waiting
	// thread now.

	TeamLocker teamLocker(team);

	if (team->loading_info != NULL) {
		// there's indeed someone waiting
		team->loading_info->result = B_ERROR;

		// wake up the waiting thread
		team->loading_info->condition.NotifyAll();
		team->loading_info = NULL;
	}

	// notify team watchers

	{
		// we're not reachable from anyone anymore at this point, so we
		// can safely access the list without any locking
		struct team_watcher* watcher;
		while ((watcher = (struct team_watcher*)list_remove_head_item(
				&team->watcher_list)) != NULL) {
			watcher->hook(teamID, watcher->data);
			free(watcher);
		}
	}

	// get team exit information
	status_t exitStatus = team->exit.status;

	teamLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	// get team usage information
	InterruptsSpinLocker timeLocker(team->time_lock);

	team_usage_info usageInfo;
	usageInfo.kernel_time = team->dead_threads_kernel_time;
	usageInfo.user_time = team->dead_threads_user_time;

	timeLocker.Unlock();

	// free team resources

	delete_user_mutex_context(team->user_mutex_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	xsi_sem_undo(team);
	remove_images(team);
	team->address_space->RemoveAndPut();

	team->ReleaseReference();
		// may destroy the Team object -- only the cached teamID and the
		// copied info are used below

	// notify the debugger, that the team is gone
	user_debug_team_deleted(teamID, debuggerPort, exitStatus, &usageInfo);
}
3376 
3377 
/*!	Returns the kernel team. */
Team*
team_get_kernel_team(void)
{
	return sKernelTeam;
}
3383 
3384 
3385 team_id
3386 team_get_kernel_team_id(void)
3387 {
3388 	if (!sKernelTeam)
3389 		return 0;
3390 
3391 	return sKernelTeam->id;
3392 }
3393 
3394 
3395 team_id
3396 team_get_current_team_id(void)
3397 {
3398 	return thread_get_current_thread()->team->id;
3399 }
3400 
3401 
/*!	Returns the address space of the team with the given ID in
	\a _addressSpace. A reference to the address space is acquired on behalf
	of the caller.

	\param id The ID of the team whose address space shall be returned.
	\param _addressSpace Pointer to the location where the address space shall
		be stored.
	\return \c B_OK on success, \c B_BAD_VALUE if no team with that ID exists.
*/
status_t
team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
{
	if (id == sKernelTeam->id) {
		// we're the kernel team, so we don't have to go through all
		// the hassle (locking and hash lookup)
		*_addressSpace = VMAddressSpace::GetKernel();
		return B_OK;
	}

	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);

	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return B_BAD_VALUE;

	// acquire a reference before handing out the address space
	team->address_space->Get();
	*_addressSpace = team->address_space;
	return B_OK;
}
3422 
3423 
/*!	Sets the team's job control state.
	The caller must hold the parent team's lock. Interrupts are allowed to be
	enabled or disabled.
	\a team The team whose job control state shall be set.
	\a newState The new state to be set.
	\a signal The signal the new state was caused by. Can be \c NULL, if none.
		Then the caller is responsible for filling in the following fields of
		the entry before releasing the parent team's lock, unless the new
		state is \c JOB_CONTROL_STATE_NONE:
		- \c signal: The number of the signal causing the state change.
		- \c signaling_user: The real UID of the user sending the signal.
*/
void
team_set_job_control_state(Team* team, job_control_state newState,
	Signal* signal)
{
	if (team == NULL || team->job_control_entry == NULL)
		return;

	// don't touch anything, if the state stays the same or the team is already
	// dead
	job_control_entry* entry = team->job_control_entry;
	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
		return;

	T(SetJobControlState(team->id, newState, signal));

	// remove from the old list
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry is in no list ATM
			break;
		case JOB_CONTROL_STATE_DEAD:
			// can't get here
			break;
		case JOB_CONTROL_STATE_STOPPED:
			team->parent->stopped_children.entries.Remove(entry);
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			team->parent->continued_children.entries.Remove(entry);
			break;
	}

	entry->state = newState;

	if (signal != NULL) {
		entry->signal = signal->Number();
		entry->signaling_user = signal->SendingUser();
	}

	// add to new list
	team_job_control_children* childList = NULL;
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry doesn't get into any list
			break;
		case JOB_CONTROL_STATE_DEAD:
			childList = &team->parent->dead_children;
			team->parent->dead_children.count++;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			childList = &team->parent->stopped_children;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			childList = &team->parent->continued_children;
			break;
	}

	if (childList != NULL) {
		childList->entries.Add(entry);

		// wake up threads waiting for a job control state change (e.g. in
		// wait_for_child())
		team->parent->dead_children.condition_variable.NotifyAll();
	}
}
3497 
3498 
3499 /*!	Inits the given team's exit information, if not yet initialized, to some
3500 	generic "killed" status.
3501 	The caller must not hold the team's lock. Interrupts must be enabled.
3502 
3503 	\param team The team whose exit info shall be initialized.
3504 */
3505 void
3506 team_init_exit_info_on_error(Team* team)
3507 {
3508 	TeamLocker teamLocker(team);
3509 
3510 	if (!team->exit.initialized) {
3511 		team->exit.reason = CLD_KILLED;
3512 		team->exit.signal = SIGKILL;
3513 		team->exit.signaling_user = geteuid();
3514 		team->exit.status = 0;
3515 		team->exit.initialized = true;
3516 	}
3517 }
3518 
3519 
/*! Adds a hook to the team that is called as soon as this team goes away.
	This call might get public in the future.

	\param teamID The ID of the team to watch.
	\param hook The function to be invoked with \a teamID and \a data when the
		team is deleted.
	\param data Caller-supplied argument passed through to \a hook.
	\return \c B_OK on success, \c B_BAD_VALUE for an invalid \a hook or
		\a teamID, \c B_NO_MEMORY or \c B_BAD_TEAM_ID on failure.
*/
status_t
start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
{
	if (hook == NULL || teamID < B_OK)
		return B_BAD_VALUE;

	// create the watcher object
	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
	if (watcher == NULL)
		return B_NO_MEMORY;

	watcher->hook = hook;
	watcher->data = data;

	// add watcher, if the team isn't already dying
	// get the team
	Team* team = Team::GetAndLock(teamID);
	if (team == NULL) {
		free(watcher);
		return B_BAD_TEAM_ID;
	}

	list_add_item(&team->watcher_list, watcher);

	team->UnlockAndReleaseReference();

	return B_OK;
}
3551 
3552 
/*!	Removes a watcher hook previously installed via start_watching_team().
	The (\a hook, \a data) pair must match the one passed to
	start_watching_team().

	\return \c B_OK on success, \c B_BAD_VALUE or \c B_BAD_TEAM_ID for invalid
		arguments, \c B_ENTRY_NOT_FOUND if no matching watcher is registered.
*/
status_t
stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
{
	if (hook == NULL || teamID < 0)
		return B_BAD_VALUE;

	// get team and remove watcher (if present)
	Team* team = Team::GetAndLock(teamID);
	if (team == NULL)
		return B_BAD_TEAM_ID;

	// search for watcher
	team_watcher* watcher = NULL;
	while ((watcher = (team_watcher*)list_get_next_item(
			&team->watcher_list, watcher)) != NULL) {
		if (watcher->hook == hook && watcher->data == data) {
			// got it!
			list_remove_item(&team->watcher_list, watcher);
			break;
		}
	}

	team->UnlockAndReleaseReference();

	if (watcher == NULL)
		return B_ENTRY_NOT_FOUND;

	// free the watcher after the team has been unlocked
	free(watcher);
	return B_OK;
}
3583 
3584 
/*!	Allocates a user_thread structure from the team.
	The team lock must be held, unless the function is called for the team's
	main thread. Interrupts must be enabled.

	\param team The team to allocate the structure for.
	\return A pointer into the team's user data area, or \c NULL if the team
		has no user data area or it could not be grown.
*/
struct user_thread*
team_allocate_user_thread(Team* team)
{
	if (team->user_data == 0)
		return NULL;

	// take an entry from the free list, if any
	if (struct free_user_thread* entry = team->free_user_threads) {
		user_thread* thread = entry->thread;
		team->free_user_threads = entry->next;
		free(entry);
			// only the kernel-side bookkeeping entry; the slot itself lives
			// in the user data area
		return thread;
	}

	while (true) {
		// enough space left?
		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
		if (team->user_data_size - team->used_user_data < needed) {
			// try to resize the area by one page
			if (resize_area(team->user_data_area,
					team->user_data_size + B_PAGE_SIZE) != B_OK) {
				return NULL;
			}

			// resized user area successfully -- try to allocate the user_thread
			// again
			team->user_data_size += B_PAGE_SIZE;
			continue;
		}

		// allocate the user_thread at the end of the used portion
		user_thread* thread
			= (user_thread*)(team->user_data + team->used_user_data);
		team->used_user_data += needed;

		return thread;
	}
}
3627 
3628 
3629 /*!	Frees the given user_thread structure.
3630 	The team's lock must not be held. Interrupts must be enabled.
3631 	\param team The team the user thread was allocated from.
3632 	\param userThread The user thread to free.
3633 */
3634 void
3635 team_free_user_thread(Team* team, struct user_thread* userThread)
3636 {
3637 	if (userThread == NULL)
3638 		return;
3639 
3640 	// create a free list entry
3641 	free_user_thread* entry
3642 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3643 	if (entry == NULL) {
3644 		// we have to leak the user thread :-/
3645 		return;
3646 	}
3647 
3648 	// add to free list
3649 	TeamLocker teamLocker(team);
3650 
3651 	entry->thread = userThread;
3652 	entry->next = team->free_user_threads;
3653 	team->free_user_threads = entry;
3654 }
3655 
3656 
3657 //	#pragma mark - Associated data interface
3658 
3659 
/*!	Creates associated data that is not yet attached to any owner. */
AssociatedData::AssociatedData()
	:
	fOwner(NULL)
{
}
3665 
3666 
/*!	Nothing to clean up at this level; subclasses release their resources. */
AssociatedData::~AssociatedData()
{
}
3670 
3671 
/*!	Hook invoked by AssociatedDataOwner::PrepareForDeletion() when the owner
	goes away. The default implementation does nothing.
*/
void
AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
{
}
3676 
3677 
/*!	Initializes the mutex guarding the associated data list. */
AssociatedDataOwner::AssociatedDataOwner()
{
	mutex_init(&fLock, "associated data owner");
}
3682 
3683 
/*!	Destroys the mutex guarding the associated data list. */
AssociatedDataOwner::~AssociatedDataOwner()
{
	mutex_destroy(&fLock);
}
3688 
3689 
/*!	Associates \a data with this owner and acquires a reference to it.

	\param data The data object to add.
	\return \c true on success, \c false if \a data already has an owner.
*/
bool
AssociatedDataOwner::AddData(AssociatedData* data)
{
	MutexLocker locker(fLock);

	if (data->Owner() != NULL)
		return false;

	// take a reference, add the object to our list, and adopt it
	data->AcquireReference();
	fList.Add(data);
	data->SetOwner(this);

	return true;
}
3704 
3705 
/*!	Dissociates \a data from this owner, releasing the reference acquired by
	AddData().

	\param data The data object to remove.
	\return \c true on success, \c false if \a data is not owned by this
		owner.
*/
bool
AssociatedDataOwner::RemoveData(AssociatedData* data)
{
	MutexLocker locker(fLock);

	if (data->Owner() != this)
		return false;

	data->SetOwner(NULL);
	fList.Remove(data);

	locker.Unlock();

	// release the reference with the lock dropped -- this may be the last
	// reference to the object
	data->ReleaseReference();

	return true;
}
3723 
3724 
/*!	To be called when the owner is about to be deleted: detaches all
	associated data, invokes their OwnerDeleted() hooks, and releases the
	references acquired by AddData().
*/
void
AssociatedDataOwner::PrepareForDeletion()
{
	MutexLocker locker(fLock);

	// move all data to a temporary list and unset the owner
	DataList list;
	list.MoveFrom(&fList);

	for (DataList::Iterator it = list.GetIterator();
		AssociatedData* data = it.Next();) {
		data->SetOwner(NULL);
	}

	locker.Unlock();

	// call the notification hooks and release our references
	while (AssociatedData* data = list.RemoveHead()) {
		data->OwnerDeleted(this);
		data->ReleaseReference();
	}
}
3747 
3748 
3749 /*!	Associates data with the current team.
3750 	When the team is deleted, the data object is notified.
3751 	The team acquires a reference to the object.
3752 
3753 	\param data The data object.
3754 	\return \c true on success, \c false otherwise. Fails only when the supplied
3755 		data object is already associated with another owner.
3756 */
3757 bool
3758 team_associate_data(AssociatedData* data)
3759 {
3760 	return thread_get_current_thread()->team->AddData(data);
3761 }
3762 
3763 
3764 /*!	Dissociates data from the current team.
3765 	Balances an earlier call to team_associate_data().
3766 
3767 	\param data The data object.
3768 	\return \c true on success, \c false otherwise. Fails only when the data
3769 		object is not associated with the current team.
3770 */
3771 bool
3772 team_dissociate_data(AssociatedData* data)
3773 {
3774 	return thread_get_current_thread()->team->RemoveData(data);
3775 }
3776 
3777 
3778 //	#pragma mark - Public kernel API
3779 
3780 
/*!	Loads a new userland image with default settings: normal priority, the
	current team as parent, and waiting until the image has been fully loaded.
	\return The ID of the new team's main thread, or an error code.
*/
thread_id
load_image(int32 argCount, const char** args, const char** env)
{
	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
}
3787 
3788 
3789 thread_id
3790 load_image_etc(int32 argCount, const char* const* args,
3791 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3792 {
3793 	// we need to flatten the args and environment
3794 
3795 	if (args == NULL)
3796 		return B_BAD_VALUE;
3797 
3798 	// determine total needed size
3799 	int32 argSize = 0;
3800 	for (int32 i = 0; i < argCount; i++)
3801 		argSize += strlen(args[i]) + 1;
3802 
3803 	int32 envCount = 0;
3804 	int32 envSize = 0;
3805 	while (env != NULL && env[envCount] != NULL)
3806 		envSize += strlen(env[envCount++]) + 1;
3807 
3808 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3809 	if (size > MAX_PROCESS_ARGS_SIZE)
3810 		return B_TOO_MANY_ARGS;
3811 
3812 	// allocate space
3813 	char** flatArgs = (char**)malloc(size);
3814 	if (flatArgs == NULL)
3815 		return B_NO_MEMORY;
3816 
3817 	char** slot = flatArgs;
3818 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3819 
3820 	// copy arguments and environment
3821 	for (int32 i = 0; i < argCount; i++) {
3822 		int32 argSize = strlen(args[i]) + 1;
3823 		memcpy(stringSpace, args[i], argSize);
3824 		*slot++ = stringSpace;
3825 		stringSpace += argSize;
3826 	}
3827 
3828 	*slot++ = NULL;
3829 
3830 	for (int32 i = 0; i < envCount; i++) {
3831 		int32 envSize = strlen(env[i]) + 1;
3832 		memcpy(stringSpace, env[i], envSize);
3833 		*slot++ = stringSpace;
3834 		stringSpace += envSize;
3835 	}
3836 
3837 	*slot++ = NULL;
3838 
3839 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3840 		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
3841 
3842 	free(flatArgs);
3843 		// load_image_internal() unset our variable if it took over ownership
3844 
3845 	return thread;
3846 }
3847 
3848 
/*!	Waits for the team with the given ID to terminate.

	\param id The ID of the team to wait for.
	\param _returnCode Pointer to the location where the exit code of the
		team's main thread shall be stored.
	\return \c B_OK on success, \c B_BAD_TEAM_ID if the team doesn't exist, or
		the error returned by wait_for_thread().
*/
status_t
wait_for_team(team_id id, status_t* _returnCode)
{
	// check whether the team exists
	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);

	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;

	id = team->id;

	teamsLocker.Unlock();

	// wait for the main thread (it has the same ID as the team)
	return wait_for_thread(id, _returnCode);
}
3866 
3867 
3868 status_t
3869 kill_team(team_id id)
3870 {
3871 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3872 
3873 	Team* team = team_get_team_struct_locked(id);
3874 	if (team == NULL)
3875 		return B_BAD_TEAM_ID;
3876 
3877 	id = team->id;
3878 
3879 	teamsLocker.Unlock();
3880 
3881 	if (team == sKernelTeam)
3882 		return B_NOT_ALLOWED;
3883 
3884 	// Just kill the team's main thread (it has same ID as the team). The
3885 	// cleanup code there will take care of the team.
3886 	return kill_thread(id);
3887 }
3888 
3889 
/*!	Fills in \a info for the team with the given ID.
	\return \c B_OK on success, \c B_BAD_TEAM_ID if the team doesn't exist, or
		the error returned by fill_team_info().
*/
status_t
_get_team_info(team_id id, team_info* info, size_t size)
{
	// get the team
	Team* team = Team::Get(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	// fill in the info
	return fill_team_info(team, info, size);
}
3902 
3903 
/*!	Iterates over all teams, filling in \a info for the next existing team
	with an ID >= \c *cookie. \a cookie is updated for the next call.
	\return \c B_OK on success, \c B_BAD_TEAM_ID when the iteration is done.
*/
status_t
_get_next_team_info(int32* cookie, team_info* info, size_t size)
{
	int32 slot = *cookie;
	if (slot < 1)
		slot = 1;

	InterruptsReadSpinLocker locker(sTeamHashLock);

	team_id lastTeamID = peek_next_thread_id();
		// TODO: This is broken, since the id can wrap around!

	// get next valid team
	Team* team = NULL;
	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
		slot++;

	if (team == NULL)
		return B_BAD_TEAM_ID;

	// get a reference to the team and unlock
	BReference<Team> teamReference(team);
	locker.Unlock();

	// fill in the info
	*cookie = ++slot;
	return fill_team_info(team, info, size);
}
3932 
3933 
/*!	Returns CPU usage information for the given team in \a info.
	\return \c B_OK on success, \c B_BAD_VALUE if \a size doesn't match
		\c team_usage_info, or the error returned by
		common_get_team_usage_info().
*/
status_t
_get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
{
	if (size != sizeof(team_usage_info))
		return B_BAD_VALUE;

	return common_get_team_usage_info(id, who, info, 0);
}
3942 
3943 
3944 pid_t
3945 getpid(void)
3946 {
3947 	return thread_get_current_thread()->team->id;
3948 }
3949 
3950 
/*!	Returns the ID of the calling process' parent. */
pid_t
getppid()
{
	return _getppid(0);
}
3956 
3957 
/*!	Returns the process group ID of the process with the given ID, or of the
	calling process, if \a id is 0.
	On error returns -1 and sets errno (EINVAL for a negative \a id, ESRCH if
	the process doesn't exist).
*/
pid_t
getpgid(pid_t id)
{
	if (id < 0) {
		errno = EINVAL;
		return -1;
	}

	if (id == 0) {
		// get process group of the calling process
		Team* team = thread_get_current_thread()->team;
		TeamLocker teamLocker(team);
		return team->group_id;
	}

	// get the team
	Team* team = Team::GetAndLock(id);
	if (team == NULL) {
		errno = ESRCH;
		return -1;
	}

	// get the team's process group ID
	pid_t groupID = team->group_id;

	team->UnlockAndReleaseReference();

	return groupID;
}
3987 
3988 
/*!	Returns the session ID of the process with the given ID, or of the calling
	process, if \a id is 0.
	On error returns -1 and sets errno (EINVAL for a negative \a id, ESRCH if
	the process doesn't exist).
*/
pid_t
getsid(pid_t id)
{
	if (id < 0) {
		errno = EINVAL;
		return -1;
	}

	if (id == 0) {
		// get session of the calling process
		Team* team = thread_get_current_thread()->team;
		TeamLocker teamLocker(team);
		return team->session_id;
	}

	// get the team
	Team* team = Team::GetAndLock(id);
	if (team == NULL) {
		errno = ESRCH;
		return -1;
	}

	// get the team's session ID
	pid_t sessionID = team->session_id;

	team->UnlockAndReleaseReference();

	return sessionID;
}
4018 
4019 
4020 //	#pragma mark - User syscalls
4021 
4022 
/*!	Syscall backend for exec*(): replaces the calling team's userland image.
	On success exec_team() does not return; an error code is returned
	otherwise.

	\param userPath Userland pointer to the path of the new executable.
	\param userFlatArgs Userland pointer to the flattened argument/environment
		block.
	\param flatArgsSize Size of the flattened block in bytes.
	\param argCount Number of arguments in the block.
	\param envCount Number of environment entries in the block.
	\param umask The umask for the new image.
*/
status_t
_user_exec(const char* userPath, const char* const* userFlatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char path[B_PATH_NAME_LENGTH];

	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
		return B_BAD_ADDRESS;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);

	if (error == B_OK) {
		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
			envCount, umask);
			// this one only returns in case of error
	}

	free(flatArgs);
	return error;
}
4049 
4050 
/*!	Syscall backend for fork(). */
thread_id
_user_fork(void)
{
	return fork_team();
}
4056 
4057 
/*!	Syscall backend for waitpid()/waitid(): waits for a child state change.

	\param child The child (or child group) to wait for, as for waitpid().
	\param flags Wait flags (e.g. \c WNOHANG).
	\param userInfo Userland pointer for the \c siginfo_t describing the state
		change. May be \c NULL.
	\param usageInfo Userland pointer for the child's resource usage. May be
		\c NULL.
	\return The ID of the child whose state changed, or an error code.
*/
pid_t
_user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo,
	team_usage_info* usageInfo)
{
	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;
	if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo))
		return B_BAD_ADDRESS;

	siginfo_t info;
	team_usage_info usage_info;
	pid_t foundChild = wait_for_child(child, flags, info, usage_info);
	if (foundChild < 0)
		return syscall_restart_handle_post(foundChild);

	// copy info back to userland
	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
		return B_BAD_ADDRESS;
	// copy usage_info back to userland
	if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info,
		sizeof(usage_info)) != B_OK) {
		return B_BAD_ADDRESS;
	}

	return foundChild;
}
4084 
4085 
/*!	Syscall backend for getsid()/getpgid()/getppid().

	\param process The ID of the process in question, 0 for the calling
		process.
	\param which One of \c SESSION_ID, \c GROUP_ID, \c PARENT_ID.
	\return The requested ID, or an error code. On failure the errno value set
		by the called function (a Haiku error code) is returned directly.
*/
pid_t
_user_process_info(pid_t process, int32 which)
{
	pid_t result;
	switch (which) {
		case SESSION_ID:
			result = getsid(process);
			break;
		case GROUP_ID:
			result = getpgid(process);
			break;
		case PARENT_ID:
			result = _getppid(process);
			break;
		default:
			return B_BAD_VALUE;
	}

	return result >= 0 ? result : errno;
}
4106 
4107 
/*!	Syscall backend for setpgid().

	\param processID The ID of the target process, 0 for the calling process.
	\param groupID The ID of the process group to join; 0 means "use the
		target process' ID" (i.e. create/lead a group with that ID).
	\return The resulting process group ID, or an error code.
*/
pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the target
	//   process as group leader.
	// * Set the target process' process group to an already existing one in the
	//   same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == NULL) {
				// This can only happen if the team is exiting.
				ASSERT(team->state >= TEAM_STATE_SHUTDOWN);
				return ESRCH;
			}

			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			// (a brand-new group is not published yet, so no one else can
			// hold its lock)
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock in
			// the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child of
			// the calling team and in the same session. (If that's the case it
			// isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child, when the child has
			// already executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different session.
			// That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll();

		return group->id;
	}
}
4265 
4266 
/*!	Syscall backend for setsid(): makes the calling team the leader of a new
	session and a new process group with its own ID.
	\return The new process group's ID (== session ID), or an error code.
		Fails with \c B_NOT_ALLOWED if the team already leads a process group.
*/
pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll();

	return group->id;
}
4306 
4307 
/*!	Syscall backend for wait_for_team().

	\param id The ID of the team to wait for.
	\param _userReturnCode Userland pointer for the team's exit code. May be
		\c NULL.
	\return \c B_OK on success, \c B_BAD_ADDRESS for an invalid userland
		pointer, or the (possibly restart-handled) wait error.
*/
status_t
_user_wait_for_team(team_id id, status_t* _userReturnCode)
{
	status_t returnCode;
	status_t status;

	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
		return B_BAD_ADDRESS;

	status = wait_for_team(id, &returnCode);
	if (status >= B_OK && _userReturnCode != NULL) {
		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
				!= B_OK)
			return B_BAD_ADDRESS;
		return B_OK;
	}

	return syscall_restart_handle_post(status);
}
4327 
4328 
/*!	Syscall backend for load_image(): loads a new userland team from an
	already flattened argument/environment block.

	\param userFlatArgs Userland pointer to the flattened block.
	\param flatArgsSize Size of the block in bytes.
	\param argCount Number of arguments; must be at least 1 (the path).
	\param envCount Number of environment entries.
	\param priority Priority for the new team's main thread.
	\param flags Load flags (e.g. \c B_WAIT_TILL_LOADED).
	\param errorPort Port for loader error messages, -1 for none.
	\param errorToken Token identifying the request on \a errorPort.
	\return The ID of the new team's main thread, or an error code.
*/
thread_id
_user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, int32 priority, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));

	if (argCount < 1)
		return B_BAD_VALUE;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);
	if (error != B_OK)
		return error;

	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
		errorToken);

	free(flatArgs);
		// load_image_internal() unset our variable if it took over ownership

	return thread;
}
4355 
4356 
/*!	Syscall implementing exit_team()/_exit(): records \a returnValue as the
	exit status of the calling thread and its team, and takes the team down
	by sending the calling thread a SIGKILL. Does not return to userland.
	\param returnValue The exit status to report to a waiting parent.
*/
void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// Set the team exit status -- but only if no other thread (or a fatal
	// signal) set it before us; the first recorded status wins.
	TeamLocker teamLocker(team);

	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	// Note: GCC 13 marks the following call as potentially overflowing, since it thinks team may
	//       be `nullptr`. This cannot be the case in reality, therefore ignore this specific
	//       error.
	#pragma GCC diagnostic push
	#pragma GCC diagnostic ignored "-Wstringop-overflow"
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();
	#pragma GCC diagnostic pop

	// Send this thread a SIGKILL. This makes sure the thread will not return to
	// userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}
4396 
4397 
/*!	Syscall backing kill_team(): thin forwarder -- all work (and any
	permission checking) happens in kill_team().
*/
status_t
_user_kill_team(team_id team)
{
	return kill_team(team);
}
4403 
4404 
4405 status_t
4406 _user_get_team_info(team_id id, team_info* userInfo, size_t size)
4407 {
4408 	status_t status;
4409 	team_info info;
4410 
4411 	if (size > sizeof(team_info))
4412 		return B_BAD_VALUE;
4413 
4414 	if (!IS_USER_ADDRESS(userInfo))
4415 		return B_BAD_ADDRESS;
4416 
4417 	status = _get_team_info(id, &info, size);
4418 	if (status == B_OK) {
4419 		if (user_memcpy(userInfo, &info, size) < B_OK)
4420 			return B_BAD_ADDRESS;
4421 	}
4422 
4423 	return status;
4424 }
4425 
4426 
4427 status_t
4428 _user_get_next_team_info(int32* userCookie, team_info* userInfo, size_t size)
4429 {
4430 	status_t status;
4431 	team_info info;
4432 	int32 cookie;
4433 
4434 	if (size > sizeof(team_info))
4435 		return B_BAD_VALUE;
4436 
4437 	if (!IS_USER_ADDRESS(userCookie)
4438 		|| !IS_USER_ADDRESS(userInfo)
4439 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4440 		return B_BAD_ADDRESS;
4441 
4442 	status = _get_next_team_info(&cookie, &info, size);
4443 	if (status != B_OK)
4444 		return status;
4445 
4446 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4447 		|| user_memcpy(userInfo, &info, size) < B_OK)
4448 		return B_BAD_ADDRESS;
4449 
4450 	return status;
4451 }
4452 
4453 
/*!	Syscall backing get_current_team(): returns the ID of the calling
	thread's team. Cannot fail.
*/
team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}
4459 
4460 
4461 status_t
4462 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4463 	size_t size)
4464 {
4465 	if (size != sizeof(team_usage_info))
4466 		return B_BAD_VALUE;
4467 
4468 	team_usage_info info;
4469 	status_t status = common_get_team_usage_info(team, who, &info,
4470 		B_CHECK_PERMISSION);
4471 
4472 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4473 		|| user_memcpy(userInfo, &info, size) != B_OK) {
4474 		return B_BAD_ADDRESS;
4475 	}
4476 
4477 	return status;
4478 }
4479 
4480 
/*!	Syscall backing get_extended_team_info(): builds a flattened KMessage with
	information on the given team and copies it to userland.
	\param teamID The team to query.
	\param flags Bit mask selecting what to return; currently only
		\c B_TEAM_INFO_BASIC is supported.
	\param buffer Userland buffer the flattened message is copied to. May be
		\c NULL, if \a size is 0.
	\param size Size of \a buffer.
	\param _sizeNeeded Userland pointer to which the required buffer size is
		written -- also when \a size is too small.
	\return \c B_OK on success, \c B_BUFFER_OVERFLOW if \a size is too small
		(\a _sizeNeeded is set in this case, too), another error code
		otherwise.
*/
status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// Allocate memory for a copy of the needed team data, so that the
		// team lock can be dropped before building the (allocating) message.
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		} teamClone;

		io_context* ioContext;
		{
			// get the team structure
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);

			// copy the data
			teamClone.id = team->id;
			strlcpy(teamClone.name, team->Name(), sizeof(teamClone.name));
			teamClone.group_id = team->group_id;
			teamClone.session_id = team->session_id;
			teamClone.real_uid = team->real_uid;
			teamClone.real_gid = team->real_gid;
			teamClone.effective_uid = team->effective_uid;
			teamClone.effective_gid = team->effective_gid;

			// also fetch a reference to the I/O context, so it stays valid
			// after the team has been unlocked
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		CObjectDeleter<io_context, void, vfs_put_io_context>
			ioContextPutter(ioContext);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone.id) != B_OK
			|| info.AddString("name", teamClone.name) != B_OK
			|| info.AddInt32("process group", teamClone.group_id) != B_OK
			|| info.AddInt32("session", teamClone.session_id) != B_OK
			|| info.AddInt32("uid", teamClone.real_uid) != B_OK
			|| info.AddInt32("gid", teamClone.real_gid) != B_OK
			|| info.AddInt32("euid", teamClone.effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone.effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			MutexLocker ioContextLocker(ioContext->io_mutex);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
4574