xref: /haiku/src/system/kernel/team.cpp (revision 13581b3d2a71545960b98fefebc5225b5bf29072)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 /*!	Team functions */
13 
14 
15 #include <team.h>
16 
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22 
23 #include <OS.h>
24 
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27 
28 #include <extended_system_info_defs.h>
29 
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <safemode.h>
47 #include <sem.h>
48 #include <syscall_process_info.h>
49 #include <syscall_load_image.h>
50 #include <syscall_restart.h>
51 #include <syscalls.h>
52 #include <tls.h>
53 #include <tracing.h>
54 #include <user_mutex.h>
55 #include <user_runtime.h>
56 #include <user_thread.h>
57 #include <usergroup.h>
58 #include <vfs.h>
59 #include <vm/vm.h>
60 #include <vm/VMAddressSpace.h>
61 #include <util/AutoLock.h>
62 #include <util/ThreadAutoLock.h>
63 
64 #include "TeamThreadTables.h"
65 
66 
67 //#define TRACE_TEAM
68 #ifdef TRACE_TEAM
69 #	define TRACE(x) dprintf x
70 #else
71 #	define TRACE(x) ;
72 #endif
73 
74 
// Hash-table key wrapper for looking teams up by ID.
struct team_key {
	team_id id;
};
78 
// Bundle of everything a freshly loaded/exec()ed team needs: the image path,
// the flattened argument/environment vectors, and bookkeeping for reporting
// load errors back to the requesting team.
struct team_arg {
	char	*path;				// path of the image to load
	char	**flat_args;		// flattened argv + envp block
	size_t	flat_args_size;		// total size of the flattened block
	uint32	arg_count;			// number of entries in argv
	uint32	env_count;			// number of entries in envp
	mode_t	umask;				// umask to apply, or (mode_t)-1 to inherit
	uint32	flags;				// TEAM_ARGS_FLAG_* flags
	port_id	error_port;			// port to send a load error notification to
	uint32	error_token;		// token identifying the request on that port
};
90 
91 #define TEAM_ARGS_FLAG_NO_ASLR	0x01
92 
93 
94 namespace {
95 
96 
// Notification service broadcasting team events (see Notify()) to
// registered listeners under the "teams" service name.
class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};
103 
104 
105 // #pragma mark - TeamTable
106 
107 
108 typedef BKernel::TeamThreadTable<Team> TeamTable;
109 
110 
111 // #pragma mark - ProcessGroupHashDefinition
112 
113 
114 struct ProcessGroupHashDefinition {
115 	typedef pid_t			KeyType;
116 	typedef	ProcessGroup	ValueType;
117 
118 	size_t HashKey(pid_t key) const
119 	{
120 		return key;
121 	}
122 
123 	size_t Hash(ProcessGroup* value) const
124 	{
125 		return HashKey(value->id);
126 	}
127 
128 	bool Compare(pid_t key, ProcessGroup* value) const
129 	{
130 		return value->id == key;
131 	}
132 
133 	ProcessGroup*& GetLink(ProcessGroup* value) const
134 	{
135 		return value->next;
136 	}
137 };
138 
139 typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
140 
141 
142 }	// unnamed namespace
143 
144 
145 // #pragma mark -
146 
147 
// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static rw_spinlock sTeamHashLock = B_RW_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

// the one kernel team -- set during initialization (not in this chunk)
static Team* sKernelTeam = NULL;
// when true, user add-ons are skipped (e.g. safe mode); set elsewhere
static bool sDisableUserAddOns = false;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

// broadcasts team events to registered listeners
static TeamNotificationService sNotificationService;

// sizes for a team's user data area (reserved vs. initially used)
static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
174 
175 
176 // #pragma mark - TeamListIterator
177 
178 
/*!	Registers this iterator's entry with the global team table, so the
	iteration remains consistent while teams come and go.
*/
TeamListIterator::TeamListIterator()
{
	// queue the entry
	InterruptsWriteSpinLocker locker(sTeamHashLock);
	sTeamHash.InsertIteratorEntry(&fEntry);
}
185 
186 
/*!	Deregisters this iterator's entry from the global team table. */
TeamListIterator::~TeamListIterator()
{
	// remove the entry
	InterruptsWriteSpinLocker locker(sTeamHashLock);
	sTeamHash.RemoveIteratorEntry(&fEntry);
}
193 
194 
195 Team*
196 TeamListIterator::Next()
197 {
198 	// get the next team -- if there is one, get reference for it
199 	InterruptsWriteSpinLocker locker(sTeamHashLock);
200 	Team* team = sTeamHash.NextElement(&fEntry);
201 	if (team != NULL)
202 		team->AcquireReference();
203 
204 	return team;
205 }
206 
207 
208 // #pragma mark - Tracing
209 
210 
211 #if TEAM_TRACING
212 namespace TeamTracing {
213 
// Trace entry recording that a fork created the given new thread.
class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
	}

private:
	thread_id			fForkedThread;	// thread created by the fork
};
231 
232 
233 class ExecTeam : public AbstractTraceEntry {
234 public:
235 	ExecTeam(const char* path, int32 argCount, const char* const* args,
236 			int32 envCount, const char* const* env)
237 		:
238 		fArgCount(argCount),
239 		fArgs(NULL)
240 	{
241 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
242 			false);
243 
244 		// determine the buffer size we need for the args
245 		size_t argBufferSize = 0;
246 		for (int32 i = 0; i < argCount; i++)
247 			argBufferSize += strlen(args[i]) + 1;
248 
249 		// allocate a buffer
250 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
251 		if (fArgs) {
252 			char* buffer = fArgs;
253 			for (int32 i = 0; i < argCount; i++) {
254 				size_t argSize = strlen(args[i]) + 1;
255 				memcpy(buffer, args[i], argSize);
256 				buffer += argSize;
257 			}
258 		}
259 
260 		// ignore env for the time being
261 		(void)envCount;
262 		(void)env;
263 
264 		Initialized();
265 	}
266 
267 	virtual void AddDump(TraceOutput& out)
268 	{
269 		out.Print("team exec, \"%p\", args:", fPath);
270 
271 		if (fArgs != NULL) {
272 			char* args = fArgs;
273 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
274 				out.Print(" \"%s\"", args);
275 				args += strlen(args) + 1;
276 			}
277 		} else
278 			out.Print(" <too long>");
279 	}
280 
281 private:
282 	char*	fPath;
283 	int32	fArgCount;
284 	char*	fArgs;
285 };
286 
287 
288 static const char*
289 job_control_state_name(job_control_state state)
290 {
291 	switch (state) {
292 		case JOB_CONTROL_STATE_NONE:
293 			return "none";
294 		case JOB_CONTROL_STATE_STOPPED:
295 			return "stopped";
296 		case JOB_CONTROL_STATE_CONTINUED:
297 			return "continued";
298 		case JOB_CONTROL_STATE_DEAD:
299 			return "dead";
300 		default:
301 			return "invalid";
302 	}
303 }
304 
305 
// Trace entry recording a job control state change of a team.
class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		fSignal(signal != NULL ? signal->Number() : 0)
			// 0 == no signal involved in the state change
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %" B_PRId32 ", "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;	// triggering signal number, or 0
};
329 
330 
// Trace entry recording the start of a wait-for-child operation.
class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %" B_PRId32 ", "
			"flags: %#" B_PRIx32, fChild, fFlags);
	}

private:
	pid_t	fChild;		// child specification passed to wait
	uint32	fFlags;		// wait flags (WNOHANG, ...)
};
351 
352 
// Trace entry recording the completion of a wait-for-child operation.
// On success all fields come from the dead child's job control entry; on
// failure the (negative) error code is stored in fTeam, which AddDump() uses
// to distinguish the two cases.
class WaitForChildDone : public AbstractTraceEntry {
public:
	WaitForChildDone(const job_control_entry& entry)
		:
		fState(entry.state),
		fTeam(entry.thread),
		fStatus(entry.status),
		fReason(entry.reason),
		fSignal(entry.signal)
	{
		Initialized();
	}

	WaitForChildDone(status_t error)
		:
		fTeam(error)
			// error case: fTeam < 0 holds the error code
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		if (fTeam >= 0) {
			out.Print("team wait for child done, team: %" B_PRId32 ", "
				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
				fTeam, job_control_state_name(fState), fStatus, fReason,
				fSignal);
		} else {
			out.Print("team wait for child failed, error: "
				"%#" B_PRIx32 ", ", fTeam);
		}
	}

private:
	job_control_state	fState;
	team_id				fTeam;		// team ID, or error code if negative
	status_t			fStatus;
	uint16				fReason;
	uint16				fSignal;
};
393 
394 }	// namespace TeamTracing
395 
396 #	define T(x) new(std::nothrow) TeamTracing::x;
397 #else
398 #	define T(x) ;
399 #endif
400 
401 
402 //	#pragma mark - TeamNotificationService
403 
404 
/*!	Registers the service under the "teams" name. */
TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}
409 
410 
/*!	Broadcasts the given team event to all listeners. The message carries the
	event code, the team ID, and a pointer to the team structure.
*/
void
TeamNotificationService::Notify(uint32 eventCode, Team* team)
{
	// build the message in a stack buffer -- no allocation needed
	char eventBuffer[128];
	KMessage event;
	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
	event.AddInt32("event", eventCode);
	event.AddInt32("team", team->id);
	event.AddPointer("teamStruct", team);

	DefaultNotificationService::Notify(event, eventCode);
}
423 
424 
425 //	#pragma mark - Team
426 
427 
/*!	Initializes the team object with the given ID, setting all fields to
	benign defaults. Allocations that can fail here (the job control entry and
	the queued signals counter) are checked afterwards in Team::Create().
*/
Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;

	hash_next = siblings_next = parent = children = group_next = NULL;
	serial_number = -1;

	group_id = session_id = -1;
	group = NULL;

	num_threads = 0;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	io_context = NULL;
	user_mutex_context = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	death_entry = NULL;
	list_init(&dead_threads);

	dead_children.condition_variable.Init(&dead_children, "team children");
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// pre-allocate the entry the parent receives when this team dies;
	// a NULL result is caught in Team::Create()
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	address_space = NULL;
	main_thread = NULL;
	thread_list = NULL;
	loading_info = NULL;

	list_init(&image_list);
	list_init(&watcher_list);
	list_init(&sem_list);
	list_init_etc(&port_list, port_team_link_offset());

	user_data = 0;
	user_data_area = -1;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	commpage_address = NULL;

	clear_team_debug_info(&debug_info, true);

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;
	B_INITIALIZE_SPINLOCK(&time_lock);

	saved_set_uid = real_uid = effective_uid = -1;
	saved_set_gid = real_gid = effective_gid = -1;

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	B_INITIALIZE_SPINLOCK(&signal_lock);

	// init mutex
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	fName[0] = '\0';
	fArgs[0] = '\0';

	// the kernel team may queue arbitrarily many signals (-1 == no limit)
	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));
	fUserDefinedTimerCount = 0;

	fCoreDumpCondition = NULL;
}
514 
515 
/*!	Releases all resources still associated with the team: the I/O context,
	owned ports and semaphores, timers, pending signals, and the various
	bookkeeping lists.
*/
Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	if (io_context != NULL)
		vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	// free the death entries of threads that died in this team
	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	// delete job control entries of dead children not yet waited for
	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	// free the list of recycled user_thread structures
	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}
551 
552 
/*!	Creates and fully initializes a new Team object.

	\param id The ID the team shall have.
	\param name The team's name, may be \c NULL (a path is reduced to its
		leaf by SetName()).
	\param kernel \c true, if the team to be created is the kernel team.
	\return The new team, or \c NULL if creating or initializing it failed.
*/
/*static*/ Team*
Team::Create(team_id id, const char* name, bool kernel)
{
	// create the team object
	Team* team = new(std::nothrow) Team(id, kernel);
	if (team == NULL)
		return NULL;
	ObjectDeleter<Team> teamDeleter(team);
		// deletes the team automatically on any early return below

	if (name != NULL)
		team->SetName(name);

	// check initialization -- these allocations happened in the constructor
	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
		return NULL;

	// finish initialization (arch specifics)
	if (arch_team_init_team_struct(team, kernel) != B_OK)
		return NULL;

	if (!kernel) {
		status_t error = user_timer_create_team_timers(team);
		if (error != B_OK)
			return NULL;
	}

	team->start_time = system_time();

	// everything went fine
	return teamDeleter.Detach();
}
584 
585 
586 /*!	\brief Returns the team with the given ID.
587 	Returns a reference to the team.
588 	Team and thread spinlock must not be held.
589 */
590 /*static*/ Team*
591 Team::Get(team_id id)
592 {
593 	if (id == B_CURRENT_TEAM) {
594 		Team* team = thread_get_current_thread()->team;
595 		team->AcquireReference();
596 		return team;
597 	}
598 
599 	InterruptsReadSpinLocker locker(sTeamHashLock);
600 	Team* team = sTeamHash.Lookup(id);
601 	if (team != NULL)
602 		team->AcquireReference();
603 	return team;
604 }
605 
606 
607 /*!	\brief Returns the team with the given ID in a locked state.
608 	Returns a reference to the team.
609 	Team and thread spinlock must not be held.
610 */
/*static*/ Team*
Team::GetAndLock(team_id id)
{
	// get the team
	Team* team = Get(id);
	if (team == NULL)
		return NULL;

	// lock it
	team->Lock();

	// only return the team, when it isn't already dying
	if (team->state >= TEAM_STATE_SHUTDOWN) {
		// drop both the lock and the reference Get() acquired
		team->Unlock();
		team->ReleaseReference();
		return NULL;
	}

	return team;
}
631 
632 
633 /*!	Locks the team and its parent team (if any).
634 	The caller must hold a reference to the team or otherwise make sure that
635 	it won't be deleted.
636 	If the team doesn't have a parent, only the team itself is locked. If the
637 	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
638 	only the team itself is locked.
639 
640 	\param dontLockParentIfKernel If \c true, the team's parent team is only
641 		locked, if it is not the kernel team.
642 */
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent.This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);
			// keeps the parent alive while we hold no lock at all

		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}
675 
676 
677 /*!	Unlocks the team and its parent team (if any).
678 */
void
Team::UnlockTeamAndParent()
{
	// reverse order of LockTeamAndParent(): parent first, then the team
	if (parent != NULL)
		parent->Unlock();

	Unlock();
}
687 
688 
689 /*!	Locks the team, its parent team (if any), and the team's process group.
690 	The caller must hold a reference to the team or otherwise make sure that
691 	it won't be deleted.
692 	If the team doesn't have a parent, only the team itself is locked.
693 */
void
Team::LockTeamParentAndProcessGroup()
{
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's lock.
	// If we have a parent, try to lock it.
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest of
	// the job. The group lock stays held throughout.
	Unlock();
	LockTeamAndParent(false);
}
709 
710 
711 /*!	Unlocks the team, its parent team (if any), and the team's process group.
712 */
void
Team::UnlockTeamParentAndProcessGroup()
{
	// reverse the lock order: group, then parent, then the team itself
	group->Unlock();

	if (parent != NULL)
		parent->Unlock();

	Unlock();
}
723 
724 
/*!	Locks the team and its process group (locking order group -> team).
	The caller must hold a reference to the team or otherwise make sure it
	won't be deleted.
*/
void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group == NULL)
			return;

		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);
			// keeps the group alive while we hold no lock at all

		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}
759 
760 
/*!	Unlocks the team and its process group (reverse of
	LockTeamAndProcessGroup()).
*/
void
Team::UnlockTeamAndProcessGroup()
{
	group->Unlock();
	Unlock();
}
767 
768 
769 void
770 Team::SetName(const char* name)
771 {
772 	if (const char* lastSlash = strrchr(name, '/'))
773 		name = lastSlash + 1;
774 
775 	strlcpy(fName, name, B_OS_NAME_LENGTH);
776 }
777 
778 
/*!	Sets the team's argument string verbatim (truncated to fit fArgs). */
void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}
784 
785 
786 void
787 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
788 {
789 	fArgs[0] = '\0';
790 	strlcpy(fArgs, path, sizeof(fArgs));
791 	for (int i = 0; i < otherArgCount; i++) {
792 		strlcat(fArgs, " ", sizeof(fArgs));
793 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
794 	}
795 }
796 
797 
/*!	Resets the team's signal dispositions for an exec(), per POSIX rules. */
void
Team::ResetSignalsOnExec()
{
	// We are supposed to keep pending signals. Signal actions shall be reset
	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
	// (for SIGCHLD it's implementation-defined). Others shall be reset to
	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
	// flags, but since there aren't any handlers, they make little sense, so
	// we clear them.

	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
		struct sigaction& action = SignalActionFor(i);
		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
			action.sa_handler = SIG_DFL;

		action.sa_mask = 0;
		action.sa_flags = 0;
		action.sa_userdata = NULL;
	}
}
818 
819 
/*!	Copies all signal actions from \a parent (used on fork). */
void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}
825 
826 
827 /*!	Adds the given user timer to the team and, if user-defined, assigns it an
828 	ID.
829 
830 	The caller must hold the team's lock.
831 
832 	\param timer The timer to be added. If it doesn't have an ID yet, it is
833 		considered user-defined and will be assigned an ID.
834 	\return \c B_OK, if the timer was added successfully, another error code
835 		otherwise.
836 */
status_t
Team::AddUserTimer(UserTimer* timer)
{
	// don't allow addition of timers when already shutting the team down
	if (state >= TEAM_STATE_SHUTDOWN)
		return B_BAD_TEAM_ID;

	// If the timer is user-defined, check timer limit and increment
	// user-defined count. A negative ID marks a not-yet-assigned,
	// i.e. user-defined, timer.
	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
		return EAGAIN;

	fUserTimers.AddTimer(timer);

	return B_OK;
}
853 
854 
855 /*!	Removes the given user timer from the team.
856 
857 	The caller must hold the team's lock.
858 
859 	\param timer The timer to be removed.
860 
861 */
void
Team::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	// user-defined timers count against the per-team limit -- release a slot
	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		UserDefinedTimersRemoved(1);
}
870 
871 
872 /*!	Deletes all (or all user-defined) user timers of the team.
873 
874 	Timer's belonging to the team's threads are not affected.
875 	The caller must hold the team's lock.
876 
877 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
878 		otherwise all timers are deleted.
879 */
void
Team::DeleteUserTimers(bool userDefinedOnly)
{
	// DeleteTimers() returns how many user-defined timers were removed
	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
	UserDefinedTimersRemoved(count);
}
886 
887 
888 /*!	If not at the limit yet, increments the team's user-defined timer count.
889 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
890 */
bool
Team::CheckAddUserDefinedTimer()
{
	// optimistically increment, roll back if the limit was already reached
	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
		atomic_add(&fUserDefinedTimerCount, -1);
		return false;
	}

	return true;
}
902 
903 
904 /*!	Subtracts the given count for the team's user-defined timer count.
905 	\param count The count to subtract.
906 */
void
Team::UserDefinedTimersRemoved(int32 count)
{
	atomic_add(&fUserDefinedTimerCount, -count);
}
912 
913 
/*!	Deactivates all of the team's CPU-time and user-time user timers.
	Deactivate() removes a timer from its list, so both loops terminate.
*/
void
Team::DeactivateCPUTimeUserTimers()
{
	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();

	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
		timer->Deactivate();
}
923 
924 
925 /*!	Returns the team's current total CPU time (kernel + user + offset).
926 
927 	The caller must hold \c time_lock.
928 
929 	\param ignoreCurrentRun If \c true and the current thread is one team's
930 		threads, don't add the time since the last time \c last_time was
931 		updated. Should be used in "thread unscheduled" scheduler callbacks,
932 		since although the thread is still running at that time, its time has
933 		already been stopped.
934 	\return The team's current total CPU time.
935 */
bigtime_t
Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
{
	// start from the times of threads that already died, plus the offset
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		// if the caller already holds this thread's time lock, don't
		// re-acquire it -- just adopt it for the duration of this iteration
		bool alreadyLocked = thread == lockedThread;
		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
		time += thread->kernel_time + thread->user_time;

		// last_time != 0 means the thread is currently scheduled -- add the
		// time since it was last accounted, unless the caller asked us to
		// ignore the current thread's running slice
		if (thread->last_time != 0) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}

		// don't unlock a lock we didn't acquire ourselves
		if (alreadyLocked)
			threadTimeLocker.Detach();
	}

	return time;
}
962 
963 
964 /*!	Returns the team's current user CPU time.
965 
966 	The caller must hold \c time_lock.
967 
968 	\return The team's current user CPU time.
969 */
bigtime_t
Team::UserCPUTime() const
{
	// start from the user time of threads that already died
	bigtime_t time = dead_threads_user_time;

	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->user_time;

		// a scheduled thread currently in userland contributes the time
		// since its last accounting point
		if (thread->last_time != 0 && !thread->in_kernel)
			time += now - thread->last_time;
	}

	return time;
}
988 
989 
990 //	#pragma mark - ProcessGroup
991 
992 
/*!	Creates a process group with the given ID. The group is not yet part of a
	session nor publicly accessible -- see Publish()/PublishLocked().
*/
ProcessGroup::ProcessGroup(pid_t id)
	:
	id(id),
	teams(NULL),
	fSession(NULL),
	fInOrphanedCheckList(false)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}
1004 
1005 
/*!	Removes the group from the orphaned-check list, the global group hash,
	and its session (releasing the session reference), then destroys the lock.
*/
ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session
	// (fSession != NULL iff the group was published)
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}
1029 
1030 
1031 /*static*/ ProcessGroup*
1032 ProcessGroup::Get(pid_t id)
1033 {
1034 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1035 	ProcessGroup* group = sGroupHash.Lookup(id);
1036 	if (group != NULL)
1037 		group->AcquireReference();
1038 	return group;
1039 }
1040 
1041 
1042 /*!	Adds the group the given session and makes it publicly accessible.
1043 	The caller must not hold the process group hash lock.
1044 */
void
ProcessGroup::Publish(ProcessSession* session)
{
	// acquire the hash lock and delegate to the locked variant
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	PublishLocked(session);
}
1051 
1052 
1053 /*!	Adds the group to the given session and makes it publicly accessible.
1054 	The caller must hold the process group hash lock.
1055 */
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	// a group must not be published twice
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	// the group holds a reference to its session until destruction
	fSession = session;
	fSession->AcquireReference();

	sGroupHash.InsertUnchecked(this);
}
1066 
1067 
1068 /*!	Checks whether the process group is orphaned.
1069 	The caller must hold the group's lock.
1070 	\return \c true, if the group is orphaned, \c false otherwise.
1071 */
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	// scan all member teams; a single parent in the same session but a
	// different group disproves orphanhood
	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}
1097 
1098 
/*!	Queues the group for a later orphaned-group check, unless it is queued
	already.
*/
void
ProcessGroup::ScheduleOrphanedCheck()
{
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (!fInOrphanedCheckList) {
		sOrphanedCheckProcessGroups.Add(this);
		fInOrphanedCheckList = true;
	}
}
1109 
1110 
/*!	Clears the orphaned-check flag. The caller is responsible for having
	removed the group from the check list.
*/
void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}
1116 
1117 
1118 //	#pragma mark - ProcessSession
1119 
1120 
/*!	Creates a session with the given ID, initially without a controlling TTY
	and with no foreground process group (-1).
*/
ProcessSession::ProcessSession(pid_t id)
	:
	id(id),
	controlling_tty(NULL),
	foreground_group(-1)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}
1131 
1132 
ProcessSession::~ProcessSession()
{
	mutex_destroy(&fLock);
}
1137 
1138 
1139 //	#pragma mark - KDL functions
1140 
1141 
/*!	Prints the most interesting fields of \a team to the kernel debugger
	output. Only to be called from KDL context.
*/
static void
_dump_team_info(Team* team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
		team->id);
	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
	kprintf("name:             '%s'\n", team->Name());
	kprintf("args:             '%s'\n", team->Args());
	kprintf("hash_next:        %p\n", team->hash_next);
	kprintf("parent:           %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:         %p\n", team->children);
	kprintf("num_threads:      %d\n", team->num_threads);
	kprintf("state:            %d\n", team->state);
	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
	kprintf("io_context:       %p\n", team->io_context);
	if (team->address_space)
		kprintf("address_space:    %p\n", team->address_space);
	kprintf("user data:        %p (area %" B_PRId32 ")\n",
		(void*)team->user_data, team->user_data_area);
	kprintf("free user thread: %p\n", team->free_user_threads);
	kprintf("main_thread:      %p\n", team->main_thread);
	kprintf("thread_list:      %p\n", team->thread_list);
	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
}
1173 
1174 
/*!	KDL command: dumps info about a team identified by address, ID, or name;
	without an argument the current thread's team is dumped.
*/
static int
dump_team_info(int argc, char** argv)
{
	ulong arg;
	bool found = false;

	// no argument -- dump the current team
	if (argc < 2) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	arg = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(arg)) {
		// semi-hack
		_dump_team_info((Team*)arg);
		return 0;
	}

	// walk through the thread list, trying to match name or id
	// (no locking needed/possible -- we're in the kernel debugger)
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == (team_id)arg) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found)
		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
	return 0;
}
1212 
1213 
/*!	KDL command: lists all teams (address, ID, parent, name). */
static int
dump_teams(int argc, char** argv)
{
	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
		B_PRINTF_POINTER_WIDTH, "parent");

	// iterate without locking -- we're in the kernel debugger
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
	}

	return 0;
}
1227 
1228 
1229 //	#pragma mark - Private functions
1230 
1231 
1232 /*! Get the parent of a given process.
1233 
1234 	Used in the implementation of getppid (where a process can get its own
1235 	parent, only) as well as in user_process_info where the information is
1236 	available to anyone (allowing to display a tree of running processes)
1237 */
1238 static pid_t
1239 _getppid(pid_t id)
1240 {
1241 	if (id < 0) {
1242 		errno = EINVAL;
1243 		return -1;
1244 	}
1245 
1246 	if (id == 0) {
1247 		Team* team = thread_get_current_thread()->team;
1248 		TeamLocker teamLocker(team);
1249 		if (team->parent == NULL) {
1250 			errno = EINVAL;
1251 			return -1;
1252 		}
1253 		return team->parent->id;
1254 	}
1255 
1256 	Team* team = Team::GetAndLock(id);
1257 	if (team == NULL) {
1258 		errno = ESRCH;
1259 		return -1;
1260 	}
1261 
1262 	pid_t parentID;
1263 
1264 	if (team->parent == NULL) {
1265 		errno = EINVAL;
1266 		parentID = -1;
1267 	} else
1268 		parentID = team->parent->id;
1269 
1270 	team->UnlockAndReleaseReference();
1271 
1272 	return parentID;
1273 }
1274 
1275 
1276 /*!	Inserts team \a team into the child list of team \a parent.
1277 
1278 	The caller must hold the lock of both \a parent and \a team.
1279 
1280 	\param parent The parent team.
1281 	\param team The team to be inserted into \a parent's child list.
1282 */
1283 static void
1284 insert_team_into_parent(Team* parent, Team* team)
1285 {
1286 	ASSERT(parent != NULL);
1287 
1288 	team->siblings_next = parent->children;
1289 	parent->children = team;
1290 	team->parent = parent;
1291 }
1292 
1293 
1294 /*!	Removes team \a team from the child list of team \a parent.
1295 
1296 	The caller must hold the lock of both \a parent and \a team.
1297 
1298 	\param parent The parent team.
1299 	\param team The team to be removed from \a parent's child list.
1300 */
1301 static void
1302 remove_team_from_parent(Team* parent, Team* team)
1303 {
1304 	Team* child;
1305 	Team* last = NULL;
1306 
1307 	for (child = parent->children; child != NULL;
1308 			child = child->siblings_next) {
1309 		if (child == team) {
1310 			if (last == NULL)
1311 				parent->children = child->siblings_next;
1312 			else
1313 				last->siblings_next = child->siblings_next;
1314 
1315 			team->parent = NULL;
1316 			break;
1317 		}
1318 		last = child;
1319 	}
1320 }
1321 
1322 
1323 /*!	Returns whether the given team is a session leader.
1324 	The caller must hold the team's lock or its process group's lock.
1325 */
1326 static bool
1327 is_session_leader(Team* team)
1328 {
1329 	return team->session_id == team->id;
1330 }
1331 
1332 
1333 /*!	Returns whether the given team is a process group leader.
1334 	The caller must hold the team's lock or its process group's lock.
1335 */
1336 static bool
1337 is_process_group_leader(Team* team)
1338 {
1339 	return team->group_id == team->id;
1340 }
1341 
1342 
1343 /*!	Inserts the given team into the given process group.
1344 	The caller must hold the process group's lock, the team's lock, and the
1345 	team's parent's lock.
1346 */
1347 static void
1348 insert_team_into_group(ProcessGroup* group, Team* team)
1349 {
1350 	team->group = group;
1351 	team->group_id = group->id;
1352 	team->session_id = group->Session()->id;
1353 
1354 	team->group_next = group->teams;
1355 	group->teams = team;
1356 	group->AcquireReference();
1357 }
1358 
1359 
1360 /*!	Removes the given team from its process group.
1361 
1362 	The caller must hold the process group's lock, the team's lock, and the
1363 	team's parent's lock. Interrupts must be enabled.
1364 
1365 	\param team The team that'll be removed from its process group.
1366 */
1367 static void
1368 remove_team_from_group(Team* team)
1369 {
1370 	ProcessGroup* group = team->group;
1371 	Team* current;
1372 	Team* last = NULL;
1373 
1374 	// the team must be in a process group to let this function have any effect
1375 	if (group == NULL)
1376 		return;
1377 
1378 	for (current = group->teams; current != NULL;
1379 			current = current->group_next) {
1380 		if (current == team) {
1381 			if (last == NULL)
1382 				group->teams = current->group_next;
1383 			else
1384 				last->group_next = current->group_next;
1385 
1386 			break;
1387 		}
1388 		last = current;
1389 	}
1390 
1391 	team->group = NULL;
1392 	team->group_next = NULL;
1393 	team->group_id = -1;
1394 
1395 	group->ReleaseReference();
1396 }
1397 
1398 
/*!	Creates the team's "user data" area and initializes the team's
	bookkeeping for it (user_data, used_user_data, user_data_size,
	free_user_threads).

	\param team The team to create the user data area for.
	\param exactAddress If not \c NULL, the area is created at exactly this
		address (used e.g. when the layout must match another team's);
		otherwise a randomized address based on KERNEL_USER_DATA_BASE is
		chosen.
	\return \c B_OK on success, an error code otherwise.
*/
static status_t
create_team_user_data(Team* team, void* exactAddress = NULL)
{
	void* address;
	uint32 addressSpec;

	if (exactAddress != NULL) {
		address = exactAddress;
		addressSpec = B_EXACT_ADDRESS;
	} else {
		address = (void*)KERNEL_USER_DATA_BASE;
		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
	}

	// Try to reserve the full address range upfront, so the area can later
	// be resized within it.
	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);

	virtual_address_restrictions virtualRestrictions = {};
	if (result == B_OK || exactAddress != NULL) {
		// Either the reservation succeeded (address now holds the reserved
		// base), or an exact address was requested, which is used even if
		// the reservation failed.
		if (exactAddress != NULL)
			virtualRestrictions.address = exactAddress;
		else
			virtualRestrictions.address = address;
		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	} else {
		// reservation failed -- fall back to a randomized base address for
		// just the initial area
		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
	}

	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area",
		kTeamUserDataInitialSize, B_FULL_LOCK,
		B_READ_AREA | B_WRITE_AREA | B_KERNEL_AREA, 0, 0,
		&virtualRestrictions, &physicalRestrictions, &address);
	// NOTE(review): if the reservation above succeeded but area creation
	// fails here, the reserved range is not explicitly unreserved --
	// presumably cleaned up with the team's address space; verify.
	if (team->user_data_area < 0)
		return team->user_data_area;

	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = kTeamUserDataInitialSize;
	team->free_user_threads = NULL;

	return B_OK;
}
1443 
1444 
1445 static void
1446 delete_team_user_data(Team* team)
1447 {
1448 	if (team->user_data_area >= 0) {
1449 		vm_delete_area(team->id, team->user_data_area, true);
1450 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1451 			kTeamUserDataReservedSize);
1452 
1453 		team->user_data = 0;
1454 		team->used_user_data = 0;
1455 		team->user_data_size = 0;
1456 		team->user_data_area = -1;
1457 		while (free_user_thread* entry = team->free_user_threads) {
1458 			team->free_user_threads = entry->next;
1459 			free(entry);
1460 		}
1461 	}
1462 }
1463 
1464 
/*!	Copies the flat arguments/environment buffer from userland into kernel
	memory and validates it.

	The expected buffer layout is: \a argCount argument pointers, a \c NULL
	terminator, \a envCount environment pointers, another \c NULL
	terminator, followed by the string data the pointers refer to.

	\param userFlatArgs Userland address of the flat arguments buffer.
	\param flatArgsSize Total size of the buffer (pointer arrays + strings).
	\param argCount Number of argument pointers (must be >= 0).
	\param envCount Number of environment pointers (must be >= 0).
	\param _flatArgs On success, set to the malloc()'d kernel copy with all
		pointers relocated to kernel addresses; the caller must free() it.
	\return \c B_OK on success, an error code otherwise.
*/
static status_t
copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, char**& _flatArgs)
{
	if (argCount < 0 || envCount < 0)
		return B_BAD_VALUE;

	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
		return B_TOO_MANY_ARGS;
	// the buffer must at least hold the two NULL-terminated pointer arrays
	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
		return B_BAD_VALUE;

	if (!IS_USER_ADDRESS(userFlatArgs))
		return B_BAD_ADDRESS;

	// allocate kernel memory
	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
	if (flatArgs == NULL)
		return B_NO_MEMORY;

	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
		free(flatArgs);
		return B_BAD_ADDRESS;
	}

	// check and relocate the array
	status_t error = B_OK;
	// NOTE(review): stringBase is computed in bytes ((char*)flatArgs +
	// count), not in pointer slots ((char*)(flatArgs + count)) -- this makes
	// the lower-bound check more lenient than the layout suggests; verify
	// whether this is intentional.
	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
	const char* stringEnd = (char*)flatArgs + flatArgsSize;
	for (int32 i = 0; i < argCount + envCount + 2; i++) {
		if (i == argCount || i == argCount + envCount + 1) {
			// check array null termination
			if (flatArgs[i] != NULL) {
				error = B_BAD_VALUE;
				break;
			}
		} else {
			// Check string: translate the userland pointer to its kernel
			// counterpart and verify that it lies within the buffer and is
			// NUL-terminated before the buffer's end.
			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
			size_t maxLen = stringEnd - arg;
			if (arg < stringBase || arg >= stringEnd
					|| strnlen(arg, maxLen) == maxLen) {
				error = B_BAD_VALUE;
				break;
			}

			flatArgs[i] = arg;
		}
	}

	if (error == B_OK)
		_flatArgs = flatArgs;
	else
		free(flatArgs);

	return error;
}
1522 
1523 
1524 static void
1525 free_team_arg(struct team_arg* teamArg)
1526 {
1527 	if (teamArg != NULL) {
1528 		free(teamArg->flat_args);
1529 		free(teamArg->path);
1530 		free(teamArg);
1531 	}
1532 }
1533 
1534 
1535 static status_t
1536 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1537 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1538 	port_id port, uint32 token)
1539 {
1540 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1541 	if (teamArg == NULL)
1542 		return B_NO_MEMORY;
1543 
1544 	teamArg->path = strdup(path);
1545 	if (teamArg->path == NULL) {
1546 		free(teamArg);
1547 		return B_NO_MEMORY;
1548 	}
1549 
1550 	// copy the args over
1551 	teamArg->flat_args = flatArgs;
1552 	teamArg->flat_args_size = flatArgsSize;
1553 	teamArg->arg_count = argCount;
1554 	teamArg->env_count = envCount;
1555 	teamArg->flags = 0;
1556 	teamArg->umask = umask;
1557 	teamArg->error_port = port;
1558 	teamArg->error_token = token;
1559 
1560 	// determine the flags from the environment
1561 	const char* const* env = flatArgs + argCount + 1;
1562 	for (int32 i = 0; i < envCount; i++) {
1563 		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
1564 			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
1565 			break;
1566 		}
1567 	}
1568 
1569 	*_teamArg = teamArg;
1570 	return B_OK;
1571 }
1572 
1573 
/*!	Second half of team creation, running in the context of the new team's
	main thread: copies the program arguments onto the user stack, clones
	and registers the commpage, loads the runtime_loader into the team's
	address space, and enters userland.

	\param args The team_arg structure (ownership is taken over; it is
		freed once the arguments have been copied to the user stack).
	\return Only returns in case of error; on success
		thread_enter_userspace_new_team() does not return.
*/
static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
		thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size								| usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
	// TLS_SIZE							| TLS data
	// sizeof(user_space_program_args)	| argument structure for the runtime
	//									| loader
	// flat arguments size				| flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	// the program args structure lives above the stack and the TLS block
	// (cf. the layout above)
	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	// copy the program path, the argument/environment pointers, the error
	// port info, and the flat arguments into userland
	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(&programArgs->disable_user_addons,
			&sDisableUserAddOns, sizeof(bool)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// update state
	team->Lock();
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	// Clone commpage area
	area_id commPageArea = clone_commpage_area(team->id,
		&team->commpage_address);
	if (commPageArea  < B_OK) {
		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
			strerror(commPageArea)));
		return commPageArea;
	}

	// Register commpage image
	image_id commPageImage = get_commpage_image();
	extended_image_info imageInfo;
	err = get_image_info(commPageImage, &imageInfo.basic_info);
	if (err != B_OK) {
		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
			strerror(err)));
		return err;
	}
	imageInfo.basic_info.text = team->commpage_address;
	imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
	imageInfo.symbol_table = NULL;
	imageInfo.symbol_hash = NULL;
	imageInfo.string_table = NULL;
	image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
	if (image < 0) {
		TRACE(("team_create_thread_start: register_image() failed: %s\n",
			strerror(image)));
		return image;
	}

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	// enter userspace -- returns only in case of error
	return thread_enter_userspace_new_team(thread, (addr_t)entry,
		programArgs, team->commpage_address);
}
1715 
1716 
/*!	Entry point of a new (userland) team's main thread. Runs
	team_create_thread_start_internal(), which returns only in case of
	error; in that case the team's exit info is initialized accordingly and
	the thread exits.
*/
static status_t
team_create_thread_start(void* args)
{
	team_create_thread_start_internal(args);
	team_init_exit_info_on_error(thread_get_current_thread()->team);
	thread_exit();
		// does not return
	return B_OK;
}
1726 
1727 
/*!	Creates a new team with a main thread and loads the image at \a path
	(i.e. \c flatArgs[0]) into it.

	\param _flatArgs The flattened arguments/environment; on successful
		handover to the team_arg structure the pointer is set to \c NULL.
	\param flatArgsSize Size of the flat arguments buffer.
	\param argCount Number of program arguments.
	\param envCount Number of environment variables.
	\param priority Not used in this function; the main thread is created
		with \c B_NORMAL_PRIORITY below.
	\param parentID ID of the new team's parent team.
	\param flags If \c B_WAIT_TILL_LOADED is set, only returns after the
		runtime_loader has finished (or failed) loading.
	\param errorPort Port the runtime_loader shall report errors to.
	\param errorToken Token identifying this request on \a errorPort.
	\return The ID of the new team's main thread, or an error code.
*/
static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	thread_id thread;
	status_t status;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	ConditionVariableEntry loadingWaitEntry;
	io_context* parentIOContext = NULL;
	team_id teamID;
	bool teamLimitReached = false;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
		"\n", path, flatArgs, argCount));

	// cut the path from the main thread name
	const char* threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the main thread object
	Thread* mainThread;
	status = Thread::Create(threadName, mainThread);
	if (status != B_OK)
		return status;
	BReference<Thread> mainThreadReference(mainThread, true);

	// create team object
	Team* team = Team::Create(mainThread->id, path, false);
	if (team == NULL)
		return B_NO_MEMORY;
	BReference<Team> teamReference(team, true);

	// set up the loading info, if the caller wants to wait for the loader
	BReference<Team> teamLoadingReference;
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		loadingInfo.condition.Init(team, "image load");
		loadingInfo.condition.Add(&loadingWaitEntry);
		loadingInfo.result = B_ERROR;
		team->loading_info = &loadingInfo;
		teamLoadingReference = teamReference;
	}

	// get the parent team
	Team* parent = Team::Get(parentID);
	if (parent == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> parentReference(parent, true);

	parent->LockTeamAndProcessGroup();
	team->Lock();

	// inherit the parent's user/group
	inherit_parent_user_and_group(team, parent);

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err2;

	// enable/disable ASLR as requested via the environment
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// insert the team into its parent and the teams hash
	parent->LockTeamAndProcessGroup();
	team->Lock();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);

		sTeamHash.Insert(team);
		teamLimitReached = sUsedTeams >= sMaxTeams;
		if (!teamLimitReached)
			sUsedTeams++;
	}

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err6;
	}

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err6;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// notify the debugger while the main thread is still suspended so that it
	// has a chance to attach early to the child.
	user_debug_team_created(teamID);

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		if (mainThread != NULL) {
			// resume the team's main thread
			thread_continue(mainThread);
		}

		// Now wait until loading is finished. We will be woken either by the
		// thread, when it finished or aborted loading, or when the team is
		// going to die (e.g. is killed). In either case the one notifying is
		// responsible for unsetting `loading_info` in the team structure.
		loadingWaitEntry.Wait();

		// We must synchronize with the thread that woke us up, to ensure
		// there are no remaining consumers of the team_loading_info.
		team->Lock();
		if (team->loading_info != NULL)
			panic("team loading wait complete, but loading_info != NULL");
		team->Unlock();
		teamLoadingReference.Unset();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	return thread;

err6:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
		sTeamHash.Remove(team);
		if (!teamLimitReached)
			sUsedTeams--;
	}

	sNotificationService.Notify(TEAM_REMOVED, team);

	delete_team_user_data(team);
err4:
	team->address_space->Put();
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	return status;
}
1952 
1953 
/*!	Almost shuts down the current team and loads a new image into it.
	If successful, this function does not return and will takeover ownership of
	the arguments provided.
	This function may only be called in a userland team (caused by one of the
	exec*() syscalls).

	\param path Path of the executable to load.
	\param _flatArgs The flattened arguments/environment; on successful
		handover to the team_arg structure the pointer is set to \c NULL.
	\param flatArgsSize Size of the flat arguments buffer.
	\param argCount Number of program arguments.
	\param envCount Number of environment variables.
	\param umask The umask the new image shall start with.
	\return Only returns on error; on success the calling thread enters the
		new image and never comes back here.
*/
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
		team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	// Tear down the old image: user data area, userland areas, XSI semaphore
	// undo info, owned ports and semaphores, loaded images, CLOEXEC file
	// descriptors, and the user mutex/realtime-sem contexts.
	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_user_mutex_context(team->user_mutex_context);
	team->user_mutex_context = NULL;
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	// update ASLR
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
			// does never return
	} else
		free_team_arg(teamArgs);

	// Sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that).
	exit_thread(status);

	// We return a status here since the signal that is sent by the
	// call above is not immediately handled.
	return B_ERROR;
}
2104 
2105 
2106 static thread_id
2107 fork_team(void)
2108 {
2109 	Thread* parentThread = thread_get_current_thread();
2110 	Team* parentTeam = parentThread->team;
2111 	Team* team;
2112 	arch_fork_arg* forkArgs;
2113 	struct area_info info;
2114 	thread_id threadID;
2115 	status_t status;
2116 	ssize_t areaCookie;
2117 	bool teamLimitReached = false;
2118 
2119 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2120 
2121 	if (parentTeam == team_get_kernel_team())
2122 		return B_NOT_ALLOWED;
2123 
2124 	// create a new team
2125 	// TODO: this is very similar to load_image_internal() - maybe we can do
2126 	// something about it :)
2127 
2128 	// create the main thread object
2129 	Thread* thread;
2130 	status = Thread::Create(parentThread->name, thread);
2131 	if (status != B_OK)
2132 		return status;
2133 	BReference<Thread> threadReference(thread, true);
2134 
2135 	// create the team object
2136 	team = Team::Create(thread->id, NULL, false);
2137 	if (team == NULL)
2138 		return B_NO_MEMORY;
2139 
2140 	parentTeam->LockTeamAndProcessGroup();
2141 	team->Lock();
2142 
2143 	team->SetName(parentTeam->Name());
2144 	team->SetArgs(parentTeam->Args());
2145 
2146 	team->commpage_address = parentTeam->commpage_address;
2147 
2148 	// Inherit the parent's user/group.
2149 	inherit_parent_user_and_group(team, parentTeam);
2150 
2151 	// inherit signal handlers
2152 	team->InheritSignalActions(parentTeam);
2153 
2154 	team->Unlock();
2155 	parentTeam->UnlockTeamAndProcessGroup();
2156 
2157 	// inherit some team debug flags
2158 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2159 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2160 
2161 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2162 	if (forkArgs == NULL) {
2163 		status = B_NO_MEMORY;
2164 		goto err1;
2165 	}
2166 
2167 	// create a new io_context for this team
2168 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2169 	if (!team->io_context) {
2170 		status = B_NO_MEMORY;
2171 		goto err2;
2172 	}
2173 
2174 	// duplicate the realtime sem context
2175 	if (parentTeam->realtime_sem_context) {
2176 		team->realtime_sem_context = clone_realtime_sem_context(
2177 			parentTeam->realtime_sem_context);
2178 		if (team->realtime_sem_context == NULL) {
2179 			status = B_NO_MEMORY;
2180 			goto err2;
2181 		}
2182 	}
2183 
2184 	// create an address space for this team
2185 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2186 		&team->address_space);
2187 	if (status < B_OK)
2188 		goto err3;
2189 
2190 	// copy all areas of the team
2191 	// TODO: should be able to handle stack areas differently (ie. don't have
2192 	// them copy-on-write)
2193 
2194 	areaCookie = 0;
2195 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2196 		if (info.area == parentTeam->user_data_area) {
2197 			// don't clone the user area; just create a new one
2198 			status = create_team_user_data(team, info.address);
2199 			if (status != B_OK)
2200 				break;
2201 
2202 			thread->user_thread = team_allocate_user_thread(team);
2203 		} else {
2204 			void* address;
2205 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2206 				&address, B_CLONE_ADDRESS, info.area);
2207 			if (area < B_OK) {
2208 				status = area;
2209 				break;
2210 			}
2211 
2212 			if (info.area == parentThread->user_stack_area)
2213 				thread->user_stack_area = area;
2214 		}
2215 	}
2216 
2217 	if (status < B_OK)
2218 		goto err4;
2219 
2220 	if (thread->user_thread == NULL) {
2221 #if KDEBUG
2222 		panic("user data area not found, parent area is %" B_PRId32,
2223 			parentTeam->user_data_area);
2224 #endif
2225 		status = B_ERROR;
2226 		goto err4;
2227 	}
2228 
2229 	thread->user_stack_base = parentThread->user_stack_base;
2230 	thread->user_stack_size = parentThread->user_stack_size;
2231 	thread->user_local_storage = parentThread->user_local_storage;
2232 	thread->sig_block_mask = parentThread->sig_block_mask;
2233 	thread->signal_stack_base = parentThread->signal_stack_base;
2234 	thread->signal_stack_size = parentThread->signal_stack_size;
2235 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2236 
2237 	arch_store_fork_frame(forkArgs);
2238 
2239 	// copy image list
2240 	if (copy_images(parentTeam->id, team) != B_OK)
2241 		goto err5;
2242 
2243 	// insert the team into its parent and the teams hash
2244 	parentTeam->LockTeamAndProcessGroup();
2245 	team->Lock();
2246 
2247 	{
2248 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2249 
2250 		sTeamHash.Insert(team);
2251 		teamLimitReached = sUsedTeams >= sMaxTeams;
2252 		if (!teamLimitReached)
2253 			sUsedTeams++;
2254 	}
2255 
2256 	insert_team_into_parent(parentTeam, team);
2257 	insert_team_into_group(parentTeam->group, team);
2258 
2259 	team->Unlock();
2260 	parentTeam->UnlockTeamAndProcessGroup();
2261 
2262 	// notify team listeners
2263 	sNotificationService.Notify(TEAM_ADDED, team);
2264 
2265 	if (teamLimitReached) {
2266 		status = B_NO_MORE_TEAMS;
2267 		goto err6;
2268 	}
2269 
2270 	// create the main thread
2271 	{
2272 		ThreadCreationAttributes threadCreationAttributes(NULL,
2273 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2274 		threadCreationAttributes.forkArgs = forkArgs;
2275 		threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS;
2276 		threadID = thread_create_thread(threadCreationAttributes, false);
2277 		if (threadID < 0) {
2278 			status = threadID;
2279 			goto err6;
2280 		}
2281 	}
2282 
2283 	// notify the debugger
2284 	user_debug_team_created(team->id);
2285 
2286 	T(TeamForked(threadID));
2287 
2288 	resume_thread(threadID);
2289 	return threadID;
2290 
2291 err6:
2292 	// Remove the team structure from the process group, the parent team, and
2293 	// the team hash table and delete the team structure.
2294 	parentTeam->LockTeamAndProcessGroup();
2295 	team->Lock();
2296 
2297 	remove_team_from_group(team);
2298 	remove_team_from_parent(team->parent, team);
2299 
2300 	team->Unlock();
2301 	parentTeam->UnlockTeamAndProcessGroup();
2302 
2303 	{
2304 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2305 		sTeamHash.Remove(team);
2306 		if (!teamLimitReached)
2307 			sUsedTeams--;
2308 	}
2309 
2310 	sNotificationService.Notify(TEAM_REMOVED, team);
2311 err5:
2312 	remove_images(team);
2313 err4:
2314 	team->address_space->RemoveAndPut();
2315 err3:
2316 	delete_realtime_sem_context(team->realtime_sem_context);
2317 err2:
2318 	free(forkArgs);
2319 err1:
2320 	team->ReleaseReference();
2321 
2322 	return status;
2323 }
2324 
2325 
2326 /*!	Returns if the specified team \a parent has any children belonging to the
2327 	process group with the specified ID \a groupID.
2328 	The caller must hold \a parent's lock.
2329 */
2330 static bool
2331 has_children_in_group(Team* parent, pid_t groupID)
2332 {
2333 	for (Team* child = parent->children; child != NULL;
2334 			child = child->siblings_next) {
2335 		TeamLocker childLocker(child);
2336 		if (child->group_id == groupID)
2337 			return true;
2338 	}
2339 
2340 	return false;
2341 }
2342 
2343 
2344 /*!	Returns the first job control entry from \a children, which matches \a id.
2345 	\a id can be:
2346 	- \code > 0 \endcode: Matching an entry with that team ID.
2347 	- \code == -1 \endcode: Matching any entry.
2348 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2349 	\c 0 is an invalid value for \a id.
2350 
2351 	The caller must hold the lock of the team that \a children belongs to.
2352 
2353 	\param children The job control entry list to check.
2354 	\param id The match criterion.
2355 	\return The first matching entry or \c NULL, if none matches.
2356 */
2357 static job_control_entry*
2358 get_job_control_entry(team_job_control_children& children, pid_t id)
2359 {
2360 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2361 		 job_control_entry* entry = it.Next();) {
2362 
2363 		if (id > 0) {
2364 			if (entry->thread == id)
2365 				return entry;
2366 		} else if (id == -1) {
2367 			return entry;
2368 		} else {
2369 			pid_t processGroup
2370 				= (entry->team ? entry->team->group_id : entry->group_id);
2371 			if (processGroup == -id)
2372 				return entry;
2373 		}
2374 	}
2375 
2376 	return NULL;
2377 }
2378 
2379 
2380 /*!	Returns the first job control entry from one of team's dead, continued, or
2381 	stopped children which matches \a id.
2382 	\a id can be:
2383 	- \code > 0 \endcode: Matching an entry with that team ID.
2384 	- \code == -1 \endcode: Matching any entry.
2385 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2386 	\c 0 is an invalid value for \a id.
2387 
2388 	The caller must hold \a team's lock.
2389 
2390 	\param team The team whose dead, stopped, and continued child lists shall be
2391 		checked.
2392 	\param id The match criterion.
2393 	\param flags Specifies which children shall be considered. Dead children
2394 		are considered when \a flags is ORed bitwise with \c WEXITED, stopped
2395 		children are considered when \a flags is ORed bitwise with \c WUNTRACED
2396 		or \c WSTOPPED, continued children when \a flags is ORed bitwise with
2397 		\c WCONTINUED.
2398 	\return The first matching entry or \c NULL, if none matches.
2399 */
2400 static job_control_entry*
2401 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2402 {
2403 	job_control_entry* entry = NULL;
2404 
2405 	if ((flags & WEXITED) != 0)
2406 		entry = get_job_control_entry(team->dead_children, id);
2407 
2408 	if (entry == NULL && (flags & WCONTINUED) != 0)
2409 		entry = get_job_control_entry(team->continued_children, id);
2410 
2411 	if (entry == NULL && (flags & (WUNTRACED | WSTOPPED)) != 0)
2412 		entry = get_job_control_entry(team->stopped_children, id);
2413 
2414 	return entry;
2415 }
2416 
2417 
/*!	Creates a new job control entry. The entry does not yet hold a reference
	to its team's process group; InitDeadState() acquires one when the owning
	team dies.
*/
job_control_entry::job_control_entry()
	:
	has_group_ref(false)
{
}
2423 
2424 
2425 job_control_entry::~job_control_entry()
2426 {
2427 	if (has_group_ref) {
2428 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2429 
2430 		ProcessGroup* group = sGroupHash.Lookup(group_id);
2431 		if (group == NULL) {
2432 			panic("job_control_entry::~job_control_entry(): unknown group "
2433 				"ID: %" B_PRId32, group_id);
2434 			return;
2435 		}
2436 
2437 		groupHashLocker.Unlock();
2438 
2439 		group->ReleaseReference();
2440 	}
2441 }
2442 
2443 
/*!	Invoked when the owning team is dying, initializing the entry according to
	the dead state.

	Snapshots the team's identity, exit information, and accumulated CPU
	times into the entry, so that the entry remains meaningful after the team
	object has been destroyed, and detaches the entry from the team.

	The caller must hold the owning team's lock and the scheduler lock.
*/
void
job_control_entry::InitDeadState()
{
	if (team != NULL) {
		ASSERT(team->exit.initialized);

		// Acquire a reference to the team's process group, so the group
		// stays valid until our destructor releases it again.
		group_id = team->group_id;
		team->group->AcquireReference();
		has_group_ref = true;

		// copy the team's ID and exit information
		thread = team->id;
		status = team->exit.status;
		reason = team->exit.reason;
		signal = team->exit.signal;
		signaling_user = team->exit.signaling_user;
		// accumulated CPU times of the team's own dead threads plus its
		// (already waited-for) dead children
		user_time = team->dead_threads_user_time
			+ team->dead_children.user_time;
		kernel_time = team->dead_threads_kernel_time
			+ team->dead_children.kernel_time;

		// detach from the team -- from now on the entry stands on its own
		team = NULL;
	}
}
2472 
2473 
/*!	Copies the state of \a other into this entry.
	Note that the process group reference is deliberately not taken over:
	\c has_group_ref is set to \c false on the copy, so only the original
	entry releases the reference in its destructor.
*/
job_control_entry&
job_control_entry::operator=(const job_control_entry& other)
{
	state = other.state;
	thread = other.thread;
	signal = other.signal;
	has_group_ref = false;
	signaling_user = other.signaling_user;
	team = other.team;
	group_id = other.group_id;
	status = other.status;
	reason = other.reason;
	user_time = other.user_time;
	kernel_time = other.kernel_time;

	return *this;
}
2491 
2492 
/*! This is the kernel backend for waitid().

	Waits for a state change (death, stop, continuation) of a child of the
	calling team, as selected by \a child and \a flags.

	\param child > 0: wait for the team with that ID; == 0: wait for any child
		in the caller's process group; == -1: wait for any child; < -1: wait
		for any child in the process group with ID \c -child.
	\param flags Bitwise combination of \c WEXITED, \c WUNTRACED/\c WSTOPPED,
		and \c WCONTINUED, selecting the state changes of interest, optionally
		ORed with \c WNOHANG (don't block) and/or \c WNOWAIT (don't consume
		the reported state).
	\param _info On success filled in with SIGCHLD-style status information
		about the child.
	\param _usage_info On success, for a dead child, filled in with the
		child's accumulated CPU usage.
	\return The ID of the child whose state change is reported, or an error
		code (\c B_BAD_VALUE, \c ECHILD, \c B_WOULD_BLOCK, \c B_INTERRUPTED).
*/
static thread_id
wait_for_child(pid_t child, uint32 flags, siginfo_t& _info,
	team_usage_info& _usage_info)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	struct job_control_entry foundEntry;
	struct job_control_entry* freeDeathEntry = NULL;
	status_t status = B_OK;

	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
		child, flags));

	T(WaitForChild(child, flags));

	// at least one state change category must be requested
	if ((flags & (WEXITED | WUNTRACED | WSTOPPED | WCONTINUED)) == 0) {
		T(WaitForChildDone(B_BAD_VALUE));
		return B_BAD_VALUE;
	}

	pid_t originalChild = child;

	bool ignoreFoundEntries = false;
	bool ignoreFoundEntriesChecked = false;

	// Loop until we either find a matching state change or must return an
	// error. Each iteration re-evaluates the situation under the team's lock.
	while (true) {
		// lock the team
		TeamLocker teamLocker(team);

		// A 0 child argument means to wait for all children in the process
		// group of the calling team.
		child = originalChild == 0 ? -team->group_id : originalChild;

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		// If we don't have an entry yet, check whether there are any children
		// complying to the process group specification at all.
		if (entry == NULL) {
			// No success yet -- check whether there are any children complying
			// to the process group specification at all.
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else if (child != team->id) {
				if (Team* childTeam = Team::Get(child)) {
					BReference<Team> childTeamReference(childTeam, true);
					TeamLocker childTeamLocker(childTeam);
					childrenExist = childTeam->parent == team;
				}
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;

			// unless WNOWAIT has been specified, "consume" the wait state
			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
				if (entry->state == JOB_CONTROL_STATE_DEAD) {
					// The child is dead. Reap its death entry.
					freeDeathEntry = entry;
					team->dead_children.entries.Remove(entry);
					team->dead_children.count--;
				} else {
					// The child is well. Reset its job control state.
					team_set_job_control_state(entry->team,
						JOB_CONTROL_STATE_NONE, NULL);
				}
			}
		}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children.condition_variable.Add(&deadWaitEntry);

		teamLocker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		// block until a child changes state (interruptible by signals)
		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			teamLocker.Lock();

			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			teamLocker.Unlock();

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// When we got here, we have a valid death entry, and already got
	// unregistered from the team or group. Fill in the returned info.
	memset(&_info, 0, sizeof(_info));
	_info.si_signo = SIGCHLD;
	_info.si_pid = foundEntry.thread;
	_info.si_uid = foundEntry.signaling_user;
	// TODO: Fill in si_errno?

	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			_info.si_code = foundEntry.reason;
			_info.si_status = foundEntry.reason == CLD_EXITED
				? foundEntry.status : foundEntry.signal;
			_usage_info.user_time = foundEntry.user_time;
			_usage_info.kernel_time = foundEntry.kernel_time;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			_info.si_code = CLD_STOPPED;
			_info.si_status = foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			_info.si_code = CLD_CONTINUED;
			_info.si_status = 0;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
	// status is available.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker signalLocker(team->signal_lock);
	SpinLocker threadCreationLocker(gThreadCreationLock);

	if (is_team_signal_blocked(team, SIGCHLD)) {
		if (get_job_control_entry(team, child, flags) == NULL)
			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
	}

	threadCreationLocker.Unlock();
	signalLocker.Unlock();
	teamLocker.Unlock();

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}
2681 
2682 
/*! Fills the team_info structure with information from the specified team.
	Interrupts must be enabled. The team must not be locked.

	\param team The team to retrieve the information for.
	\param info The structure to fill in.
	\param size The size of the caller's structure. May be smaller than
		\c sizeof(team_info) -- presumably to stay compatible with older,
		smaller versions of the structure (TODO confirm) -- but not larger.
	\return \c B_OK on success, \c B_BAD_VALUE if \a size exceeds
		\c sizeof(team_info).
*/
static status_t
fill_team_info(Team* team, team_info* info, size_t size)
{
	if (size > sizeof(team_info))
		return B_BAD_VALUE;

	// TODO: Set more informations for team_info
	memset(info, 0, size);

	info->team = team->id;
		// immutable
	info->image_count = count_images(team);
		// protected by sImageMutex

	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	info->thread_count = team->num_threads;
	//info->area_count =
	info->debugger_nub_thread = team->debug_info.nub_thread;
	info->debugger_nub_port = team->debug_info.nub_port;
	info->uid = team->effective_uid;
	info->gid = team->effective_gid;

	strlcpy(info->args, team->Args(), sizeof(info->args));
	info->argc = 1;

	// The fields from real_uid on were appended to team_info later; only
	// fill them in when the caller's structure is large enough to hold them.
	if (size > offsetof(team_info, real_uid)) {
		info->real_uid = team->real_uid;
		info->real_gid = team->real_gid;
		info->group_id = team->group_id;
		info->session_id = team->session_id;

		if (team->parent != NULL)
			info->parent = team->parent->id;
		else
			info->parent = -1;

		strlcpy(info->name, team->Name(), sizeof(info->name));
		info->start_time = team->start_time;
	}

	return B_OK;
}
2730 
2731 
2732 /*!	Returns whether the process group contains stopped processes.
2733 	The caller must hold the process group's lock.
2734 */
2735 static bool
2736 process_group_has_stopped_processes(ProcessGroup* group)
2737 {
2738 	Team* team = group->teams;
2739 	while (team != NULL) {
2740 		// the parent team's lock guards the job control entry -- acquire it
2741 		team->LockTeamAndParent(false);
2742 
2743 		if (team->job_control_entry != NULL
2744 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2745 			team->UnlockTeamAndParent();
2746 			return true;
2747 		}
2748 
2749 		team->UnlockTeamAndParent();
2750 
2751 		team = team->group_next;
2752 	}
2753 
2754 	return false;
2755 }
2756 
2757 
/*!	Iterates through all process groups queued in team_remove_team() and signals
	those that are orphaned and have stopped processes.
	The caller must not hold any team or process group locks.
*/
static void
orphaned_process_group_check()
{
	// process as long as there are groups in the list
	while (true) {
		// remove the head from the list
		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
		if (group == NULL)
			return;

		group->UnsetOrphanedCheck();
		// the reference keeps the group alive after we drop the list lock
		BReference<ProcessGroup> groupReference(group);

		// drop the list lock before locking the group itself
		orphanedCheckLocker.Unlock();

		AutoLocker<ProcessGroup> groupLocker(group);

		// If the group is orphaned and contains stopped processes, we're
		// supposed to send SIGHUP + SIGCONT.
		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
			Thread* currentThread = thread_get_current_thread();

			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
			send_signal_to_process_group_locked(group, signal, 0);

			signal.SetNumber(SIGCONT);
			send_signal_to_process_group_locked(group, signal, 0);
		}
	}
}
2794 
2795 
2796 static status_t
2797 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2798 	uint32 flags)
2799 {
2800 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2801 		return B_BAD_VALUE;
2802 
2803 	// get the team
2804 	Team* team = Team::GetAndLock(id);
2805 	if (team == NULL)
2806 		return B_BAD_TEAM_ID;
2807 	BReference<Team> teamReference(team, true);
2808 	TeamLocker teamLocker(team, true);
2809 
2810 	if ((flags & B_CHECK_PERMISSION) != 0) {
2811 		uid_t uid = geteuid();
2812 		if (uid != 0 && uid != team->effective_uid)
2813 			return B_NOT_ALLOWED;
2814 	}
2815 
2816 	bigtime_t kernelTime = 0;
2817 	bigtime_t userTime = 0;
2818 
2819 	switch (who) {
2820 		case B_TEAM_USAGE_SELF:
2821 		{
2822 			Thread* thread = team->thread_list;
2823 
2824 			for (; thread != NULL; thread = thread->team_next) {
2825 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2826 				kernelTime += thread->kernel_time;
2827 				userTime += thread->user_time;
2828 			}
2829 
2830 			kernelTime += team->dead_threads_kernel_time;
2831 			userTime += team->dead_threads_user_time;
2832 			break;
2833 		}
2834 
2835 		case B_TEAM_USAGE_CHILDREN:
2836 		{
2837 			Team* child = team->children;
2838 			for (; child != NULL; child = child->siblings_next) {
2839 				TeamLocker childLocker(child);
2840 
2841 				Thread* thread = team->thread_list;
2842 
2843 				for (; thread != NULL; thread = thread->team_next) {
2844 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2845 					kernelTime += thread->kernel_time;
2846 					userTime += thread->user_time;
2847 				}
2848 
2849 				kernelTime += child->dead_threads_kernel_time;
2850 				userTime += child->dead_threads_user_time;
2851 			}
2852 
2853 			kernelTime += team->dead_children.kernel_time;
2854 			userTime += team->dead_children.user_time;
2855 			break;
2856 		}
2857 	}
2858 
2859 	info->kernel_time = kernelTime;
2860 	info->user_time = userTime;
2861 
2862 	return B_OK;
2863 }
2864 
2865 
2866 //	#pragma mark - Private kernel API
2867 
2868 
/*!	Initializes the team subsystem: creates the global team and process group
	hash tables, the initial session and process group, and the kernel team
	itself, and registers the team-related KDL commands and the team
	notification service.
	Called once during kernel startup.

	\param args The kernel boot arguments (currently unused here).
	\return \c B_OK (panics on any unrecoverable initialization failure).
*/
status_t
team_init(kernel_args* args)
{
	// create the team hash table
	new(&sTeamHash) TeamTable;
	if (sTeamHash.Init(64) != B_OK)
		panic("Failed to init team hash table!");

	new(&sGroupHash) ProcessGroupHashTable;
	if (sGroupHash.Init() != B_OK)
		panic("Failed to init process group hash table!");

	// create initial session and process groups

	ProcessSession* session = new(std::nothrow) ProcessSession(1);
	if (session == NULL)
		panic("Could not create initial session.\n");
	BReference<ProcessSession> sessionReference(session, true);

	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");
	BReference<ProcessGroup> groupReference(group, true);

	group->Publish(session);

	// create the kernel team
	sKernelTeam = Team::Create(1, "kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");

	sKernelTeam->address_space = VMAddressSpace::Kernel();
	sKernelTeam->SetArgs(sKernelTeam->Name());
	sKernelTeam->state = TEAM_STATE_NORMAL;

	// the kernel team runs as root
	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
		dprintf("Failed to resize FD table for kernel team!\n");

	// stick it in the team hash
	sTeamHash.Insert(sKernelTeam);

	// check safe mode settings
	sDisableUserAddOns = get_safemode_boolean(B_SAFEMODE_DISABLE_USER_ADD_ONS,
		false);

	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	sNotificationService.Register();

	return B_OK;
}
2946 
2947 
/*!	Returns the maximum number of teams the system allows to exist
	concurrently.
*/
int32
team_max_teams(void)
{
	return sMaxTeams;
}
2953 
2954 
/*!	Returns the number of currently existing teams.
	The team hash lock is acquired for a consistent read; the value may be
	outdated by the time the caller uses it.
*/
int32
team_used_teams(void)
{
	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
	return sUsedTeams;
}
2961 
2962 
2963 /*! Returns a death entry of a child team specified by ID (if any).
2964 	The caller must hold the team's lock.
2965 
2966 	\param team The team whose dead children list to check.
2967 	\param child The ID of the child for whose death entry to lock. Must be > 0.
2968 	\param _deleteEntry Return variable, indicating whether the caller needs to
2969 		delete the returned entry.
2970 	\return The death entry of the matching team, or \c NULL, if no death entry
2971 		for the team was found.
2972 */
2973 job_control_entry*
2974 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2975 {
2976 	if (child <= 0)
2977 		return NULL;
2978 
2979 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2980 		child);
2981 	if (entry) {
2982 		// remove the entry only, if the caller is the parent of the found team
2983 		if (team_get_current_team_id() == entry->thread) {
2984 			team->dead_children.entries.Remove(entry);
2985 			team->dead_children.count--;
2986 			*_deleteEntry = true;
2987 		} else {
2988 			*_deleteEntry = false;
2989 		}
2990 	}
2991 
2992 	return entry;
2993 }
2994 
2995 
2996 /*! Quick check to see if we have a valid team ID. */
2997 bool
2998 team_is_valid(team_id id)
2999 {
3000 	if (id <= 0)
3001 		return false;
3002 
3003 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3004 	return team_get_team_struct_locked(id) != NULL;
3005 }
3006 
3007 
/*!	Looks up the team with the given ID in the global team hash table.
	The caller must hold \c sTeamHashLock (read or write).

	\return The team, or \c NULL if no team with that ID exists.
*/
Team*
team_get_team_struct_locked(team_id id)
{
	return sTeamHash.Lookup(id);
}
3013 
3014 
3015 void
3016 team_set_controlling_tty(void* tty)
3017 {
3018 	// lock the team, so its session won't change while we're playing with it
3019 	Team* team = thread_get_current_thread()->team;
3020 	TeamLocker teamLocker(team);
3021 
3022 	// get and lock the session
3023 	ProcessSession* session = team->group->Session();
3024 	AutoLocker<ProcessSession> sessionLocker(session);
3025 
3026 	// set the session's fields
3027 	session->controlling_tty = tty;
3028 	session->foreground_group = -1;
3029 }
3030 
3031 
3032 void*
3033 team_get_controlling_tty()
3034 {
3035 	// lock the team, so its session won't change while we're playing with it
3036 	Team* team = thread_get_current_thread()->team;
3037 	TeamLocker teamLocker(team);
3038 
3039 	// get and lock the session
3040 	ProcessSession* session = team->group->Session();
3041 	AutoLocker<ProcessSession> sessionLocker(session);
3042 
3043 	// get the session's field
3044 	return session->controlling_tty;
3045 }
3046 
3047 
/*!	Sets the foreground process group of the calling team's session.

	\param tty The session's controlling terminal. The call fails with
		\c ENOTTY if it doesn't match the session's actual controlling TTY.
	\param processGroupID The ID of the new foreground process group; it must
		belong to the calling team's session, otherwise \c B_BAD_VALUE is
		returned.
	\return \c B_OK on success; \c ENOTTY or \c B_BAD_VALUE on the failures
		described above; \c B_INTERRUPTED when the calling team is a
		background group that is sent a SIGTTOU instead (cf. POSIX
		tcsetpgrp()).
*/
status_t
team_set_foreground_process_group(void* tty, pid_t processGroupID)
{
	// lock the team, so its session won't change while we're playing with it
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// check given TTY -- must be the controlling tty of the calling process
	if (session->controlling_tty != tty)
		return ENOTTY;

	// check given process group -- must belong to our session
	{
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
		if (group == NULL || group->Session() != session)
			return B_BAD_VALUE;
	}

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
		InterruptsSpinLocker signalLocker(team->signal_lock);

		if (!is_team_signal_blocked(team, SIGTTOU)) {
			pid_t groupID = team->group_id;

			// drop all locks before sending the signal
			signalLocker.Unlock();
			sessionLocker.Unlock();
			teamLocker.Unlock();

			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
			send_signal_to_process_group(groupID, signal, 0);
			return B_INTERRUPTED;
		}
	}

	session->foreground_group = processGroupID;

	return B_OK;
}
3097 
3098 
3099 uid_t
3100 team_geteuid(team_id id)
3101 {
3102 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3103 	Team* team = team_get_team_struct_locked(id);
3104 	if (team == NULL)
3105 		return (uid_t)-1;
3106 	return team->effective_uid;
3107 }
3108 
3109 
/*!	Removes the specified team from the global team hash, from its process
	group, and from its parent.
	It also moves all of its children to the kernel team.

	The caller must hold the following locks:
	- \a team's process group's lock,
	- the kernel team's lock,
	- \a team's parent team's lock (might be the kernel team), and
	- \a team's lock.

	\param team The dying team to remove.
	\param _signalGroup Set to the session's foreground process group ID, if
		\a team was a controlling process (session leader with controlling
		terminal), so the caller can signal that group after dropping the
		locks; set to \c -1 otherwise.
*/
void
team_remove_team(Team* team, pid_t& _signalGroup)
{
	Team* parent = team->parent;

	// remember how long this team lasted
	parent->dead_children.kernel_time += team->dead_threads_kernel_time
		+ team->dead_children.kernel_time;
	parent->dead_children.user_time += team->dead_threads_user_time
		+ team->dead_children.user_time;

	// remove the team from the hash table
	InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
	sTeamHash.Remove(team);
	sUsedTeams--;
	teamsLocker.Unlock();

	// The team can no longer be accessed by ID. Navigation to it is still
	// possible from its process group and its parent and children, but that
	// will be rectified shortly.
	team->state = TEAM_STATE_DEATH;

	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signalling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine, whom to signal.
	_signalGroup = -1;
	bool isSessionLeader = false;
	if (team->session_id == team->id
		&& team->group->Session()->controlling_tty != NULL) {
		isSessionLeader = true;

		ProcessSession* session = team->group->Session();

		AutoLocker<ProcessSession> sessionLocker(session);

		session->controlling_tty = NULL;
		_signalGroup = session->foreground_group;
	}

	// remove us from our process group
	remove_team_from_group(team);

	// move the team's children to the kernel team
	while (Team* child = team->children) {
		// remove the child from the current team and add it to the kernel team
		TeamLocker childLocker(child);

		remove_team_from_parent(team, child);
		insert_team_into_parent(sKernelTeam, child);

		// move job control entries too
		sKernelTeam->stopped_children.entries.MoveFrom(
			&team->stopped_children.entries);
		sKernelTeam->continued_children.entries.MoveFrom(
			&team->continued_children.entries);

		// If the team was a session leader with controlling terminal,
		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
		// groups with stopped processes. Due to locking complications we can't
		// do that here, so we only check whether we were a reason for the
		// child's process group not being an orphan and, if so, schedule a
		// later check (cf. orphaned_process_group_check()).
		if (isSessionLeader) {
			ProcessGroup* childGroup = child->group;
			if (childGroup->Session()->id == team->session_id
				&& childGroup->id != team->group_id) {
				childGroup->ScheduleOrphanedCheck();
			}
		}

		// Note, we don't move the dead children entries. Those will be deleted
		// when the team structure is deleted.
	}

	// remove us from our parent
	remove_team_from_parent(parent, team);
}
3198 
3199 
/*!	Kills all threads but the main thread of the team and shuts down user
	debugging for it.
	To be called on exit of the team's main thread. No locks must be held.

	\param team The team in question.
	\return The port of the debugger for the team, -1 if none. To be passed to
		team_delete_team().
*/
port_id
team_shutdown_team(Team* team)
{
	// only the team's own main thread may shut the team down
	ASSERT(thread_get_current_thread() == team->main_thread);

	TeamLocker teamLocker(team);

	// Make sure debugging changes won't happen anymore.
	port_id debuggerPort = -1;
	while (true) {
		// If a debugger change is in progress for the team, we'll have to
		// wait until it is done.
		ConditionVariableEntry waitForDebuggerEntry;
		bool waitForDebugger = false;

		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition != NULL) {
			team->debug_info.debugger_changed_condition->Add(
				&waitForDebuggerEntry);
			waitForDebugger = true;
		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
			// The team is being debugged. That will stop with the termination
			// of the nub thread. Since we set the team state to death, no one
			// can install a debugger anymore. We fetch the debugger's port to
			// send it a message at the bitter end.
			debuggerPort = team->debug_info.debugger_port;
		}

		debugInfoLocker.Unlock();

		if (!waitForDebugger)
			break;

		// wait for the debugger change to be finished
		teamLocker.Unlock();

		waitForDebuggerEntry.Wait();

		teamLocker.Lock();
		// loop again: another change may have started in the meantime
	}

	// Mark the team as shutting down. That will prevent new threads from being
	// created and debugger changes from taking place.
	team->state = TEAM_STATE_SHUTDOWN;

	// delete all timers
	team->DeleteUserTimers(false);

	// deactivate CPU time user timers for the team
	InterruptsSpinLocker timeLocker(team->time_lock);

	if (team->HasActiveCPUTimeUserTimers())
		team->DeactivateCPUTimeUserTimers();

	timeLocker.Unlock();

	// kill all threads but the main thread
	team_death_entry deathEntry;
	deathEntry.condition.Init(team, "team death");

	// Each pass (re)sends SIGKILLTHR to every remaining non-main thread and
	// then waits on the death condition; we loop until a pass finds no other
	// threads left.
	while (true) {
		team->death_entry = &deathEntry;
		deathEntry.remaining_threads = 0;

		Thread* thread = team->thread_list;
		while (thread != NULL) {
			if (thread != team->main_thread) {
				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
				deathEntry.remaining_threads++;
			}

			thread = thread->team_next;
		}

		if (deathEntry.remaining_threads == 0)
			break;

		// there are threads to wait for
		ConditionVariableEntry entry;
		deathEntry.condition.Add(&entry);

		teamLocker.Unlock();

		entry.Wait();

		teamLocker.Lock();
	}

	team->death_entry = NULL;

	return debuggerPort;
}
3302 
3303 
/*!	Called on team exit to notify threads waiting on the team and free most
	resources associated with it.
	The caller shouldn't hold any locks.

	\param team The dying team; its thread count must already be 0.
	\param debuggerPort The debugger port returned by team_shutdown_team(),
		or -1 if the team was not being debugged.
*/
void
team_delete_team(Team* team, port_id debuggerPort)
{
	// Not quite in our job description, but work that has been left by
	// team_remove_team() and that can be done now that we're not holding any
	// locks.
	orphaned_process_group_check();

	team_id teamID = team->id;

	ASSERT(team->num_threads == 0);

	// If someone is waiting for this team to be loaded, but it dies
	// unexpectedly before being done, we need to notify the waiting
	// thread now.

	TeamLocker teamLocker(team);

	if (team->loading_info != NULL) {
		// there's indeed someone waiting
		team->loading_info->result = B_ERROR;

		// wake up the waiting thread
		team->loading_info->condition.NotifyAll();
		team->loading_info = NULL;
	}

	// notify team watchers

	{
		// we're not reachable from anyone anymore at this point, so we
		// can safely access the list without any locking
		struct team_watcher* watcher;
		while ((watcher = (struct team_watcher*)list_remove_head_item(
				&team->watcher_list)) != NULL) {
			watcher->hook(teamID, watcher->data);
			free(watcher);
		}
	}

	// get team exit information
	// -1 serves as "not applicable" for whichever of the two doesn't match
	// the exit reason
	status_t exitStatus = -1;
	int signal = -1;

	switch (team->exit.reason) {
		case CLD_EXITED:
			exitStatus = team->exit.status;
			break;
		case CLD_KILLED:
			signal = team->exit.signal;
			break;
	}

	teamLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	// get team usage information
	InterruptsSpinLocker timeLocker(team->time_lock);

	team_usage_info usageInfo;
	usageInfo.kernel_time = team->dead_threads_kernel_time;
	usageInfo.user_time = team->dead_threads_user_time;

	timeLocker.Unlock();

	// free team resources

	delete_user_mutex_context(team->user_mutex_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	xsi_sem_undo(team);
	remove_images(team);
	team->address_space->RemoveAndPut();

	// may delete the Team object, if this was the last reference
	team->ReleaseReference();

	// notify the debugger, that the team is gone
	user_debug_team_deleted(teamID, debuggerPort, exitStatus, signal, &usageInfo);
}
3387 
3388 
/*!	Returns the Team structure of the kernel team (never NULL after boot
	initialization). */
Team*
team_get_kernel_team(void)
{
	return sKernelTeam;
}
3394 
3395 
3396 team_id
3397 team_get_kernel_team_id(void)
3398 {
3399 	if (!sKernelTeam)
3400 		return 0;
3401 
3402 	return sKernelTeam->id;
3403 }
3404 
3405 
3406 team_id
3407 team_get_current_team_id(void)
3408 {
3409 	return thread_get_current_thread()->team->id;
3410 }
3411 
3412 
3413 status_t
3414 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3415 {
3416 	if (id == sKernelTeam->id) {
3417 		// we're the kernel team, so we don't have to go through all
3418 		// the hassle (locking and hash lookup)
3419 		*_addressSpace = VMAddressSpace::GetKernel();
3420 		return B_OK;
3421 	}
3422 
3423 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3424 
3425 	Team* team = team_get_team_struct_locked(id);
3426 	if (team == NULL)
3427 		return B_BAD_VALUE;
3428 
3429 	team->address_space->Get();
3430 	*_addressSpace = team->address_space;
3431 	return B_OK;
3432 }
3433 
3434 
/*!	Sets the team's job control state.
	The caller must hold the parent team's lock. Interrupts are allowed to be
	enabled or disabled.
	\a team The team whose job control state shall be set.
	\a newState The new state to be set.
	\a signal The signal the new state was caused by. Can be \c NULL, if none.
		Then the caller is responsible for filling in the following fields of
		the entry before releasing the parent team's lock, unless the new
		state is \c JOB_CONTROL_STATE_NONE:
		- \c signal: The number of the signal causing the state change.
		- \c signaling_user: The real UID of the user sending the signal.
*/
void
team_set_job_control_state(Team* team, job_control_state newState,
	Signal* signal)
{
	if (team == NULL || team->job_control_entry == NULL)
		return;

	// don't touch anything, if the state stays the same or the team is already
	// dead
	job_control_entry* entry = team->job_control_entry;
	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
		return;

	T(SetJobControlState(team->id, newState, signal));

	// remove from the old list
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry is in no list ATM
			break;
		case JOB_CONTROL_STATE_DEAD:
			// can't get here
			break;
		case JOB_CONTROL_STATE_STOPPED:
			team->parent->stopped_children.entries.Remove(entry);
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			team->parent->continued_children.entries.Remove(entry);
			break;
	}

	entry->state = newState;

	if (signal != NULL) {
		entry->signal = signal->Number();
		entry->signaling_user = signal->SendingUser();
	}

	// add to new list
	team_job_control_children* childList = NULL;
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry doesn't get into any list
			break;
		case JOB_CONTROL_STATE_DEAD:
			childList = &team->parent->dead_children;
			team->parent->dead_children.count++;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			childList = &team->parent->stopped_children;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			childList = &team->parent->continued_children;
			break;
	}

	if (childList != NULL) {
		childList->entries.Add(entry);
		// wake a parent possibly blocked in wait_for_child()
		team->parent->dead_children.condition_variable.NotifyAll();
	}
}
3508 
3509 
3510 /*!	Inits the given team's exit information, if not yet initialized, to some
3511 	generic "killed" status.
3512 	The caller must not hold the team's lock. Interrupts must be enabled.
3513 
3514 	\param team The team whose exit info shall be initialized.
3515 */
3516 void
3517 team_init_exit_info_on_error(Team* team)
3518 {
3519 	TeamLocker teamLocker(team);
3520 
3521 	if (!team->exit.initialized) {
3522 		team->exit.reason = CLD_KILLED;
3523 		team->exit.signal = SIGKILL;
3524 		team->exit.signaling_user = geteuid();
3525 		team->exit.status = 0;
3526 		team->exit.initialized = true;
3527 	}
3528 }
3529 
3530 
3531 /*! Adds a hook to the team that is called as soon as this team goes away.
3532 	This call might get public in the future.
3533 */
3534 status_t
3535 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3536 {
3537 	if (hook == NULL || teamID < B_OK)
3538 		return B_BAD_VALUE;
3539 
3540 	// create the watcher object
3541 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3542 	if (watcher == NULL)
3543 		return B_NO_MEMORY;
3544 
3545 	watcher->hook = hook;
3546 	watcher->data = data;
3547 
3548 	// add watcher, if the team isn't already dying
3549 	// get the team
3550 	Team* team = Team::GetAndLock(teamID);
3551 	if (team == NULL) {
3552 		free(watcher);
3553 		return B_BAD_TEAM_ID;
3554 	}
3555 
3556 	list_add_item(&team->watcher_list, watcher);
3557 
3558 	team->UnlockAndReleaseReference();
3559 
3560 	return B_OK;
3561 }
3562 
3563 
3564 status_t
3565 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3566 {
3567 	if (hook == NULL || teamID < 0)
3568 		return B_BAD_VALUE;
3569 
3570 	// get team and remove watcher (if present)
3571 	Team* team = Team::GetAndLock(teamID);
3572 	if (team == NULL)
3573 		return B_BAD_TEAM_ID;
3574 
3575 	// search for watcher
3576 	team_watcher* watcher = NULL;
3577 	while ((watcher = (team_watcher*)list_get_next_item(
3578 			&team->watcher_list, watcher)) != NULL) {
3579 		if (watcher->hook == hook && watcher->data == data) {
3580 			// got it!
3581 			list_remove_item(&team->watcher_list, watcher);
3582 			break;
3583 		}
3584 	}
3585 
3586 	team->UnlockAndReleaseReference();
3587 
3588 	if (watcher == NULL)
3589 		return B_ENTRY_NOT_FOUND;
3590 
3591 	free(watcher);
3592 	return B_OK;
3593 }
3594 
3595 
/*!	Allocates a user_thread structure from the team.
	The team lock must be held, unless the function is called for the team's
	main thread. Interrupts must be enabled.
	\param team The team to allocate from.
	\return A pointer into the team's user data area, or \c NULL if the team
		has no user data area or it could not be grown.
*/
struct user_thread*
team_allocate_user_thread(Team* team)
{
	// no user data area set up for this team
	if (team->user_data == 0)
		return NULL;

	// take an entry from the free list, if any
	if (struct free_user_thread* entry = team->free_user_threads) {
		user_thread* thread = entry->thread;
		team->free_user_threads = entry->next;
		free(entry);
		return thread;
	}

	while (true) {
		// enough space left?
		// cache-line align the slot size to avoid false sharing between
		// per-thread structures
		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
		if (team->user_data_size - team->used_user_data < needed) {
			// try to resize the area
			if (resize_area(team->user_data_area,
					team->user_data_size + B_PAGE_SIZE) != B_OK) {
				return NULL;
			}

			// resized user area successfully -- try to allocate the user_thread
			// again
			team->user_data_size += B_PAGE_SIZE;
			continue;
		}

		// allocate the user_thread
		// bump-allocate from the end of the used portion of the area
		user_thread* thread
			= (user_thread*)(team->user_data + team->used_user_data);
		team->used_user_data += needed;

		return thread;
	}
}
3638 
3639 
3640 /*!	Frees the given user_thread structure.
3641 	The team's lock must not be held. Interrupts must be enabled.
3642 	\param team The team the user thread was allocated from.
3643 	\param userThread The user thread to free.
3644 */
3645 void
3646 team_free_user_thread(Team* team, struct user_thread* userThread)
3647 {
3648 	if (userThread == NULL)
3649 		return;
3650 
3651 	// create a free list entry
3652 	free_user_thread* entry
3653 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3654 	if (entry == NULL) {
3655 		// we have to leak the user thread :-/
3656 		return;
3657 	}
3658 
3659 	// add to free list
3660 	TeamLocker teamLocker(team);
3661 
3662 	entry->thread = userThread;
3663 	entry->next = team->free_user_threads;
3664 	team->free_user_threads = entry;
3665 }
3666 
3667 
3668 //	#pragma mark - Associated data interface
3669 
3670 
// Starts out unowned; an AssociatedDataOwner adopts it via AddData().
AssociatedData::AssociatedData()
	:
	fOwner(NULL)
{
}
3676 
3677 
// Intentionally empty; defined out-of-line for the virtual base class.
AssociatedData::~AssociatedData()
{
}
3681 
3682 
/*!	Hook invoked by AssociatedDataOwner::PrepareForDeletion() when the owner
	goes away. The default implementation does nothing; subclasses override
	it to react to the owner's deletion. */
void
AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
{
}
3687 
3688 
// Initializes the mutex guarding the associated-data list.
AssociatedDataOwner::AssociatedDataOwner()
{
	mutex_init(&fLock, "associated data owner");
}
3693 
3694 
// Destroys the list mutex; PrepareForDeletion() is expected to have emptied
// the list before the owner is destroyed.
AssociatedDataOwner::~AssociatedDataOwner()
{
	mutex_destroy(&fLock);
}
3699 
3700 
3701 bool
3702 AssociatedDataOwner::AddData(AssociatedData* data)
3703 {
3704 	MutexLocker locker(fLock);
3705 
3706 	if (data->Owner() != NULL)
3707 		return false;
3708 
3709 	data->AcquireReference();
3710 	fList.Add(data);
3711 	data->SetOwner(this);
3712 
3713 	return true;
3714 }
3715 
3716 
3717 bool
3718 AssociatedDataOwner::RemoveData(AssociatedData* data)
3719 {
3720 	MutexLocker locker(fLock);
3721 
3722 	if (data->Owner() != this)
3723 		return false;
3724 
3725 	data->SetOwner(NULL);
3726 	fList.Remove(data);
3727 
3728 	locker.Unlock();
3729 
3730 	data->ReleaseReference();
3731 
3732 	return true;
3733 }
3734 
3735 
/*!	Detaches all associated data from this owner, invoking each item's
	OwnerDeleted() hook and releasing the list's references.
	The hooks are called without holding the lock. */
void
AssociatedDataOwner::PrepareForDeletion()
{
	MutexLocker locker(fLock);

	// move all data to a temporary list and unset the owner
	DataList list;
	list.MoveFrom(&fList);

	for (DataList::Iterator it = list.GetIterator();
		AssociatedData* data = it.Next();) {
		data->SetOwner(NULL);
	}

	locker.Unlock();

	// call the notification hooks and release our references
	while (AssociatedData* data = list.RemoveHead()) {
		data->OwnerDeleted(this);
		data->ReleaseReference();
	}
}
3758 
3759 
3760 /*!	Associates data with the current team.
3761 	When the team is deleted, the data object is notified.
3762 	The team acquires a reference to the object.
3763 
3764 	\param data The data object.
3765 	\return \c true on success, \c false otherwise. Fails only when the supplied
3766 		data object is already associated with another owner.
3767 */
3768 bool
3769 team_associate_data(AssociatedData* data)
3770 {
3771 	return thread_get_current_thread()->team->AddData(data);
3772 }
3773 
3774 
3775 /*!	Dissociates data from the current team.
3776 	Balances an earlier call to team_associate_data().
3777 
3778 	\param data The data object.
3779 	\return \c true on success, \c false otherwise. Fails only when the data
3780 		object is not associated with the current team.
3781 */
3782 bool
3783 team_dissociate_data(AssociatedData* data)
3784 {
3785 	return thread_get_current_thread()->team->RemoveData(data);
3786 }
3787 
3788 
3789 //	#pragma mark - Public kernel API
3790 
3791 
/*!	Convenience wrapper around load_image_etc(): loads the image with normal
	priority in the current team and waits until loading is done.
	\return The ID of the new team's main thread, or an error code. */
thread_id
load_image(int32 argCount, const char** args, const char** env)
{
	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
}
3798 
3799 
3800 thread_id
3801 load_image_etc(int32 argCount, const char* const* args,
3802 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3803 {
3804 	// we need to flatten the args and environment
3805 
3806 	if (args == NULL)
3807 		return B_BAD_VALUE;
3808 
3809 	// determine total needed size
3810 	int32 argSize = 0;
3811 	for (int32 i = 0; i < argCount; i++)
3812 		argSize += strlen(args[i]) + 1;
3813 
3814 	int32 envCount = 0;
3815 	int32 envSize = 0;
3816 	while (env != NULL && env[envCount] != NULL)
3817 		envSize += strlen(env[envCount++]) + 1;
3818 
3819 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3820 	if (size > MAX_PROCESS_ARGS_SIZE)
3821 		return B_TOO_MANY_ARGS;
3822 
3823 	// allocate space
3824 	char** flatArgs = (char**)malloc(size);
3825 	if (flatArgs == NULL)
3826 		return B_NO_MEMORY;
3827 
3828 	char** slot = flatArgs;
3829 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3830 
3831 	// copy arguments and environment
3832 	for (int32 i = 0; i < argCount; i++) {
3833 		int32 argSize = strlen(args[i]) + 1;
3834 		memcpy(stringSpace, args[i], argSize);
3835 		*slot++ = stringSpace;
3836 		stringSpace += argSize;
3837 	}
3838 
3839 	*slot++ = NULL;
3840 
3841 	for (int32 i = 0; i < envCount; i++) {
3842 		int32 envSize = strlen(env[i]) + 1;
3843 		memcpy(stringSpace, env[i], envSize);
3844 		*slot++ = stringSpace;
3845 		stringSpace += envSize;
3846 	}
3847 
3848 	*slot++ = NULL;
3849 
3850 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3851 		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
3852 
3853 	free(flatArgs);
3854 		// load_image_internal() unset our variable if it took over ownership
3855 
3856 	return thread;
3857 }
3858 
3859 
3860 status_t
3861 wait_for_team(team_id id, status_t* _returnCode)
3862 {
3863 	// check whether the team exists
3864 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3865 
3866 	Team* team = team_get_team_struct_locked(id);
3867 	if (team == NULL)
3868 		return B_BAD_TEAM_ID;
3869 
3870 	id = team->id;
3871 
3872 	teamsLocker.Unlock();
3873 
3874 	// wait for the main thread (it has the same ID as the team)
3875 	return wait_for_thread(id, _returnCode);
3876 }
3877 
3878 
3879 status_t
3880 kill_team(team_id id)
3881 {
3882 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3883 
3884 	Team* team = team_get_team_struct_locked(id);
3885 	if (team == NULL)
3886 		return B_BAD_TEAM_ID;
3887 
3888 	id = team->id;
3889 
3890 	teamsLocker.Unlock();
3891 
3892 	if (team == sKernelTeam)
3893 		return B_NOT_ALLOWED;
3894 
3895 	// Just kill the team's main thread (it has same ID as the team). The
3896 	// cleanup code there will take care of the team.
3897 	return kill_thread(id);
3898 }
3899 
3900 
3901 status_t
3902 _get_team_info(team_id id, team_info* info, size_t size)
3903 {
3904 	// get the team
3905 	Team* team = Team::Get(id);
3906 	if (team == NULL)
3907 		return B_BAD_TEAM_ID;
3908 	BReference<Team> teamReference(team, true);
3909 
3910 	// fill in the info
3911 	return fill_team_info(team, info, size);
3912 }
3913 
3914 
/*!	Kernel backend for get_next_team_info(): iterates over all teams by
	scanning team IDs starting at \a *cookie.
	\param cookie In: the first slot to check; out: the next slot to resume at.
	\return \c B_OK, or \c B_BAD_TEAM_ID when the iteration is exhausted. */
status_t
_get_next_team_info(int32* cookie, team_info* info, size_t size)
{
	int32 slot = *cookie;
	if (slot < 1)
		slot = 1;

	InterruptsReadSpinLocker locker(sTeamHashLock);

	team_id lastTeamID = peek_next_thread_id();
		// TODO: This is broken, since the id can wrap around!

	// get next valid team
	// linear probe of team IDs until one resolves to a live team
	Team* team = NULL;
	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
		slot++;

	if (team == NULL)
		return B_BAD_TEAM_ID;

	// get a reference to the team and unlock
	BReference<Team> teamReference(team);
	locker.Unlock();

	// fill in the info
	*cookie = ++slot;
	return fill_team_info(team, info, size);
}
3943 
3944 
/*!	Kernel backend for get_team_usage_info(): returns CPU usage of the team.
	\param size Must match sizeof(team_usage_info) exactly, guarding against
		callers compiled against a different struct layout. */
status_t
_get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
{
	if (size != sizeof(team_usage_info))
		return B_BAD_VALUE;

	// delegate to the shared implementation (flags == 0 for kernel callers)
	return common_get_team_usage_info(id, who, info, 0);
}
3953 
3954 
3955 pid_t
3956 getpid(void)
3957 {
3958 	return thread_get_current_thread()->team->id;
3959 }
3960 
3961 
/*!	POSIX getppid(): delegates to _getppid(); the argument 0 presumably
	selects the calling team (cf. _user_process_info()) -- confirm in
	_getppid(). */
pid_t
getppid()
{
	return _getppid(0);
}
3967 
3968 
3969 pid_t
3970 getpgid(pid_t id)
3971 {
3972 	if (id < 0) {
3973 		errno = EINVAL;
3974 		return -1;
3975 	}
3976 
3977 	if (id == 0) {
3978 		// get process group of the calling process
3979 		Team* team = thread_get_current_thread()->team;
3980 		TeamLocker teamLocker(team);
3981 		return team->group_id;
3982 	}
3983 
3984 	// get the team
3985 	Team* team = Team::GetAndLock(id);
3986 	if (team == NULL) {
3987 		errno = ESRCH;
3988 		return -1;
3989 	}
3990 
3991 	// get the team's process group ID
3992 	pid_t groupID = team->group_id;
3993 
3994 	team->UnlockAndReleaseReference();
3995 
3996 	return groupID;
3997 }
3998 
3999 
4000 pid_t
4001 getsid(pid_t id)
4002 {
4003 	if (id < 0) {
4004 		errno = EINVAL;
4005 		return -1;
4006 	}
4007 
4008 	if (id == 0) {
4009 		// get session of the calling process
4010 		Team* team = thread_get_current_thread()->team;
4011 		TeamLocker teamLocker(team);
4012 		return team->session_id;
4013 	}
4014 
4015 	// get the team
4016 	Team* team = Team::GetAndLock(id);
4017 	if (team == NULL) {
4018 		errno = ESRCH;
4019 		return -1;
4020 	}
4021 
4022 	// get the team's session ID
4023 	pid_t sessionID = team->session_id;
4024 
4025 	team->UnlockAndReleaseReference();
4026 
4027 	return sessionID;
4028 }
4029 
4030 
4031 //	#pragma mark - User syscalls
4032 
4033 
/*!	Syscall backend for exec*(): replaces the calling team's image.
	Returns only on failure; on success exec_team() does not return.
	\param userPath Userland path of the image to execute.
	\param userFlatArgs Userland flattened argument/environment block.
	\param flatArgsSize Size of the flattened block.
	\param argCount Number of arguments in the block.
	\param envCount Number of environment entries in the block.
	\param umask The file creation mask for the new image. */
status_t
_user_exec(const char* userPath, const char* const* userFlatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char path[B_PATH_NAME_LENGTH];

	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
		return B_BAD_ADDRESS;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);

	if (error == B_OK) {
		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
			envCount, umask);
			// this one only returns in case of error
	}

	free(flatArgs);
	return error;
}
4060 
4061 
/*!	Syscall backend for fork(): all the work happens in fork_team().
	\return The child's thread ID in the parent, 0 in the child, or an error
		code -- whatever fork_team() yields. */
thread_id
_user_fork(void)
{
	return fork_team();
}
4067 
4068 
/*!	Syscall backend for waitpid()/waitid(): waits for a child state change
	and copies the resulting siginfo and usage data back to userland.
	\param child The child (or group) specifier, as for waitpid().
	\param flags Wait flags (e.g. WNOHANG, WUNTRACED).
	\param userInfo Optional userland buffer for the siginfo_t result.
	\param usageInfo Optional userland buffer for the child's resource usage.
	\return The ID of the child whose state changed, or an error code
		(possibly via syscall restart handling). */
pid_t
_user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo,
	team_usage_info* usageInfo)
{
	// validate the userland buffers up front
	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;
	if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo))
		return B_BAD_ADDRESS;

	siginfo_t info;
	team_usage_info usage_info;
	pid_t foundChild = wait_for_child(child, flags, info, usage_info);
	if (foundChild < 0)
		return syscall_restart_handle_post(foundChild);

	// copy info back to userland
	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
		return B_BAD_ADDRESS;
	// copy usage_info back to userland
	if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info,
		sizeof(usage_info)) != B_OK) {
		return B_BAD_ADDRESS;
	}

	return foundChild;
}
4095 
4096 
/*!	Syscall backend for getsid()/getpgid()/getppid(): dispatches on \a which.
	\param process The target process ID (0 for the calling process).
	\param which One of SESSION_ID, GROUP_ID, PARENT_ID.
	\return The requested ID, or on failure the errno value set by the
		delegate (presumably a negative Haiku error code -- note the
		delegates themselves return -1 and set errno on failure). */
pid_t
_user_process_info(pid_t process, int32 which)
{
	pid_t result;
	switch (which) {
		case SESSION_ID:
			result = getsid(process);
			break;
		case GROUP_ID:
			result = getpgid(process);
			break;
		case PARENT_ID:
			result = _getppid(process);
			break;
		default:
			return B_BAD_VALUE;
	}

	// translate the POSIX-style (-1 + errno) failure into a plain return code
	return result >= 0 ? result : errno;
}
4117 
4118 
/*!	Syscall backend for setpgid().
	Either creates a new process group led by the target process, or moves
	the target process into an existing group of the same session.
	\return The (new) group ID on success, or an error code
		(\c B_BAD_VALUE, \c B_NOT_ALLOWED, \c B_NO_MEMORY, \c ESRCH,
		\c EACCES). */
pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the target
	//   process as group leader.
	// * Set the target process' process group to an already existing one in the
	//   same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == NULL) {
				// This can only happen if the team is exiting.
				ASSERT(team->state >= TEAM_STATE_SHUTDOWN);
				return ESRCH;
			}

			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			// (a brand-new group is not yet visible, so locking it is
			// always safe)
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock in
			// the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child of
			// the calling team and in the same session. (If that's the case it
			// isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child, when the child has
			// already executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different session.
			// That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll();

		return group->id;
	}
}
4276 
4277 
/*!	Syscall backend for setsid(): makes the calling team the leader of a new
	session and of a new process group, both carrying the team's ID.
	\return The new group/session ID, \c B_NO_MEMORY, or \c B_NOT_ALLOWED if
		the team is already a process group leader. */
pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll();

	return group->id;
}
4317 
4318 
4319 status_t
4320 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4321 {
4322 	status_t returnCode;
4323 	status_t status;
4324 
4325 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4326 		return B_BAD_ADDRESS;
4327 
4328 	status = wait_for_team(id, &returnCode);
4329 	if (status >= B_OK && _userReturnCode != NULL) {
4330 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4331 				!= B_OK)
4332 			return B_BAD_ADDRESS;
4333 		return B_OK;
4334 	}
4335 
4336 	return syscall_restart_handle_post(status);
4337 }
4338 
4339 
/*!	Syscall backend for load_image(): copies the flattened argument block from
	userland and creates a new team from it.
	\param userFlatArgs Userland flattened argument/environment block.
	\param flatArgsSize Size of the flattened block.
	\param argCount Number of arguments; must be at least 1 (the path).
	\param envCount Number of environment entries.
	\param priority Priority for the new team's main thread.
	\param flags Flags forwarded to load_image_internal().
	\param errorPort Port for loader error messages.
	\param errorToken Token identifying the caller on \a errorPort.
	\return The ID of the new team's main thread, or an error code. */
thread_id
_user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, int32 priority, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));

	if (argCount < 1)
		return B_BAD_VALUE;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);
	if (error != B_OK)
		return error;

	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
		errorToken);

	free(flatArgs);
		// load_image_internal() unset our variable if it took over ownership

	return thread;
}
4366 
4367 
/*!	Syscall backing exit_team(): records \a returnValue as the exit status of
	the calling thread and its team, then delivers SIGKILL to the calling
	thread so it never returns to userland.
	Only the first caller initializes the team's exit info (reason
	\c CLD_EXITED); later callers keep the already recorded status.
	\param returnValue The exit status to record for the team.
*/
void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status
	TeamLocker teamLocker(team);

	// only initialize once — a concurrent/earlier exit keeps its status
	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	// Note: GCC 13 marks the following call as potentially overflowing, since it thinks team may
	//       be `nullptr`. This cannot be the case in reality, therefore ignore this specific
	//       error.
	#pragma GCC diagnostic push
	#pragma GCC diagnostic ignored "-Wstringop-overflow"
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();
	#pragma GCC diagnostic pop

	// Send this thread a SIGKILL. This makes sure the thread will not return to
	// userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}
4407 
4408 
4409 status_t
4410 _user_kill_team(team_id team)
4411 {
4412 	return kill_team(team);
4413 }
4414 
4415 
4416 status_t
4417 _user_get_team_info(team_id id, team_info* userInfo, size_t size)
4418 {
4419 	status_t status;
4420 	team_info info;
4421 
4422 	if (size > sizeof(team_info))
4423 		return B_BAD_VALUE;
4424 
4425 	if (!IS_USER_ADDRESS(userInfo))
4426 		return B_BAD_ADDRESS;
4427 
4428 	status = _get_team_info(id, &info, size);
4429 	if (status == B_OK) {
4430 		if (user_memcpy(userInfo, &info, size) < B_OK)
4431 			return B_BAD_ADDRESS;
4432 	}
4433 
4434 	return status;
4435 }
4436 
4437 
4438 status_t
4439 _user_get_next_team_info(int32* userCookie, team_info* userInfo, size_t size)
4440 {
4441 	status_t status;
4442 	team_info info;
4443 	int32 cookie;
4444 
4445 	if (size > sizeof(team_info))
4446 		return B_BAD_VALUE;
4447 
4448 	if (!IS_USER_ADDRESS(userCookie)
4449 		|| !IS_USER_ADDRESS(userInfo)
4450 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4451 		return B_BAD_ADDRESS;
4452 
4453 	status = _get_next_team_info(&cookie, &info, size);
4454 	if (status != B_OK)
4455 		return status;
4456 
4457 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4458 		|| user_memcpy(userInfo, &info, size) < B_OK)
4459 		return B_BAD_ADDRESS;
4460 
4461 	return status;
4462 }
4463 
4464 
4465 team_id
4466 _user_get_current_team(void)
4467 {
4468 	return team_get_current_team_id();
4469 }
4470 
4471 
4472 status_t
4473 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4474 	size_t size)
4475 {
4476 	if (size != sizeof(team_usage_info))
4477 		return B_BAD_VALUE;
4478 
4479 	team_usage_info info;
4480 	status_t status = common_get_team_usage_info(team, who, &info,
4481 		B_CHECK_PERMISSION);
4482 
4483 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4484 		|| user_memcpy(userInfo, &info, size) != B_OK) {
4485 		return B_BAD_ADDRESS;
4486 	}
4487 
4488 	return status;
4489 }
4490 
4491 
/*!	Syscall backing get_extended_team_info(): gathers extended information
	about the given team into a flattened KMessage and copies it to userland.
	Currently only \c B_TEAM_INFO_BASIC is supported (ID, name, process
	group/session, real/effective uid/gid, and cwd node ref).
	\param teamID The ID of the team in question.
	\param flags Selects which info classes to gather (\c B_TEAM_INFO_*).
	\param buffer Userland buffer for the flattened message; may be \c NULL
		only if \a size is 0.
	\param size Size of \a buffer in bytes.
	\param _sizeNeeded Userland address to receive the required buffer size;
		written even when the buffer is too small.
	\return \c B_OK on success, \c B_BAD_ADDRESS for invalid user pointers,
		\c B_BAD_TEAM_ID if the team doesn't exist, \c B_NO_MEMORY if
		building the message fails, or \c B_BUFFER_OVERFLOW if \a buffer is
		too small (the needed size has been stored in that case).
*/
status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// allocate memory for a copy of the needed team data
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		} teamClone;

		io_context* ioContext;
		{
			// get the team structure
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);

			// Copy the data into the local clone, so the team lock can be
			// dropped (at the end of this scope) before building the message.
			teamClone.id = team->id;
			strlcpy(teamClone.name, team->Name(), sizeof(teamClone.name));
			teamClone.group_id = team->group_id;
			teamClone.session_id = team->session_id;
			teamClone.real_uid = team->real_uid;
			teamClone.real_gid = team->real_gid;
			teamClone.effective_uid = team->effective_uid;
			teamClone.effective_gid = team->effective_gid;

			// also fetch a reference to the I/O context
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		// release the I/O context reference when leaving the function
		CObjectDeleter<io_context, void, vfs_put_io_context>
			ioContextPutter(ioContext);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone.id) != B_OK
			|| info.AddString("name", teamClone.name) != B_OK
			|| info.AddInt32("process group", teamClone.group_id) != B_OK
			|| info.AddInt32("session", teamClone.session_id) != B_OK
			|| info.AddInt32("uid", teamClone.real_uid) != B_OK
			|| info.AddInt32("gid", teamClone.real_gid) != B_OK
			|| info.AddInt32("euid", teamClone.effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone.effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		// (io_mutex is taken only here, after the team lock has been released)
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			MutexLocker ioContextLocker(ioContext->io_mutex);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
4585