xref: /haiku/src/system/kernel/team.cpp (revision 06b79f550944f3a88a70d9fa17ddfe804721253a)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 /*!	Team functions */
13 
14 
15 #include <team.h>
16 
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22 
23 #include <OS.h>
24 
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27 
28 #include <extended_system_info_defs.h>
29 
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <sem.h>
47 #include <syscall_process_info.h>
48 #include <syscall_restart.h>
49 #include <syscalls.h>
50 #include <tls.h>
51 #include <tracing.h>
52 #include <user_runtime.h>
53 #include <user_thread.h>
54 #include <usergroup.h>
55 #include <vfs.h>
56 #include <vm/vm.h>
57 #include <vm/VMAddressSpace.h>
58 #include <util/AutoLock.h>
59 
60 #include "TeamThreadTables.h"
61 
62 
63 //#define TRACE_TEAM
64 #ifdef TRACE_TEAM
65 #	define TRACE(x) dprintf x
66 #else
67 #	define TRACE(x) ;
68 #endif
69 
70 
// Hash table key type identifying a team by its ID.
struct team_key {
	team_id id;
};
74 
// Bundle of arguments handed to the team creation/exec code paths.
struct team_arg {
	char	*path;				// path of the executable
	char	**flat_args;		// flattened args -- presumably args + env in
								// one buffer; confirm at the fill site
	size_t	flat_args_size;		// size of the flat_args buffer
	uint32	arg_count;			// number of program arguments
	uint32	env_count;			// number of environment variables
	mode_t	umask;				// umask for the new team
	uint32	flags;				// TEAM_ARGS_FLAG_* (e.g. no ASLR)
	port_id	error_port;			// port for reporting errors -- presumably
								// early load errors; verify against callers
	uint32	error_token;		// token to send along with such a report
};
86 
87 #define TEAM_ARGS_FLAG_NO_ASLR	0x01
88 
89 
90 namespace {
91 
92 
// Publishes team events to registered listeners via the kernel notification
// framework; registered under the name "teams" (see the constructor).
class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};
99 
100 
101 // #pragma mark - TeamTable
102 
103 
104 typedef BKernel::TeamThreadTable<Team> TeamTable;
105 
106 
107 // #pragma mark - ProcessGroupHashDefinition
108 
109 
// Hash table definition mapping a process group ID (pid_t) to its
// ProcessGroup object; used for the sGroupHash table below.
struct ProcessGroupHashDefinition {
	typedef pid_t			KeyType;
	typedef	ProcessGroup	ValueType;

	// the group ID is used as the hash value directly
	size_t HashKey(pid_t key) const
	{
		return key;
	}

	size_t Hash(ProcessGroup* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(pid_t key, ProcessGroup* value) const
	{
		return value->id == key;
	}

	// intrusive link: the table chains groups through their "next" field
	ProcessGroup*& GetLink(ProcessGroup* value) const
	{
		return value->next;
	}
};
134 
135 typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
136 
137 
138 }	// unnamed namespace
139 
140 
141 // #pragma mark -
142 
143 
// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static spinlock sTeamHashLock = B_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

// the one kernel team; NULL here, set elsewhere during initialization
static Team* sKernelTeam = NULL;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

// service publishing team events (see TeamNotificationService::Notify())
static TeamNotificationService sNotificationService;

// sizing constants for a team's user data area (reserved vs. initial size)
static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
169 
170 
171 // #pragma mark - TeamListIterator
172 
173 
174 TeamListIterator::TeamListIterator()
175 {
176 	// queue the entry
177 	InterruptsSpinLocker locker(sTeamHashLock);
178 	sTeamHash.InsertIteratorEntry(&fEntry);
179 }
180 
181 
182 TeamListIterator::~TeamListIterator()
183 {
184 	// remove the entry
185 	InterruptsSpinLocker locker(sTeamHashLock);
186 	sTeamHash.RemoveIteratorEntry(&fEntry);
187 }
188 
189 
190 Team*
191 TeamListIterator::Next()
192 {
193 	// get the next team -- if there is one, get reference for it
194 	InterruptsSpinLocker locker(sTeamHashLock);
195 	Team* team = sTeamHash.NextElement(&fEntry);
196 	if (team != NULL)
197 		team->AcquireReference();
198 
199 	return team;
200 }
201 
202 
203 // #pragma mark - Tracing
204 
205 
206 #if TEAM_TRACING
207 namespace TeamTracing {
208 
// Trace entry: a team was forked; records the ID of the thread created by
// the fork.
class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
	}

private:
	thread_id			fForkedThread;
};
226 
227 
228 class ExecTeam : public AbstractTraceEntry {
229 public:
230 	ExecTeam(const char* path, int32 argCount, const char* const* args,
231 			int32 envCount, const char* const* env)
232 		:
233 		fArgCount(argCount),
234 		fArgs(NULL)
235 	{
236 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
237 			false);
238 
239 		// determine the buffer size we need for the args
240 		size_t argBufferSize = 0;
241 		for (int32 i = 0; i < argCount; i++)
242 			argBufferSize += strlen(args[i]) + 1;
243 
244 		// allocate a buffer
245 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
246 		if (fArgs) {
247 			char* buffer = fArgs;
248 			for (int32 i = 0; i < argCount; i++) {
249 				size_t argSize = strlen(args[i]) + 1;
250 				memcpy(buffer, args[i], argSize);
251 				buffer += argSize;
252 			}
253 		}
254 
255 		// ignore env for the time being
256 		(void)envCount;
257 		(void)env;
258 
259 		Initialized();
260 	}
261 
262 	virtual void AddDump(TraceOutput& out)
263 	{
264 		out.Print("team exec, \"%p\", args:", fPath);
265 
266 		if (fArgs != NULL) {
267 			char* args = fArgs;
268 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
269 				out.Print(" \"%s\"", args);
270 				args += strlen(args) + 1;
271 			}
272 		} else
273 			out.Print(" <too long>");
274 	}
275 
276 private:
277 	char*	fPath;
278 	int32	fArgCount;
279 	char*	fArgs;
280 };
281 
282 
/*!	Returns a human-readable name for the given job control state, for use in
	trace entry dumps.
*/
static const char*
job_control_state_name(job_control_state state)
{
	switch (state) {
		case JOB_CONTROL_STATE_NONE:
			return "none";
		case JOB_CONTROL_STATE_STOPPED:
			return "stopped";
		case JOB_CONTROL_STATE_CONTINUED:
			return "continued";
		case JOB_CONTROL_STATE_DEAD:
			return "dead";
		default:
			return "invalid";
	}
}
299 
300 
// Trace entry: a team's job control state was changed, optionally due to a
// signal (0 if none).
class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %" B_PRId32 ", "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;	// signal number, or 0 for none
};
324 
325 
// Trace entry: a thread entered wait_for_child() for the given child and
// wait flags.
class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %" B_PRId32 ", "
			"flags: %#" B_PRIx32, fChild, fFlags);
	}

private:
	pid_t	fChild;
	uint32	fFlags;
};
346 
347 
// Trace entry: a wait-for-child operation completed, either successfully
// (with the child's job control data) or with an error.
class WaitForChildDone : public AbstractTraceEntry {
public:
	WaitForChildDone(const job_control_entry& entry)
		:
		fState(entry.state),
		fTeam(entry.thread),
		fStatus(entry.status),
		fReason(entry.reason),
		fSignal(entry.signal)
	{
		Initialized();
	}

	// Error variant: the (negative) error code is stored in fTeam; the other
	// fields remain uninitialized and are not read by AddDump() in that case.
	WaitForChildDone(status_t error)
		:
		fTeam(error)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		// a non-negative fTeam means the success constructor was used
		if (fTeam >= 0) {
			out.Print("team wait for child done, team: %" B_PRId32 ", "
				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
				fTeam, job_control_state_name(fState), fStatus, fReason,
				fSignal);
		} else {
			out.Print("team wait for child failed, error: "
				"%#" B_PRIx32 ", ", fTeam);
		}
	}

private:
	job_control_state	fState;
	team_id				fTeam;		// team ID, or error code if negative
	status_t			fStatus;
	uint16				fReason;
	uint16				fSignal;
};
388 
389 }	// namespace TeamTracing
390 
391 #	define T(x) new(std::nothrow) TeamTracing::x;
392 #else
393 #	define T(x) ;
394 #endif
395 
396 
397 //	#pragma mark - TeamNotificationService
398 
399 
// registers the service under the name "teams"
TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}
404 
405 
406 void
407 TeamNotificationService::Notify(uint32 eventCode, Team* team)
408 {
409 	char eventBuffer[128];
410 	KMessage event;
411 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
412 	event.AddInt32("event", eventCode);
413 	event.AddInt32("team", team->id);
414 	event.AddPointer("teamStruct", team);
415 
416 	DefaultNotificationService::Notify(event, eventCode);
417 }
418 
419 
420 //	#pragma mark - Team
421 
422 
/*!	Creates a Team object with the given \a id.
	The allocations that can fail here (job_control_entry,
	fQueuedSignalsCounter) are not checked in the constructor; Team::Create()
	verifies them afterwards.
*/
Team::Team(team_id id, bool kernel)
{
	// store the given ID
	this->id = id;
	visible = true;
	serial_number = -1;

	// init mutex
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	hash_next = siblings_next = children = parent = NULL;
	fName[0] = '\0';
	fArgs[0] = '\0';
	num_threads = 0;
	io_context = NULL;
	address_space = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	thread_list = NULL;
	main_thread = NULL;
	loading_info = NULL;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	death_entry = NULL;
	user_data_area = -1;
	user_data = 0;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	commpage_address = NULL;

	supplementary_groups = NULL;
	supplementary_group_count = 0;

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;

	// dead threads
	list_init(&dead_threads);
	dead_threads_count = 0;

	// dead children
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// job control entry; the entry's "thread" field holds our team ID
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	list_init(&sem_list);
	list_init_etc(&port_list, port_team_link_offset());
	list_init(&image_list);
	list_init(&watcher_list);

	clear_team_debug_info(&debug_info, true);

	// init dead/stopped/continued children condition vars
	dead_children.condition_variable.Init(&dead_children, "team children");

	B_INITIALIZE_SPINLOCK(&time_lock);
	B_INITIALIZE_SPINLOCK(&signal_lock);

	// the kernel team has no queued-signals limit (-1)
	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));

	fUserDefinedTimerCount = 0;

	fCoreDumpCondition = NULL;
}
509 
510 
Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	if (io_context != NULL)
		vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	// false: delete all timers, not only the user-defined ones
	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	// free the death entries of threads that died in this team
	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	// delete the job control entries of dead children
	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	// free the singly linked list of spare user thread structures
	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	malloc_referenced_release(supplementary_groups);

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}
548 
549 
550 /*static*/ Team*
551 Team::Create(team_id id, const char* name, bool kernel)
552 {
553 	// create the team object
554 	Team* team = new(std::nothrow) Team(id, kernel);
555 	if (team == NULL)
556 		return NULL;
557 	ObjectDeleter<Team> teamDeleter(team);
558 
559 	if (name != NULL)
560 		team->SetName(name);
561 
562 	// check initialization
563 	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
564 		return NULL;
565 
566 	// finish initialization (arch specifics)
567 	if (arch_team_init_team_struct(team, kernel) != B_OK)
568 		return NULL;
569 
570 	if (!kernel) {
571 		status_t error = user_timer_create_team_timers(team);
572 		if (error != B_OK)
573 			return NULL;
574 	}
575 
576 	// everything went fine
577 	return teamDeleter.Detach();
578 }
579 
580 
581 /*!	\brief Returns the team with the given ID.
582 	Returns a reference to the team.
583 	Team and thread spinlock must not be held.
584 */
585 /*static*/ Team*
586 Team::Get(team_id id)
587 {
588 	if (id == B_CURRENT_TEAM) {
589 		Team* team = thread_get_current_thread()->team;
590 		team->AcquireReference();
591 		return team;
592 	}
593 
594 	InterruptsSpinLocker locker(sTeamHashLock);
595 	Team* team = sTeamHash.Lookup(id);
596 	if (team != NULL)
597 		team->AcquireReference();
598 	return team;
599 }
600 
601 
602 /*!	\brief Returns the team with the given ID in a locked state.
603 	Returns a reference to the team.
604 	Team and thread spinlock must not be held.
605 */
606 /*static*/ Team*
607 Team::GetAndLock(team_id id)
608 {
609 	// get the team
610 	Team* team = Get(id);
611 	if (team == NULL)
612 		return NULL;
613 
614 	// lock it
615 	team->Lock();
616 
617 	// only return the team, when it isn't already dying
618 	if (team->state >= TEAM_STATE_SHUTDOWN) {
619 		team->Unlock();
620 		team->ReleaseReference();
621 		return NULL;
622 	}
623 
624 	return team;
625 }
626 
627 
628 /*!	Locks the team and its parent team (if any).
629 	The caller must hold a reference to the team or otherwise make sure that
630 	it won't be deleted.
631 	If the team doesn't have a parent, only the team itself is locked. If the
632 	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
633 	only the team itself is locked.
634 
635 	\param dontLockParentIfKernel If \c true, the team's parent team is only
636 		locked, if it is not the kernel team.
637 */
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent. This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);
			// the reference keeps the parent alive while our lock is dropped

		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}
670 
671 
672 /*!	Unlocks the team and its parent team (if any).
673 */
674 void
675 Team::UnlockTeamAndParent()
676 {
677 	if (parent != NULL)
678 		parent->Unlock();
679 
680 	Unlock();
681 }
682 
683 
684 /*!	Locks the team, its parent team (if any), and the team's process group.
685 	The caller must hold a reference to the team or otherwise make sure that
686 	it won't be deleted.
687 	If the team doesn't have a parent, only the team itself is locked.
688 */
689 void
690 Team::LockTeamParentAndProcessGroup()
691 {
692 	LockTeamAndProcessGroup();
693 
694 	// We hold the group's and the team's lock, but not the parent team's lock.
695 	// If we have a parent, try to lock it.
696 	if (this->parent == NULL || this->parent->TryLock())
697 		return;
698 
699 	// No success -- unlock the team and let LockTeamAndParent() do the rest of
700 	// the job.
701 	Unlock();
702 	LockTeamAndParent(false);
703 }
704 
705 
706 /*!	Unlocks the team, its parent team (if any), and the team's process group.
707 */
708 void
709 Team::UnlockTeamParentAndProcessGroup()
710 {
711 	group->Unlock();
712 
713 	if (parent != NULL)
714 		parent->Unlock();
715 
716 	Unlock();
717 }
718 
719 
/*!	Locks the team and its process group.
	As with LockTeamAndParent(), the caller must hold a reference to the team
	or otherwise make sure it won't be deleted.
*/
void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);

		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}
751 
752 
753 void
754 Team::UnlockTeamAndProcessGroup()
755 {
756 	group->Unlock();
757 	Unlock();
758 }
759 
760 
761 void
762 Team::SetName(const char* name)
763 {
764 	if (const char* lastSlash = strrchr(name, '/'))
765 		name = lastSlash + 1;
766 
767 	strlcpy(fName, name, B_OS_NAME_LENGTH);
768 }
769 
770 
/*!	Sets the team's argument string (as shown by the team KDL commands),
	truncating it to the size of the buffer.
*/
void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}
776 
777 
778 void
779 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
780 {
781 	fArgs[0] = '\0';
782 	strlcpy(fArgs, path, sizeof(fArgs));
783 	for (int i = 0; i < otherArgCount; i++) {
784 		strlcat(fArgs, " ", sizeof(fArgs));
785 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
786 	}
787 }
788 
789 
790 void
791 Team::ResetSignalsOnExec()
792 {
793 	// We are supposed to keep pending signals. Signal actions shall be reset
794 	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
795 	// (for SIGCHLD it's implementation-defined). Others shall be reset to
796 	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
797 	// flags, but since there aren't any handlers, they make little sense, so
798 	// we clear them.
799 
800 	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
801 		struct sigaction& action = SignalActionFor(i);
802 		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
803 			action.sa_handler = SIG_DFL;
804 
805 		action.sa_mask = 0;
806 		action.sa_flags = 0;
807 		action.sa_userdata = NULL;
808 	}
809 }
810 
811 
/*!	Copies all signal actions from \a parent into this team. */
void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}
817 
818 
819 /*!	Adds the given user timer to the team and, if user-defined, assigns it an
820 	ID.
821 
822 	The caller must hold the team's lock.
823 
824 	\param timer The timer to be added. If it doesn't have an ID yet, it is
825 		considered user-defined and will be assigned an ID.
826 	\return \c B_OK, if the timer was added successfully, another error code
827 		otherwise.
828 */
829 status_t
830 Team::AddUserTimer(UserTimer* timer)
831 {
832 	// don't allow addition of timers when already shutting the team down
833 	if (state >= TEAM_STATE_SHUTDOWN)
834 		return B_BAD_TEAM_ID;
835 
836 	// If the timer is user-defined, check timer limit and increment
837 	// user-defined count.
838 	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
839 		return EAGAIN;
840 
841 	fUserTimers.AddTimer(timer);
842 
843 	return B_OK;
844 }
845 
846 
847 /*!	Removes the given user timer from the team.
848 
849 	The caller must hold the team's lock.
850 
851 	\param timer The timer to be removed.
852 
853 */
854 void
855 Team::RemoveUserTimer(UserTimer* timer)
856 {
857 	fUserTimers.RemoveTimer(timer);
858 
859 	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
860 		UserDefinedTimersRemoved(1);
861 }
862 
863 
864 /*!	Deletes all (or all user-defined) user timers of the team.
865 
866 	Timer's belonging to the team's threads are not affected.
867 	The caller must hold the team's lock.
868 
869 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
870 		otherwise all timers are deleted.
871 */
872 void
873 Team::DeleteUserTimers(bool userDefinedOnly)
874 {
875 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
876 	UserDefinedTimersRemoved(count);
877 }
878 
879 
880 /*!	If not at the limit yet, increments the team's user-defined timer count.
881 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
882 */
883 bool
884 Team::CheckAddUserDefinedTimer()
885 {
886 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
887 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
888 		atomic_add(&fUserDefinedTimerCount, -1);
889 		return false;
890 	}
891 
892 	return true;
893 }
894 
895 
896 /*!	Subtracts the given count for the team's user-defined timer count.
897 	\param count The count to subtract.
898 */
void
Team::UserDefinedTimersRemoved(int32 count)
{
	// give the slots back to the per-team quota
	atomic_add(&fUserDefinedTimerCount, -count);
}
904 
905 
/*!	Deactivates all of the team's CPU-time and user-time user timers.
	Looping on the list head relies on Deactivate() unlinking the timer from
	the respective list -- confirm in the timer implementations.
*/
void
Team::DeactivateCPUTimeUserTimers()
{
	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();

	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
		timer->Deactivate();
}
915 
916 
917 /*!	Returns the team's current total CPU time (kernel + user + offset).
918 
919 	The caller must hold \c time_lock.
920 
921 	\param ignoreCurrentRun If \c true and the current thread is one team's
922 		threads, don't add the time since the last time \c last_time was
923 		updated. Should be used in "thread unscheduled" scheduler callbacks,
924 		since although the thread is still running at that time, its time has
925 		already been stopped.
926 	\return The team's current total CPU time.
927 */
bigtime_t
Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
{
	// start with the time no longer attributed to any live thread
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		// don't re-acquire the time lock the caller already holds
		bool alreadyLocked = thread == lockedThread;
		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
		time += thread->kernel_time + thread->user_time;

		// add the time since last_time was last updated (see the function
		// comment above regarding ignoreCurrentRun)
		if (thread->last_time != 0) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}

		// leave the caller's lock locked
		if (alreadyLocked)
			threadTimeLocker.Detach();
	}

	return time;
}
954 
955 
956 /*!	Returns the team's current user CPU time.
957 
958 	The caller must hold \c time_lock.
959 
960 	\return The team's current user CPU time.
961 */
962 bigtime_t
963 Team::UserCPUTime() const
964 {
965 	bigtime_t time = dead_threads_user_time;
966 
967 	bigtime_t now = system_time();
968 
969 	for (Thread* thread = thread_list; thread != NULL;
970 			thread = thread->team_next) {
971 		SpinLocker threadTimeLocker(thread->time_lock);
972 		time += thread->user_time;
973 
974 		if (thread->last_time != 0 && !thread->in_kernel)
975 			time += now - thread->last_time;
976 	}
977 
978 	return time;
979 }
980 
981 
982 //	#pragma mark - ProcessGroup
983 
984 
/*!	Creates a process group with the given \a id.
	The group is not publicly accessible yet; that happens in
	Publish()/PublishLocked().
*/
ProcessGroup::ProcessGroup(pid_t id)
	:
	id(id),
	teams(NULL),
	fSession(NULL),
	fInOrphanedCheckList(false)
{
	// name the lock after the group ID for easier debugging
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}
996 
997 
ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session; a non-NULL
	// session implies the group was published (cf. PublishLocked())
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		// give back the session reference acquired in PublishLocked()
		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}
1021 
1022 
1023 /*static*/ ProcessGroup*
1024 ProcessGroup::Get(pid_t id)
1025 {
1026 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1027 	ProcessGroup* group = sGroupHash.Lookup(id);
1028 	if (group != NULL)
1029 		group->AcquireReference();
1030 	return group;
1031 }
1032 
1033 
1034 /*!	Adds the group the given session and makes it publicly accessible.
1035 	The caller must not hold the process group hash lock.
1036 */
1037 void
1038 ProcessGroup::Publish(ProcessSession* session)
1039 {
1040 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1041 	PublishLocked(session);
1042 }
1043 
1044 
1045 /*!	Adds the group to the given session and makes it publicly accessible.
1046 	The caller must hold the process group hash lock.
1047 */
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	// the group must not have been published before
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	// the group holds a reference to its session (released in ~ProcessGroup())
	fSession = session;
	fSession->AcquireReference();

	sGroupHash.InsertUnchecked(this);
}
1058 
1059 
1060 /*!	Checks whether the process group is orphaned.
1061 	The caller must hold the group's lock.
1062 	\return \c true, if the group is orphaned, \c false otherwise.
1063 */
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	// check each member team until one with a disqualifying parent is found
	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		// a parent in the same session but a different group means the group
		// is not orphaned
		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}
1089 
1090 
1091 void
1092 ProcessGroup::ScheduleOrphanedCheck()
1093 {
1094 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1095 
1096 	if (!fInOrphanedCheckList) {
1097 		sOrphanedCheckProcessGroups.Add(this);
1098 		fInOrphanedCheckList = true;
1099 	}
1100 }
1101 
1102 
/*!	Clears the group's orphaned-check flag.
	NOTE(review): presumably the caller holds sOrphanedCheckLock and has
	removed the group from sOrphanedCheckProcessGroups -- confirm at the call
	sites (not in this file chunk).
*/
void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}
1108 
1109 
1110 //	#pragma mark - ProcessSession
1111 
1112 
/*!	Creates a session with the given \a id, with no controlling TTY and no
	foreground process group yet (both -1).
*/
ProcessSession::ProcessSession(pid_t id)
	:
	id(id),
	controlling_tty(-1),
	foreground_group(-1)
{
	// name the lock after the session ID for easier debugging
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}
1123 
1124 
ProcessSession::~ProcessSession()
{
	// only the lock needs explicit cleanup
	mutex_destroy(&fLock);
}
1129 
1130 
1131 //	#pragma mark - KDL functions
1132 
1133 
1134 static void
1135 _dump_team_info(Team* team)
1136 {
1137 	kprintf("TEAM: %p\n", team);
1138 	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
1139 		team->id);
1140 	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
1141 	kprintf("name:             '%s'\n", team->Name());
1142 	kprintf("args:             '%s'\n", team->Args());
1143 	kprintf("hash_next:        %p\n", team->hash_next);
1144 	kprintf("parent:           %p", team->parent);
1145 	if (team->parent != NULL) {
1146 		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
1147 	} else
1148 		kprintf("\n");
1149 
1150 	kprintf("children:         %p\n", team->children);
1151 	kprintf("num_threads:      %d\n", team->num_threads);
1152 	kprintf("state:            %d\n", team->state);
1153 	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
1154 	kprintf("io_context:       %p\n", team->io_context);
1155 	if (team->address_space)
1156 		kprintf("address_space:    %p\n", team->address_space);
1157 	kprintf("user data:        %p (area %" B_PRId32 ")\n",
1158 		(void*)team->user_data, team->user_data_area);
1159 	kprintf("free user thread: %p\n", team->free_user_threads);
1160 	kprintf("main_thread:      %p\n", team->main_thread);
1161 	kprintf("thread_list:      %p\n", team->thread_list);
1162 	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
1163 	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
1164 }
1165 
1166 
/*!	KDL command: prints info about one team.
	Without an argument the current thread's team is dumped; otherwise the
	argument is interpreted as a Team address (if a kernel address) or as a
	team ID or name.
*/
static int
dump_team_info(int argc, char** argv)
{
	ulong arg;
	bool found = false;

	if (argc < 2) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	arg = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(arg)) {
		// semi-hack
		_dump_team_info((Team*)arg);
		return 0;
	}

	// walk through the team table, trying to match name or id
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == (team_id)arg) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found)
		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
	return 0;
}
1204 
1205 
1206 static int
1207 dump_teams(int argc, char** argv)
1208 {
1209 	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
1210 		B_PRINTF_POINTER_WIDTH, "parent");
1211 
1212 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1213 		Team* team = it.Next();) {
1214 		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
1215 	}
1216 
1217 	return 0;
1218 }
1219 
1220 
1221 //	#pragma mark - Private functions
1222 
1223 
1224 /*!	Inserts team \a team into the child list of team \a parent.
1225 
1226 	The caller must hold the lock of both \a parent and \a team.
1227 
1228 	\param parent The parent team.
1229 	\param team The team to be inserted into \a parent's child list.
1230 */
1231 static void
1232 insert_team_into_parent(Team* parent, Team* team)
1233 {
1234 	ASSERT(parent != NULL);
1235 
1236 	team->siblings_next = parent->children;
1237 	parent->children = team;
1238 	team->parent = parent;
1239 }
1240 
1241 
1242 /*!	Removes team \a team from the child list of team \a parent.
1243 
1244 	The caller must hold the lock of both \a parent and \a team.
1245 
1246 	\param parent The parent team.
1247 	\param team The team to be removed from \a parent's child list.
1248 */
1249 static void
1250 remove_team_from_parent(Team* parent, Team* team)
1251 {
1252 	Team* child;
1253 	Team* last = NULL;
1254 
1255 	for (child = parent->children; child != NULL;
1256 			child = child->siblings_next) {
1257 		if (child == team) {
1258 			if (last == NULL)
1259 				parent->children = child->siblings_next;
1260 			else
1261 				last->siblings_next = child->siblings_next;
1262 
1263 			team->parent = NULL;
1264 			break;
1265 		}
1266 		last = child;
1267 	}
1268 }
1269 
1270 
1271 /*!	Returns whether the given team is a session leader.
1272 	The caller must hold the team's lock or its process group's lock.
1273 */
1274 static bool
1275 is_session_leader(Team* team)
1276 {
1277 	return team->session_id == team->id;
1278 }
1279 
1280 
1281 /*!	Returns whether the given team is a process group leader.
1282 	The caller must hold the team's lock or its process group's lock.
1283 */
1284 static bool
1285 is_process_group_leader(Team* team)
1286 {
1287 	return team->group_id == team->id;
1288 }
1289 
1290 
1291 /*!	Inserts the given team into the given process group.
1292 	The caller must hold the process group's lock, the team's lock, and the
1293 	team's parent's lock.
1294 */
1295 static void
1296 insert_team_into_group(ProcessGroup* group, Team* team)
1297 {
1298 	team->group = group;
1299 	team->group_id = group->id;
1300 	team->session_id = group->Session()->id;
1301 
1302 	team->group_next = group->teams;
1303 	group->teams = team;
1304 	group->AcquireReference();
1305 }
1306 
1307 
1308 /*!	Removes the given team from its process group.
1309 
1310 	The caller must hold the process group's lock, the team's lock, and the
1311 	team's parent's lock. Interrupts must be enabled.
1312 
1313 	\param team The team that'll be removed from its process group.
1314 */
1315 static void
1316 remove_team_from_group(Team* team)
1317 {
1318 	ProcessGroup* group = team->group;
1319 	Team* current;
1320 	Team* last = NULL;
1321 
1322 	// the team must be in a process group to let this function have any effect
1323 	if  (group == NULL)
1324 		return;
1325 
1326 	for (current = group->teams; current != NULL;
1327 			current = current->group_next) {
1328 		if (current == team) {
1329 			if (last == NULL)
1330 				group->teams = current->group_next;
1331 			else
1332 				last->group_next = current->group_next;
1333 
1334 			team->group = NULL;
1335 			break;
1336 		}
1337 		last = current;
1338 	}
1339 
1340 	team->group = NULL;
1341 	team->group_next = NULL;
1342 
1343 	group->ReleaseReference();
1344 }
1345 
1346 
/*!	Creates the team's "user data" area, a userland-visible area from which
	per-team userland data (e.g. user_thread structures) are allocated.

	\param team The team to create the area for. On success its \c user_data,
		\c user_data_area, \c used_user_data, \c user_data_size, and
		\c free_user_threads fields are initialized.
	\param exactAddress If not \c NULL, the area is created at exactly this
		userland address (used by fork() to mirror the parent's layout).
		Otherwise a randomized address above \c KERNEL_USER_DATA_BASE is
		chosen.
	\return \c B_OK on success, another error code otherwise.
*/
static status_t
create_team_user_data(Team* team, void* exactAddress = NULL)
{
	void* address;
	uint32 addressSpec;

	if (exactAddress != NULL) {
		address = exactAddress;
		addressSpec = B_EXACT_ADDRESS;
	} else {
		address = (void*)KERNEL_USER_DATA_BASE;
		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
	}

	// Reserve the full range the area may ever grow into, so that later
	// resizing cannot collide with unrelated allocations.
	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);

	virtual_address_restrictions virtualRestrictions = {};
	if (result == B_OK || exactAddress != NULL) {
		// Either the reservation succeeded, or an exact address was
		// requested -- in both cases create the area at that exact address.
		if (exactAddress != NULL)
			virtualRestrictions.address = exactAddress;
		else
			virtualRestrictions.address = address;
		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	} else {
		// Reserving failed -- fall back to letting create_area_etc() pick a
		// randomized base address itself (without a surrounding reservation).
		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
	}

	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area",
		kTeamUserDataInitialSize, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0,
		&virtualRestrictions, &physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	// Record where the area ended up; allocation starts from offset 0.
	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = kTeamUserDataInitialSize;
	team->free_user_threads = NULL;

	return B_OK;
}
1390 
1391 
1392 static void
1393 delete_team_user_data(Team* team)
1394 {
1395 	if (team->user_data_area >= 0) {
1396 		vm_delete_area(team->id, team->user_data_area, true);
1397 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1398 			kTeamUserDataReservedSize);
1399 
1400 		team->user_data = 0;
1401 		team->used_user_data = 0;
1402 		team->user_data_size = 0;
1403 		team->user_data_area = -1;
1404 		while (free_user_thread* entry = team->free_user_threads) {
1405 			team->free_user_threads = entry->next;
1406 			free(entry);
1407 		}
1408 	}
1409 }
1410 
1411 
1412 static status_t
1413 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1414 	int32 argCount, int32 envCount, char**& _flatArgs)
1415 {
1416 	if (argCount < 0 || envCount < 0)
1417 		return B_BAD_VALUE;
1418 
1419 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1420 		return B_TOO_MANY_ARGS;
1421 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1422 		return B_BAD_VALUE;
1423 
1424 	if (!IS_USER_ADDRESS(userFlatArgs))
1425 		return B_BAD_ADDRESS;
1426 
1427 	// allocate kernel memory
1428 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1429 	if (flatArgs == NULL)
1430 		return B_NO_MEMORY;
1431 
1432 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1433 		free(flatArgs);
1434 		return B_BAD_ADDRESS;
1435 	}
1436 
1437 	// check and relocate the array
1438 	status_t error = B_OK;
1439 	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
1440 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1441 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1442 		if (i == argCount || i == argCount + envCount + 1) {
1443 			// check array null termination
1444 			if (flatArgs[i] != NULL) {
1445 				error = B_BAD_VALUE;
1446 				break;
1447 			}
1448 		} else {
1449 			// check string
1450 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1451 			size_t maxLen = stringEnd - arg;
1452 			if (arg < stringBase || arg >= stringEnd
1453 					|| strnlen(arg, maxLen) == maxLen) {
1454 				error = B_BAD_VALUE;
1455 				break;
1456 			}
1457 
1458 			flatArgs[i] = arg;
1459 		}
1460 	}
1461 
1462 	if (error == B_OK)
1463 		_flatArgs = flatArgs;
1464 	else
1465 		free(flatArgs);
1466 
1467 	return error;
1468 }
1469 
1470 
1471 static void
1472 free_team_arg(struct team_arg* teamArg)
1473 {
1474 	if (teamArg != NULL) {
1475 		free(teamArg->flat_args);
1476 		free(teamArg->path);
1477 		free(teamArg);
1478 	}
1479 }
1480 
1481 
1482 static status_t
1483 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1484 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1485 	port_id port, uint32 token)
1486 {
1487 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1488 	if (teamArg == NULL)
1489 		return B_NO_MEMORY;
1490 
1491 	teamArg->path = strdup(path);
1492 	if (teamArg->path == NULL) {
1493 		free(teamArg);
1494 		return B_NO_MEMORY;
1495 	}
1496 
1497 	// copy the args over
1498 	teamArg->flat_args = flatArgs;
1499 	teamArg->flat_args_size = flatArgsSize;
1500 	teamArg->arg_count = argCount;
1501 	teamArg->env_count = envCount;
1502 	teamArg->flags = 0;
1503 	teamArg->umask = umask;
1504 	teamArg->error_port = port;
1505 	teamArg->error_token = token;
1506 
1507 	// determine the flags from the environment
1508 	const char* const* env = flatArgs + argCount + 1;
1509 	for (int32 i = 0; i < envCount; i++) {
1510 		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
1511 			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
1512 			break;
1513 		}
1514 	}
1515 
1516 	*_teamArg = teamArg;
1517 	return B_OK;
1518 }
1519 
1520 
/*!	Does the real work of starting a newly created userland team's main
	thread: copies the program arguments onto the user stack, registers the
	commpage image, loads the runtime loader, and enters userspace.

	\param args The team_arg structure for the team (ownership is taken
		over; it is freed before userspace is entered).
	\return Only returns in case of error (the team deletion process cleans
		up whatever has been set up so far).
*/
static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
		thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size								| usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
	// TLS_SIZE							| TLS data
	// sizeof(user_space_program_args)	| argument structure for the runtime
	//									| loader
	// flat arguments size				| flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	// The argument structure lives right above the stack and the TLS block
	// (cf. the layout table above).
	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	// Copy the program path, counts, pointers, and the flat arguments buffer
	// to the userland argument structure. Any failure means the stack area
	// is broken beyond repair.
	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// set team args and update state
	team->Lock();
	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	// Clone commpage area
	area_id commPageArea = clone_commpage_area(team->id,
		&team->commpage_address);
	if (commPageArea  < B_OK) {
		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
			strerror(commPageArea)));
		return commPageArea;
	}

	// Register commpage image
	image_id commPageImage = get_commpage_image();
	extended_image_info imageInfo;
	err = get_image_info(commPageImage, &imageInfo.basic_info);
	if (err != B_OK) {
		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
			strerror(err)));
		return err;
	}
	// The commpage is mapped into this team's address space, so adjust the
	// text address/delta accordingly; symbol info isn't provided.
	imageInfo.basic_info.text = team->commpage_address;
	imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
	imageInfo.symbol_table = NULL;
	imageInfo.symbol_hash = NULL;
	imageInfo.string_table = NULL;
	image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
	if (image < 0) {
		TRACE(("team_create_thread_start: register_image() failed: %s\n",
			strerror(image)));
		return image;
	}

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	// enter userspace -- returns only in case of error
	return thread_enter_userspace_new_team(thread, (addr_t)entry,
		programArgs, team->commpage_address);
}
1661 
1662 
/*!	Entry function of a new userland team's main thread. On success the
	internal function enters userspace and never returns; thus reaching the
	code after the call means loading failed, and the thread is terminated.
*/
static status_t
team_create_thread_start(void* args)
{
	team_create_thread_start_internal(args);
	// Loading failed -- record the team's exit info and let the thread die.
	team_init_exit_info_on_error(thread_get_current_thread()->team);
	thread_exit();
		// does not return
	return B_OK;
}
1672 
1673 
/*!	Creates a new team (complete with main thread, I/O context, address
	space, and user data area) and arranges for the given executable to be
	loaded into it.

	\param _flatArgs The flat arguments/environment buffer; ownership is
		taken over by the created team_arg structure, in which case the
		reference is set to \c NULL.
	\param flatArgsSize Size of the flat arguments buffer in bytes.
	\param argCount Number of arguments in the buffer.
	\param envCount Number of environment variables in the buffer.
	\param priority Unused here; the main thread is created with
		\c B_NORMAL_PRIORITY.
	\param parentID ID of the team that becomes the new team's parent.
	\param flags \c B_WAIT_TILL_LOADED makes this function block until the
		runtime loader finished (or failed) loading the image.
	\param errorPort Port the runtime loader reports errors to.
	\param errorToken Token identifying this load request on \a errorPort.
	\return The ID of the new team's main thread, or an error code.
*/
static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	thread_id thread;
	status_t status;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	io_context* parentIOContext = NULL;
	team_id teamID;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	// By convention the first flat argument is the executable's path.
	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
		"\n", path, flatArgs, argCount));

	// cut the path from the main thread name
	const char* threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the main thread object
	Thread* mainThread;
	status = Thread::Create(threadName, mainThread);
	if (status != B_OK)
		return status;
	BReference<Thread> mainThreadReference(mainThread, true);

	// create team object
	Team* team = Team::Create(mainThread->id, path, false);
	if (team == NULL)
		return B_NO_MEMORY;
	BReference<Team> teamReference(team, true);

	// If requested, set up the loading info so the new team's thread can
	// wake us once the runtime loader is done (or failed).
	if (flags & B_WAIT_TILL_LOADED) {
		loadingInfo.thread = thread_get_current_thread();
		loadingInfo.result = B_ERROR;
		loadingInfo.done = false;
		team->loading_info = &loadingInfo;
	}

	// get the parent team
	Team* parent = Team::Get(parentID);
	if (parent == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> parentReference(parent, true);

	// Lock order: parent team + its process group first, then the new team.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	// inherit the parent's user/group
	inherit_parent_user_and_group(team, parent);

 	InterruptsSpinLocker teamsLocker(sTeamHashLock);

	// Insert into the global table first; the team limit is only checked
	// (and acted upon) after the locks have been dropped, via teamLimitReached.
	sTeamHash.Insert(team);
	bool teamLimitReached = sUsedTeams >= sMaxTeams;
	if (!teamLimitReached)
		sUsedTeams++;

	teamsLocker.Unlock();

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err1;
	}

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err2;

	// ASLR is enabled unless the environment contained DISABLE_ASLR=1.
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		// Extra stack space for the argument structure and the flat
		// arguments (cf. the stack layout in team_create_thread_start_internal()).
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err5;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		if (mainThread != NULL) {
			// resume the team's main thread
			thread_continue(mainThread);
		}

		// Now suspend ourselves until loading is finished. We will be woken
		// either by the thread, when it finished or aborted loading, or when
		// the team is going to die (e.g. is killed). In either case the one
		// setting `loadingInfo.done' is responsible for removing the info from
		// the team structure.
		while (!loadingInfo.done)
			thread_suspend();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(teamID);

	return thread;

err5:
	delete_team_user_data(team);
err4:
	team->address_space->Put();
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	teamsLocker.Lock();
	sTeamHash.Remove(team);
	if (!teamLimitReached)
		sUsedTeams--;
	teamsLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	return status;
}
1877 
1878 
/*!	Almost shuts down the current team and loads a new image into it.
	If successful, this function does not return and will takeover ownership of
	the arguments provided.
	This function may only be called in a userland team (caused by one of the
	exec*() syscalls).

	\param path Path of the executable to load.
	\param _flatArgs The flat arguments/environment buffer; ownership is
		taken over by the created team_arg structure, in which case the
		reference is set to \c NULL.
	\param flatArgsSize Size of the flat arguments buffer in bytes.
	\param argCount Number of arguments in the buffer.
	\param envCount Number of environment variables in the buffer.
	\param umask The file mode creation mask for the new image.
	\return Only returns on error; on success the thread enters userspace.
*/
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
		team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	// User timers and signal state don't survive exec().
	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	// Create the team_arg before tearing anything down, so a failure here
	// still leaves the team intact.
	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	// From here on the old image is destroyed: drop areas, semaphores,
	// ports, images, CLOEXEC fds, and realtime semaphores.
	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	// update ASLR
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
			// does never return
	} else
		free_team_arg(teamArgs);

	// Sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that).
	exit_thread(status);

	// We return a status here since the signal that is sent by the
	// call above is not immediately handled.
	return B_ERROR;
}
2025 
2026 
/*!	Implements the fork() syscall for the calling (userland) team: creates a
	new team that is a copy of the current one -- copy-on-write clones of all
	areas, inherited user/group, signal state, images, and I/O context -- and
	starts its main thread with a copy of the caller's userland state.

	\return The ID of the child's main thread (i.e. the child's process ID)
		to the parent, or an error code. The child itself resumes in userland
		via the stored fork frame.
*/
static thread_id
fork_team(void)
{
	Thread* parentThread = thread_get_current_thread();
	Team* parentTeam = parentThread->team;
	Team* team;
	arch_fork_arg* forkArgs;
	struct area_info info;
	thread_id threadID;
	status_t status;
	ssize_t areaCookie;

	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));

	if (parentTeam == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// create a new team
	// TODO: this is very similar to load_image_internal() - maybe we can do
	// something about it :)

	// create the main thread object
	Thread* thread;
	status = Thread::Create(parentThread->name, thread);
	if (status != B_OK)
		return status;
	BReference<Thread> threadReference(thread, true);

	// create the team object
	team = Team::Create(thread->id, NULL, false);
	if (team == NULL)
		return B_NO_MEMORY;

	parentTeam->LockTeamAndProcessGroup();
	team->Lock();

	team->SetName(parentTeam->Name());
	team->SetArgs(parentTeam->Args());

	// The commpage mapping is identical in the child (areas are cloned below).
	team->commpage_address = parentTeam->commpage_address;

	// Inherit the parent's user/group.
	inherit_parent_user_and_group(team, parentTeam);

	// inherit signal handlers
	team->InheritSignalActions(parentTeam);

	InterruptsSpinLocker teamsLocker(sTeamHashLock);

	// Insert into the global table first; the team limit is only acted upon
	// after the locks have been dropped, via teamLimitReached.
	sTeamHash.Insert(team);
	bool teamLimitReached = sUsedTeams >= sMaxTeams;
	if (!teamLimitReached)
		sUsedTeams++;

	teamsLocker.Unlock();

	insert_team_into_parent(parentTeam, team);
	insert_team_into_group(parentTeam->group, team);

	team->Unlock();
	parentTeam->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// inherit some team debug flags
	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
		& B_TEAM_DEBUG_INHERITED_FLAGS;

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err1;
	}

	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
	if (forkArgs == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// duplicate the realtime sem context
	if (parentTeam->realtime_sem_context) {
		team->realtime_sem_context = clone_realtime_sem_context(
			parentTeam->realtime_sem_context);
		if (team->realtime_sem_context == NULL) {
			status = B_NO_MEMORY;
			goto err2;
		}
	}

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status < B_OK)
		goto err3;

	// copy all areas of the team
	// TODO: should be able to handle stack areas differently (ie. don't have
	// them copy-on-write)

	areaCookie = 0;
	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
		if (info.area == parentTeam->user_data_area) {
			// don't clone the user area; just create a new one
			status = create_team_user_data(team, info.address);
			if (status != B_OK)
				break;

			thread->user_thread = team_allocate_user_thread(team);
		} else {
			void* address;
			area_id area = vm_copy_area(team->address_space->ID(), info.name,
				&address, B_CLONE_ADDRESS, info.protection, info.area);
			if (area < B_OK) {
				status = area;
				break;
			}

			// Remember the child's clone of the parent's stack area.
			if (info.area == parentThread->user_stack_area)
				thread->user_stack_area = area;
		}
	}

	if (status < B_OK)
		goto err4;

	if (thread->user_thread == NULL) {
#if KDEBUG
		panic("user data area not found, parent area is %" B_PRId32,
			parentTeam->user_data_area);
#endif
		status = B_ERROR;
		goto err4;
	}

	// The child's main thread inherits the parent thread's userland state.
	thread->user_stack_base = parentThread->user_stack_base;
	thread->user_stack_size = parentThread->user_stack_size;
	thread->user_local_storage = parentThread->user_local_storage;
	thread->sig_block_mask = parentThread->sig_block_mask;
	thread->signal_stack_base = parentThread->signal_stack_base;
	thread->signal_stack_size = parentThread->signal_stack_size;
	thread->signal_stack_enabled = parentThread->signal_stack_enabled;

	// Snapshot the CPU state the child will resume from.
	arch_store_fork_frame(forkArgs);

	// copy image list
	if (copy_images(parentTeam->id, team) != B_OK)
		goto err5;

	// create the main thread
	{
		ThreadCreationAttributes threadCreationAttributes(NULL,
			parentThread->name, parentThread->priority, NULL, team->id, thread);
		threadCreationAttributes.forkArgs = forkArgs;
		threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS;
		threadID = thread_create_thread(threadCreationAttributes, false);
		if (threadID < 0) {
			status = threadID;
			goto err5;
		}
	}

	// notify the debugger
	user_debug_team_created(team->id);

	T(TeamForked(threadID));

	resume_thread(threadID);
	return threadID;

err5:
	remove_images(team);
err4:
	team->address_space->RemoveAndPut();
err3:
	delete_realtime_sem_context(team->realtime_sem_context);
err2:
	free(forkArgs);
err1:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parentTeam->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parentTeam->UnlockTeamAndProcessGroup();

	teamsLocker.Lock();
	sTeamHash.Remove(team);
	if (!teamLimitReached)
		sUsedTeams--;
	teamsLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	team->ReleaseReference();

	return status;
}
2236 
2237 
2238 /*!	Returns if the specified team \a parent has any children belonging to the
2239 	process group with the specified ID \a groupID.
2240 	The caller must hold \a parent's lock.
2241 */
2242 static bool
2243 has_children_in_group(Team* parent, pid_t groupID)
2244 {
2245 	for (Team* child = parent->children; child != NULL;
2246 			child = child->siblings_next) {
2247 		TeamLocker childLocker(child);
2248 		if (child->group_id == groupID)
2249 			return true;
2250 	}
2251 
2252 	return false;
2253 }
2254 
2255 
2256 /*!	Returns the first job control entry from \a children, which matches \a id.
2257 	\a id can be:
2258 	- \code > 0 \endcode: Matching an entry with that team ID.
2259 	- \code == -1 \endcode: Matching any entry.
2260 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2261 	\c 0 is an invalid value for \a id.
2262 
2263 	The caller must hold the lock of the team that \a children belongs to.
2264 
2265 	\param children The job control entry list to check.
2266 	\param id The match criterion.
2267 	\return The first matching entry or \c NULL, if none matches.
2268 */
2269 static job_control_entry*
2270 get_job_control_entry(team_job_control_children& children, pid_t id)
2271 {
2272 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2273 		 job_control_entry* entry = it.Next();) {
2274 
2275 		if (id > 0) {
2276 			if (entry->thread == id)
2277 				return entry;
2278 		} else if (id == -1) {
2279 			return entry;
2280 		} else {
2281 			pid_t processGroup
2282 				= (entry->team ? entry->team->group_id : entry->group_id);
2283 			if (processGroup == -id)
2284 				return entry;
2285 		}
2286 	}
2287 
2288 	return NULL;
2289 }
2290 
2291 
2292 /*!	Returns the first job control entry from one of team's dead, continued, or
2293     stopped children which matches \a id.
2294 	\a id can be:
2295 	- \code > 0 \endcode: Matching an entry with that team ID.
2296 	- \code == -1 \endcode: Matching any entry.
2297 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2298 	\c 0 is an invalid value for \a id.
2299 
2300 	The caller must hold \a team's lock.
2301 
2302 	\param team The team whose dead, stopped, and continued child lists shall be
2303 		checked.
2304 	\param id The match criterion.
2305 	\param flags Specifies which children shall be considered. Dead children
2306 		always are. Stopped children are considered when \a flags is ORed
2307 		bitwise with \c WUNTRACED, continued children when \a flags is ORed
2308 		bitwise with \c WCONTINUED.
2309 	\return The first matching entry or \c NULL, if none matches.
2310 */
2311 static job_control_entry*
2312 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2313 {
2314 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
2315 
2316 	if (entry == NULL && (flags & WCONTINUED) != 0)
2317 		entry = get_job_control_entry(team->continued_children, id);
2318 
2319 	if (entry == NULL && (flags & WUNTRACED) != 0)
2320 		entry = get_job_control_entry(team->stopped_children, id);
2321 
2322 	return entry;
2323 }
2324 
2325 
/*!	Creates an entry that does not yet hold a reference to its process group;
	InitDeadState() acquires one when the owning team dies.
*/
job_control_entry::job_control_entry()
	:
	has_group_ref(false)
{
}
2331 
2332 
/*!	Releases the process group reference acquired in InitDeadState(), if any.
	The group is looked up by ID, since the entry intentionally doesn't store
	a pointer to it.
*/
job_control_entry::~job_control_entry()
{
	if (has_group_ref) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);

		ProcessGroup* group = sGroupHash.Lookup(group_id);
		if (group == NULL) {
			panic("job_control_entry::~job_control_entry(): unknown group "
				"ID: %" B_PRId32, group_id);
			return;
		}

		// drop the hash lock before releasing the reference -- releasing the
		// last reference would delete the group, which must not happen with
		// the hash lock held
		groupHashLocker.Unlock();

		group->ReleaseReference();
	}
}
2350 
2351 
/*!	Invoked when the owning team is dying, initializing the entry according to
	the dead state.

	Copies the team's exit status and accumulated CPU times into the entry and
	acquires a reference to the team's process group (released again in the
	destructor), so the entry remains meaningful after the team structure is
	gone. Afterwards \c team is set to \c NULL to mark the entry detached.

	The caller must hold the owning team's lock and the scheduler lock.
*/
void
job_control_entry::InitDeadState()
{
	if (team != NULL) {
		ASSERT(team->exit.initialized);

		group_id = team->group_id;
		team->group->AcquireReference();
		has_group_ref = true;

		thread = team->id;
		status = team->exit.status;
		reason = team->exit.reason;
		signal = team->exit.signal;
		signaling_user = team->exit.signaling_user;
		// total CPU time: dead threads of the team plus its dead children
		user_time = team->dead_threads_user_time
			+ team->dead_children.user_time;
		kernel_time = team->dead_threads_kernel_time
			+ team->dead_children.kernel_time;

		// detach from the (dying) team
		team = NULL;
	}
}
2380 
2381 
/*!	Copies all wait status fields from \a other.
	Note: \c has_group_ref is deliberately set to \c false -- the group
	reference (if any) stays with \a other and is not duplicated.
*/
job_control_entry&
job_control_entry::operator=(const job_control_entry& other)
{
	state = other.state;
	thread = other.thread;
	signal = other.signal;
	has_group_ref = false;
	signaling_user = other.signaling_user;
	team = other.team;
	group_id = other.group_id;
	status = other.status;
	reason = other.reason;
	user_time = other.user_time;
	kernel_time = other.kernel_time;

	return *this;
}
2399 
2400 
/*! This is the kernel backend for waitid().

	Loops until a matching job control entry is found (or an error occurs),
	blocking on the team's dead-children condition variable in between, unless
	\c WNOHANG is given.

	\param child The wait target: \c > 0 a team ID, \c 0 the caller's process
		group, \c -1 any child, \c < -1 the process group \c -child.
	\param flags \c WNOHANG, \c WNOWAIT, \c WUNTRACED, \c WCONTINUED as per
		POSIX.
	\param _info Filled in with the SIGCHLD-style child status on success.
	\param _usage_info Filled in with the dead child's CPU times (only for
		\c JOB_CONTROL_STATE_DEAD entries).
	\return The ID of the child the status belongs to, or an error code.
*/
static thread_id
wait_for_child(pid_t child, uint32 flags, siginfo_t& _info,
	team_usage_info& _usage_info)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	struct job_control_entry foundEntry;
	struct job_control_entry* freeDeathEntry = NULL;
	status_t status = B_OK;

	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
		child, flags));

	T(WaitForChild(child, flags));

	// remember the original argument -- "child" is recomputed each iteration,
	// since the caller's process group may change while we're blocked
	pid_t originalChild = child;

	bool ignoreFoundEntries = false;
	bool ignoreFoundEntriesChecked = false;

	while (true) {
		// lock the team
		TeamLocker teamLocker(team);

		// A 0 child argument means to wait for all children in the process
		// group of the calling team.
		child = originalChild == 0 ? -team->group_id : originalChild;

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		// If we don't have an entry yet, check whether there are any children
		// complying to the process group specification at all.
		if (entry == NULL) {
			// No success yet -- check whether there are any children complying
			// to the process group specification at all.
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else {
				if (Team* childTeam = Team::Get(child)) {
					BReference<Team> childTeamReference(childTeam, true);
					TeamLocker childTeamLocker(childTeam);
					childrenExist = childTeam->parent == team;
				}
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;

			// unless WNOWAIT has been specified, "consume" the wait state
			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
				if (entry->state == JOB_CONTROL_STATE_DEAD) {
					// The child is dead. Reap its death entry.
					freeDeathEntry = entry;
					team->dead_children.entries.Remove(entry);
					team->dead_children.count--;
				} else {
					// The child is well. Reset its job control state.
					team_set_job_control_state(entry->team,
						JOB_CONTROL_STATE_NONE, NULL);
				}
			}
		}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children.condition_variable.Add(&deadWaitEntry);

		teamLocker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			teamLocker.Lock();

			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			teamLocker.Unlock();

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// When we got here, we have a valid death entry, and already got
	// unregistered from the team or group. Fill in the returned info.
	memset(&_info, 0, sizeof(_info));
	_info.si_signo = SIGCHLD;
	_info.si_pid = foundEntry.thread;
	_info.si_uid = foundEntry.signaling_user;
	// TODO: Fill in si_errno?

	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			_info.si_code = foundEntry.reason;
			_info.si_status = foundEntry.reason == CLD_EXITED
				? foundEntry.status : foundEntry.signal;
			_usage_info.user_time = foundEntry.user_time;
			_usage_info.kernel_time = foundEntry.kernel_time;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			_info.si_code = CLD_STOPPED;
			_info.si_status = foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			_info.si_code = CLD_CONTINUED;
			_info.si_status = 0;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
	// status is available.
	// Lock order: team lock -> signal lock -> thread creation lock.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker signalLocker(team->signal_lock);
	SpinLocker threadCreationLocker(gThreadCreationLock);

	if (is_team_signal_blocked(team, SIGCHLD)) {
		if (get_job_control_entry(team, child, flags) == NULL)
			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
	}

	threadCreationLocker.Unlock();
	signalLocker.Unlock();
	teamLocker.Unlock();

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}
2584 
2585 
/*! Fills the team_info structure with information from the specified team.
	Interrupts must be enabled. The team must not be locked.

	\param team The team to query.
	\param info The structure to fill in.
	\param size Must be \c sizeof(team_info), otherwise \c B_BAD_VALUE is
		returned.
	\return \c B_OK on success.
*/
static status_t
fill_team_info(Team* team, team_info* info, size_t size)
{
	if (size != sizeof(team_info))
		return B_BAD_VALUE;

	// TODO: Set more information for team_info
	memset(info, 0, size);

	info->team = team->id;
		// immutable
	info->image_count = count_images(team);
		// protected by sImageMutex

	// the remaining fields are guarded by the team lock and the debug info
	// spinlock respectively
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	info->thread_count = team->num_threads;
	//info->area_count =
	info->debugger_nub_thread = team->debug_info.nub_thread;
	info->debugger_nub_port = team->debug_info.nub_port;
	info->uid = team->effective_uid;
	info->gid = team->effective_gid;

	strlcpy(info->args, team->Args(), sizeof(info->args));
	info->argc = 1;

	return B_OK;
}
2618 
2619 
2620 /*!	Returns whether the process group contains stopped processes.
2621 	The caller must hold the process group's lock.
2622 */
2623 static bool
2624 process_group_has_stopped_processes(ProcessGroup* group)
2625 {
2626 	Team* team = group->teams;
2627 	while (team != NULL) {
2628 		// the parent team's lock guards the job control entry -- acquire it
2629 		team->LockTeamAndParent(false);
2630 
2631 		if (team->job_control_entry != NULL
2632 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2633 			team->UnlockTeamAndParent();
2634 			return true;
2635 		}
2636 
2637 		team->UnlockTeamAndParent();
2638 
2639 		team = team->group_next;
2640 	}
2641 
2642 	return false;
2643 }
2644 
2645 
/*!	Iterates through all process groups queued in team_remove_team() and signals
	those that are orphaned and have stopped processes.
	The caller must not hold any team or process group locks.
*/
static void
orphaned_process_group_check()
{
	// process as long as there are groups in the list
	while (true) {
		// remove the head from the list
		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
		if (group == NULL)
			return;

		group->UnsetOrphanedCheck();
		// the list's reference to the group is transferred to us
		BReference<ProcessGroup> groupReference(group);

		// drop the list lock before taking the group lock (lock order)
		orphanedCheckLocker.Unlock();

		AutoLocker<ProcessGroup> groupLocker(group);

		// If the group is orphaned and contains stopped processes, we're
		// supposed to send SIGHUP + SIGCONT.
		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
			Thread* currentThread = thread_get_current_thread();

			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
			send_signal_to_process_group_locked(group, signal, 0);

			signal.SetNumber(SIGCONT);
			send_signal_to_process_group_locked(group, signal, 0);
		}
	}
}
2682 
2683 
2684 static status_t
2685 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2686 	uint32 flags)
2687 {
2688 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2689 		return B_BAD_VALUE;
2690 
2691 	// get the team
2692 	Team* team = Team::GetAndLock(id);
2693 	if (team == NULL)
2694 		return B_BAD_TEAM_ID;
2695 	BReference<Team> teamReference(team, true);
2696 	TeamLocker teamLocker(team, true);
2697 
2698 	if ((flags & B_CHECK_PERMISSION) != 0) {
2699 		uid_t uid = geteuid();
2700 		if (uid != 0 && uid != team->effective_uid)
2701 			return B_NOT_ALLOWED;
2702 	}
2703 
2704 	bigtime_t kernelTime = 0;
2705 	bigtime_t userTime = 0;
2706 
2707 	switch (who) {
2708 		case B_TEAM_USAGE_SELF:
2709 		{
2710 			Thread* thread = team->thread_list;
2711 
2712 			for (; thread != NULL; thread = thread->team_next) {
2713 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2714 				kernelTime += thread->kernel_time;
2715 				userTime += thread->user_time;
2716 			}
2717 
2718 			kernelTime += team->dead_threads_kernel_time;
2719 			userTime += team->dead_threads_user_time;
2720 			break;
2721 		}
2722 
2723 		case B_TEAM_USAGE_CHILDREN:
2724 		{
2725 			Team* child = team->children;
2726 			for (; child != NULL; child = child->siblings_next) {
2727 				TeamLocker childLocker(child);
2728 
2729 				Thread* thread = team->thread_list;
2730 
2731 				for (; thread != NULL; thread = thread->team_next) {
2732 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2733 					kernelTime += thread->kernel_time;
2734 					userTime += thread->user_time;
2735 				}
2736 
2737 				kernelTime += child->dead_threads_kernel_time;
2738 				userTime += child->dead_threads_user_time;
2739 			}
2740 
2741 			kernelTime += team->dead_children.kernel_time;
2742 			userTime += team->dead_children.user_time;
2743 			break;
2744 		}
2745 	}
2746 
2747 	info->kernel_time = kernelTime;
2748 	info->user_time = userTime;
2749 
2750 	return B_OK;
2751 }
2752 
2753 
2754 //	#pragma mark - Private kernel API
2755 
2756 
/*!	Initializes the team subsystem at boot: creates the team and process group
	hash tables, the initial session/process group, and the kernel team, and
	registers the related debugger commands and the team notification service.
	Called once during kernel startup; any failure panics.
*/
status_t
team_init(kernel_args* args)
{
	// create the team hash table
	new(&sTeamHash) TeamTable;
	if (sTeamHash.Init(64) != B_OK)
		panic("Failed to init team hash table!");

	new(&sGroupHash) ProcessGroupHashTable;
	if (sGroupHash.Init() != B_OK)
		panic("Failed to init process group hash table!");

	// create initial session and process groups

	ProcessSession* session = new(std::nothrow) ProcessSession(1);
	if (session == NULL)
		panic("Could not create initial session.\n");
	BReference<ProcessSession> sessionReference(session, true);

	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");
	BReference<ProcessGroup> groupReference(group, true);

	group->Publish(session);

	// create the kernel team
	sKernelTeam = Team::Create(1, "kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");
	sKernelTeam->SetArgs(sKernelTeam->Name());
	sKernelTeam->state = TEAM_STATE_NORMAL;

	// the kernel team runs as root with no supplementary groups
	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;
	sKernelTeam->supplementary_group_count = 0;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
		dprintf("Failed to resize FD table for kernel team!\n");

	// stick it in the team hash
	sTeamHash.Insert(sKernelTeam);

	// register kernel debugger commands
	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	sNotificationService.Register();

	return B_OK;
}
2829 
2830 
/*!	Returns the maximum number of teams the system supports. */
int32
team_max_teams(void)
{
	return sMaxTeams;
}
2836 
2837 
/*!	Returns the number of currently existing teams. */
int32
team_used_teams(void)
{
	// sUsedTeams is guarded by the team hash spinlock
	InterruptsSpinLocker teamsLocker(sTeamHashLock);
	return sUsedTeams;
}
2844 
2845 
2846 /*! Returns a death entry of a child team specified by ID (if any).
2847 	The caller must hold the team's lock.
2848 
2849 	\param team The team whose dead children list to check.
2850 	\param child The ID of the child for whose death entry to lock. Must be > 0.
2851 	\param _deleteEntry Return variable, indicating whether the caller needs to
2852 		delete the returned entry.
2853 	\return The death entry of the matching team, or \c NULL, if no death entry
2854 		for the team was found.
2855 */
2856 job_control_entry*
2857 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2858 {
2859 	if (child <= 0)
2860 		return NULL;
2861 
2862 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2863 		child);
2864 	if (entry) {
2865 		// remove the entry only, if the caller is the parent of the found team
2866 		if (team_get_current_team_id() == entry->thread) {
2867 			team->dead_children.entries.Remove(entry);
2868 			team->dead_children.count--;
2869 			*_deleteEntry = true;
2870 		} else {
2871 			*_deleteEntry = false;
2872 		}
2873 	}
2874 
2875 	return entry;
2876 }
2877 
2878 
2879 /*! Quick check to see if we have a valid team ID. */
2880 bool
2881 team_is_valid(team_id id)
2882 {
2883 	if (id <= 0)
2884 		return false;
2885 
2886 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2887 
2888 	return team_get_team_struct_locked(id) != NULL;
2889 }
2890 
2891 
/*!	Looks up the team with the given ID in the global team hash table.
	The caller must hold \c sTeamHashLock.
	\return The team, or \c NULL if there is no team with that ID.
*/
Team*
team_get_team_struct_locked(team_id id)
{
	return sTeamHash.Lookup(id);
}
2897 
2898 
/*!	Makes the TTY with the given index the controlling TTY of the calling
	team's session and resets the session's foreground process group.
*/
void
team_set_controlling_tty(int32 ttyIndex)
{
	// lock the team, so its session won't change while we're playing with it
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// set the session's fields
	session->controlling_tty = ttyIndex;
	session->foreground_group = -1;
}
2914 
2915 
/*!	Returns the index of the controlling TTY of the calling team's session. */
int32
team_get_controlling_tty()
{
	// lock the team, so its session won't change while we're playing with it
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// get the session's field
	return session->controlling_tty;
}
2930 
2931 
/*!	Sets the foreground process group of the calling team's session
	(tcsetpgrp() semantics).

	\param ttyIndex The TTY the caller wants to change -- must be the
		session's controlling TTY, otherwise \c ENOTTY is returned.
	\param processGroupID The new foreground group -- must belong to the
		caller's session, otherwise \c B_BAD_VALUE is returned.
	\return \c B_OK on success, \c B_INTERRUPTED if the caller is a background
		process that got sent SIGTTOU instead (per POSIX job control).
*/
status_t
team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
{
	// lock the team, so its session won't change while we're playing with it
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// check given TTY -- must be the controlling tty of the calling process
	if (session->controlling_tty != ttyIndex)
		return ENOTTY;

	// check given process group -- must belong to our session
	{
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
		if (group == NULL || group->Session() != session)
			return B_BAD_VALUE;
	}

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
		InterruptsSpinLocker signalLocker(team->signal_lock);

		if (!is_team_signal_blocked(team, SIGTTOU)) {
			pid_t groupID = team->group_id;

			// drop all locks before sending the signal
			signalLocker.Unlock();
			sessionLocker.Unlock();
			teamLocker.Unlock();

			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
			send_signal_to_process_group(groupID, signal, 0);
			return B_INTERRUPTED;
		}
	}

	session->foreground_group = processGroupID;

	return B_OK;
}
2981 
2982 
/*!	Removes the specified team from the global team hash, from its process
	group, and from its parent.
	It also moves all of its children to the kernel team.

	\param team The team to remove.
	\param _signalGroup Return variable: set to the session's foreground
		process group, if \a team was a session leader with a controlling
		terminal (the caller is expected to signal that group later, outside
		of the locks held here), otherwise to \c -1.

	The caller must hold the following locks:
	- \a team's process group's lock,
	- the kernel team's lock,
	- \a team's parent team's lock (might be the kernel team), and
	- \a team's lock.
*/
void
team_remove_team(Team* team, pid_t& _signalGroup)
{
	Team* parent = team->parent;

	// remember how long this team lasted
	parent->dead_children.kernel_time += team->dead_threads_kernel_time
		+ team->dead_children.kernel_time;
	parent->dead_children.user_time += team->dead_threads_user_time
		+ team->dead_children.user_time;

	// remove the team from the hash table
	InterruptsSpinLocker teamsLocker(sTeamHashLock);
	sTeamHash.Remove(team);
	sUsedTeams--;
	teamsLocker.Unlock();

	// The team can no longer be accessed by ID. Navigation to it is still
	// possible from its process group and its parent and children, but that
	// will be rectified shortly.
	team->state = TEAM_STATE_DEATH;

	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signalling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine, whom to signal.
	_signalGroup = -1;
	bool isSessionLeader = false;
	if (team->session_id == team->id
		&& team->group->Session()->controlling_tty >= 0) {
		isSessionLeader = true;

		ProcessSession* session = team->group->Session();

		AutoLocker<ProcessSession> sessionLocker(session);

		session->controlling_tty = -1;
		_signalGroup = session->foreground_group;
	}

	// remove us from our process group
	remove_team_from_group(team);

	// move the team's children to the kernel team
	while (Team* child = team->children) {
		// remove the child from the current team and add it to the kernel team
		TeamLocker childLocker(child);

		remove_team_from_parent(team, child);
		insert_team_into_parent(sKernelTeam, child);

		// move job control entries too
		sKernelTeam->stopped_children.entries.MoveFrom(
			&team->stopped_children.entries);
		sKernelTeam->continued_children.entries.MoveFrom(
			&team->continued_children.entries);

		// If the team was a session leader with controlling terminal,
		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
		// groups with stopped processes. Due to locking complications we can't
		// do that here, so we only check whether we were a reason for the
		// child's process group not being an orphan and, if so, schedule a
		// later check (cf. orphaned_process_group_check()).
		if (isSessionLeader) {
			ProcessGroup* childGroup = child->group;
			if (childGroup->Session()->id == team->session_id
				&& childGroup->id != team->group_id) {
				childGroup->ScheduleOrphanedCheck();
			}
		}

		// Note, we don't move the dead children entries. Those will be deleted
		// when the team structure is deleted.
	}

	// remove us from our parent
	remove_team_from_parent(parent, team);
}
3071 
3072 
/*!	Kills all threads but the main thread of the team and shuts down user
	debugging for it.
	To be called on exit of the team's main thread. No locks must be held.

	\param team The team in question.
	\return The port of the debugger for the team, -1 if none. To be passed to
		team_delete_team().
*/
port_id
team_shutdown_team(Team* team)
{
	ASSERT(thread_get_current_thread() == team->main_thread);

	TeamLocker teamLocker(team);

	// Make sure debugging changes won't happen anymore.
	port_id debuggerPort = -1;
	while (true) {
		// If a debugger change is in progress for the team, we'll have to
		// wait until it is done.
		ConditionVariableEntry waitForDebuggerEntry;
		bool waitForDebugger = false;

		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition != NULL) {
			team->debug_info.debugger_changed_condition->Add(
				&waitForDebuggerEntry);
			waitForDebugger = true;
		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
			// The team is being debugged. That will stop with the termination
			// of the nub thread. Since we set the team state to death, no one
			// can install a debugger anymore. We fetch the debugger's port to
			// send it a message at the bitter end.
			debuggerPort = team->debug_info.debugger_port;
		}

		debugInfoLocker.Unlock();

		if (!waitForDebugger)
			break;

		// wait for the debugger change to be finished
		teamLocker.Unlock();

		waitForDebuggerEntry.Wait();

		teamLocker.Lock();
	}

	// Mark the team as shutting down. That will prevent new threads from being
	// created and debugger changes from taking place.
	team->state = TEAM_STATE_SHUTDOWN;

	// delete all timers
	team->DeleteUserTimers(false);

	// deactivate CPU time user timers for the team
	InterruptsSpinLocker timeLocker(team->time_lock);

	if (team->HasActiveCPUTimeUserTimers())
		team->DeactivateCPUTimeUserTimers();

	timeLocker.Unlock();

	// kill all threads but the main thread
	team_death_entry deathEntry;
	deathEntry.condition.Init(team, "team death");

	while (true) {
		team->death_entry = &deathEntry;
		deathEntry.remaining_threads = 0;

		// send SIGKILLTHR to every thread but ourselves (the main thread)
		Thread* thread = team->thread_list;
		while (thread != NULL) {
			if (thread != team->main_thread) {
				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
				deathEntry.remaining_threads++;
			}

			thread = thread->team_next;
		}

		if (deathEntry.remaining_threads == 0)
			break;

		// there are threads to wait for
		ConditionVariableEntry entry;
		deathEntry.condition.Add(&entry);

		teamLocker.Unlock();

		entry.Wait();

		teamLocker.Lock();
	}

	team->death_entry = NULL;

	return debuggerPort;
}
3175 
3176 
/*!	Called on team exit to notify threads waiting on the team and free most
	resources associated with it.
	The caller shouldn't hold any locks.

	\param team The dying team; its last reference is released here.
	\param debuggerPort The debugger port returned by team_shutdown_team(),
		or -1 if the team wasn't being debugged.
*/
void
team_delete_team(Team* team, port_id debuggerPort)
{
	// Not quite in our job description, but work that has been left by
	// team_remove_team() and that can be done now that we're not holding any
	// locks.
	orphaned_process_group_check();

	team_id teamID = team->id;

	ASSERT(team->num_threads == 0);

	// If someone is waiting for this team to be loaded, but it dies
	// unexpectedly before being done, we need to notify the waiting
	// thread now.

	TeamLocker teamLocker(team);

	if (team->loading_info) {
		// there's indeed someone waiting
		struct team_loading_info* loadingInfo = team->loading_info;
		team->loading_info = NULL;

		loadingInfo->result = B_ERROR;
		loadingInfo->done = true;

		// wake up the waiting thread
		thread_continue(loadingInfo->thread);
	}

	// notify team watchers

	{
		// we're not reachable from anyone anymore at this point, so we
		// can safely access the list without any locking
		struct team_watcher* watcher;
		while ((watcher = (struct team_watcher*)list_remove_head_item(
				&team->watcher_list)) != NULL) {
			watcher->hook(teamID, watcher->data);
			free(watcher);
		}
	}

	teamLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	// free team resources

	delete_realtime_sem_context(team->realtime_sem_context);
	xsi_sem_undo(team);
	remove_images(team);
	team->address_space->RemoveAndPut();

	team->ReleaseReference();

	// notify the debugger, that the team is gone
	user_debug_team_deleted(teamID, debuggerPort);
}
3240 
3241 
/*!	Returns the Team structure of the kernel team. */
Team*
team_get_kernel_team(void)
{
	return sKernelTeam;
}
3247 
3248 
3249 team_id
3250 team_get_kernel_team_id(void)
3251 {
3252 	if (!sKernelTeam)
3253 		return 0;
3254 
3255 	return sKernelTeam->id;
3256 }
3257 
3258 
3259 team_id
3260 team_get_current_team_id(void)
3261 {
3262 	return thread_get_current_thread()->team->id;
3263 }
3264 
3265 
3266 status_t
3267 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3268 {
3269 	if (id == sKernelTeam->id) {
3270 		// we're the kernel team, so we don't have to go through all
3271 		// the hassle (locking and hash lookup)
3272 		*_addressSpace = VMAddressSpace::GetKernel();
3273 		return B_OK;
3274 	}
3275 
3276 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3277 
3278 	Team* team = team_get_team_struct_locked(id);
3279 	if (team == NULL)
3280 		return B_BAD_VALUE;
3281 
3282 	team->address_space->Get();
3283 	*_addressSpace = team->address_space;
3284 	return B_OK;
3285 }
3286 
3287 
/*!	Sets the team's job control state.
	The caller must hold the parent team's lock. Interrupts are allowed to be
	enabled or disabled.
	\a team The team whose job control state shall be set.
	\a newState The new state to be set.
	\a signal The signal the new state was caused by. Can be \c NULL, if none.
		Then the caller is responsible for filling in the following fields of
		the entry before releasing the parent team's lock, unless the new state
		is \c JOB_CONTROL_STATE_NONE:
		- \c signal: The number of the signal causing the state change.
		- \c signaling_user: The real UID of the user sending the signal.
*/
void
team_set_job_control_state(Team* team, job_control_state newState,
	Signal* signal)
{
	if (team == NULL || team->job_control_entry == NULL)
		return;

	// don't touch anything, if the state stays the same or the team is already
	// dead
	job_control_entry* entry = team->job_control_entry;
	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
		return;

	T(SetJobControlState(team->id, newState, signal));

	// remove from the old list
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry is in no list ATM
			break;
		case JOB_CONTROL_STATE_DEAD:
			// can't get here
			break;
		case JOB_CONTROL_STATE_STOPPED:
			team->parent->stopped_children.entries.Remove(entry);
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			team->parent->continued_children.entries.Remove(entry);
			break;
	}

	entry->state = newState;

	// fill in the signal information, if given (otherwise the caller does
	// it -- see the function comment)
	if (signal != NULL) {
		entry->signal = signal->Number();
		entry->signaling_user = signal->SendingUser();
	}

	// add to new list
	team_job_control_children* childList = NULL;
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry doesn't get into any list
			break;
		case JOB_CONTROL_STATE_DEAD:
			childList = &team->parent->dead_children;
			team->parent->dead_children.count++;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			childList = &team->parent->stopped_children;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			childList = &team->parent->continued_children;
			break;
	}

	if (childList != NULL) {
		childList->entries.Add(entry);
		// wake up a parent possibly waiting in wait_for_child()
		team->parent->dead_children.condition_variable.NotifyAll();
	}
}
3361 
3362 
3363 /*!	Inits the given team's exit information, if not yet initialized, to some
3364 	generic "killed" status.
3365 	The caller must not hold the team's lock. Interrupts must be enabled.
3366 
3367 	\param team The team whose exit info shall be initialized.
3368 */
3369 void
3370 team_init_exit_info_on_error(Team* team)
3371 {
3372 	TeamLocker teamLocker(team);
3373 
3374 	if (!team->exit.initialized) {
3375 		team->exit.reason = CLD_KILLED;
3376 		team->exit.signal = SIGKILL;
3377 		team->exit.signaling_user = geteuid();
3378 		team->exit.status = 0;
3379 		team->exit.initialized = true;
3380 	}
3381 }
3382 
3383 
3384 /*! Adds a hook to the team that is called as soon as this team goes away.
3385 	This call might get public in the future.
3386 */
3387 status_t
3388 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3389 {
3390 	if (hook == NULL || teamID < B_OK)
3391 		return B_BAD_VALUE;
3392 
3393 	// create the watcher object
3394 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3395 	if (watcher == NULL)
3396 		return B_NO_MEMORY;
3397 
3398 	watcher->hook = hook;
3399 	watcher->data = data;
3400 
3401 	// add watcher, if the team isn't already dying
3402 	// get the team
3403 	Team* team = Team::GetAndLock(teamID);
3404 	if (team == NULL) {
3405 		free(watcher);
3406 		return B_BAD_TEAM_ID;
3407 	}
3408 
3409 	list_add_item(&team->watcher_list, watcher);
3410 
3411 	team->UnlockAndReleaseReference();
3412 
3413 	return B_OK;
3414 }
3415 
3416 
3417 status_t
3418 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3419 {
3420 	if (hook == NULL || teamID < 0)
3421 		return B_BAD_VALUE;
3422 
3423 	// get team and remove watcher (if present)
3424 	Team* team = Team::GetAndLock(teamID);
3425 	if (team == NULL)
3426 		return B_BAD_TEAM_ID;
3427 
3428 	// search for watcher
3429 	team_watcher* watcher = NULL;
3430 	while ((watcher = (team_watcher*)list_get_next_item(
3431 			&team->watcher_list, watcher)) != NULL) {
3432 		if (watcher->hook == hook && watcher->data == data) {
3433 			// got it!
3434 			list_remove_item(&team->watcher_list, watcher);
3435 			break;
3436 		}
3437 	}
3438 
3439 	team->UnlockAndReleaseReference();
3440 
3441 	if (watcher == NULL)
3442 		return B_ENTRY_NOT_FOUND;
3443 
3444 	free(watcher);
3445 	return B_OK;
3446 }
3447 
3448 
/*!	Allocates a user_thread structure from the team.
	The team lock must be held, unless the function is called for the team's
	main thread. Interrupts must be enabled.
	\param team The team to allocate the structure from.
	\return A pointer into the team's user data area, or \c NULL if the team
		has no user data area or it could not be enlarged.
*/
struct user_thread*
team_allocate_user_thread(Team* team)
{
	// no user data area at all -- nothing we can hand out
	if (team->user_data == 0)
		return NULL;

	// take an entry from the free list, if any
	if (struct free_user_thread* entry = team->free_user_threads) {
		user_thread* thread = entry->thread;
		team->free_user_threads = entry->next;
		free(entry);
			// only the list node is freed; the slot itself is reused
		return thread;
	}

	while (true) {
		// enough space left?
		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
		if (team->user_data_size - team->used_user_data < needed) {
			// try to resize the area
			if (resize_area(team->user_data_area,
					team->user_data_size + B_PAGE_SIZE) != B_OK) {
				return NULL;
			}

			// resized user area successfully -- try to allocate the user_thread
			// again
			team->user_data_size += B_PAGE_SIZE;
			continue;
		}

		// allocate the user_thread by bumping the used counter
		user_thread* thread
			= (user_thread*)(team->user_data + team->used_user_data);
		team->used_user_data += needed;

		return thread;
	}
}
3491 
3492 
3493 /*!	Frees the given user_thread structure.
3494 	The team's lock must not be held. Interrupts must be enabled.
3495 	\param team The team the user thread was allocated from.
3496 	\param userThread The user thread to free.
3497 */
3498 void
3499 team_free_user_thread(Team* team, struct user_thread* userThread)
3500 {
3501 	if (userThread == NULL)
3502 		return;
3503 
3504 	// create a free list entry
3505 	free_user_thread* entry
3506 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3507 	if (entry == NULL) {
3508 		// we have to leak the user thread :-/
3509 		return;
3510 	}
3511 
3512 	// add to free list
3513 	TeamLocker teamLocker(team);
3514 
3515 	entry->thread = userThread;
3516 	entry->next = team->free_user_threads;
3517 	team->free_user_threads = entry;
3518 }
3519 
3520 
3521 //	#pragma mark - Associated data interface
3522 
3523 
/*!	Creates the data object without an owner; an owner is assigned via
	AssociatedDataOwner::AddData().
*/
AssociatedData::AssociatedData()
	:
	fOwner(NULL)
{
}
3529 
3530 
AssociatedData::~AssociatedData()
{
	// nothing to clean up here; lifetime is managed via references
}
3534 
3535 
/*!	Hook called by AssociatedDataOwner::PrepareForDeletion() when the owner
	goes away. The default implementation does nothing.
*/
void
AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
{
}
3540 
3541 
AssociatedDataOwner::AssociatedDataOwner()
{
	// fLock guards fList and the data objects' owner pointers
	mutex_init(&fLock, "associated data owner");
}
3546 
3547 
AssociatedDataOwner::~AssociatedDataOwner()
{
	// PrepareForDeletion() is expected to have detached all data by now
	mutex_destroy(&fLock);
}
3552 
3553 
/*!	Associates \a data with this owner, acquiring a reference to it.
	\return \c true on success, \c false if the data object already has an
		owner.
*/
bool
AssociatedDataOwner::AddData(AssociatedData* data)
{
	MutexLocker locker(fLock);

	// refuse data that is already associated with an owner
	if (data->Owner() != NULL)
		return false;

	// take a reference for the list and link the object to us
	data->AcquireReference();
	fList.Add(data);
	data->SetOwner(this);

	return true;
}
3568 
3569 
/*!	Dissociates \a data from this owner, releasing the reference acquired in
	AddData().
	\return \c true on success, \c false if the data isn't owned by this owner.
*/
bool
AssociatedDataOwner::RemoveData(AssociatedData* data)
{
	MutexLocker locker(fLock);

	if (data->Owner() != this)
		return false;

	data->SetOwner(NULL);
	fList.Remove(data);

	// drop the lock before releasing the reference -- releasing it may
	// destroy the object
	locker.Unlock();

	data->ReleaseReference();

	return true;
}
3587 
3588 
/*!	Detaches all associated data from this owner, invoking each object's
	OwnerDeleted() hook and releasing the list references. Called before the
	owner is destroyed.
*/
void
AssociatedDataOwner::PrepareForDeletion()
{
	MutexLocker locker(fLock);

	// move all data to a temporary list and unset the owner
	DataList list;
	list.MoveFrom(&fList);

	for (DataList::Iterator it = list.GetIterator();
		AssociatedData* data = it.Next();) {
		data->SetOwner(NULL);
	}

	locker.Unlock();

	// call the notification hooks and release our references
	// (done without the lock held, since both may have side effects)
	while (AssociatedData* data = list.RemoveHead()) {
		data->OwnerDeleted(this);
		data->ReleaseReference();
	}
}
3611 
3612 
3613 /*!	Associates data with the current team.
3614 	When the team is deleted, the data object is notified.
3615 	The team acquires a reference to the object.
3616 
3617 	\param data The data object.
3618 	\return \c true on success, \c false otherwise. Fails only when the supplied
3619 		data object is already associated with another owner.
3620 */
3621 bool
3622 team_associate_data(AssociatedData* data)
3623 {
3624 	return thread_get_current_thread()->team->AddData(data);
3625 }
3626 
3627 
3628 /*!	Dissociates data from the current team.
3629 	Balances an earlier call to team_associate_data().
3630 
3631 	\param data The data object.
3632 	\return \c true on success, \c false otherwise. Fails only when the data
3633 		object is not associated with the current team.
3634 */
3635 bool
3636 team_dissociate_data(AssociatedData* data)
3637 {
3638 	return thread_get_current_thread()->team->RemoveData(data);
3639 }
3640 
3641 
3642 //	#pragma mark - Public kernel API
3643 
3644 
/*!	Kernel API: loads an image with default priority, waiting until it has
	been fully loaded. Convenience wrapper around load_image_etc().
*/
thread_id
load_image(int32 argCount, const char** args, const char** env)
{
	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
}
3651 
3652 
3653 thread_id
3654 load_image_etc(int32 argCount, const char* const* args,
3655 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3656 {
3657 	// we need to flatten the args and environment
3658 
3659 	if (args == NULL)
3660 		return B_BAD_VALUE;
3661 
3662 	// determine total needed size
3663 	int32 argSize = 0;
3664 	for (int32 i = 0; i < argCount; i++)
3665 		argSize += strlen(args[i]) + 1;
3666 
3667 	int32 envCount = 0;
3668 	int32 envSize = 0;
3669 	while (env != NULL && env[envCount] != NULL)
3670 		envSize += strlen(env[envCount++]) + 1;
3671 
3672 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3673 	if (size > MAX_PROCESS_ARGS_SIZE)
3674 		return B_TOO_MANY_ARGS;
3675 
3676 	// allocate space
3677 	char** flatArgs = (char**)malloc(size);
3678 	if (flatArgs == NULL)
3679 		return B_NO_MEMORY;
3680 
3681 	char** slot = flatArgs;
3682 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3683 
3684 	// copy arguments and environment
3685 	for (int32 i = 0; i < argCount; i++) {
3686 		int32 argSize = strlen(args[i]) + 1;
3687 		memcpy(stringSpace, args[i], argSize);
3688 		*slot++ = stringSpace;
3689 		stringSpace += argSize;
3690 	}
3691 
3692 	*slot++ = NULL;
3693 
3694 	for (int32 i = 0; i < envCount; i++) {
3695 		int32 envSize = strlen(env[i]) + 1;
3696 		memcpy(stringSpace, env[i], envSize);
3697 		*slot++ = stringSpace;
3698 		stringSpace += envSize;
3699 	}
3700 
3701 	*slot++ = NULL;
3702 
3703 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3704 		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
3705 
3706 	free(flatArgs);
3707 		// load_image_internal() unset our variable if it took over ownership
3708 
3709 	return thread;
3710 }
3711 
3712 
3713 status_t
3714 wait_for_team(team_id id, status_t* _returnCode)
3715 {
3716 	// check whether the team exists
3717 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3718 
3719 	Team* team = team_get_team_struct_locked(id);
3720 	if (team == NULL)
3721 		return B_BAD_TEAM_ID;
3722 
3723 	id = team->id;
3724 
3725 	teamsLocker.Unlock();
3726 
3727 	// wait for the main thread (it has the same ID as the team)
3728 	return wait_for_thread(id, _returnCode);
3729 }
3730 
3731 
3732 status_t
3733 kill_team(team_id id)
3734 {
3735 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3736 
3737 	Team* team = team_get_team_struct_locked(id);
3738 	if (team == NULL)
3739 		return B_BAD_TEAM_ID;
3740 
3741 	id = team->id;
3742 
3743 	teamsLocker.Unlock();
3744 
3745 	if (team == sKernelTeam)
3746 		return B_NOT_ALLOWED;
3747 
3748 	// Just kill the team's main thread (it has same ID as the team). The
3749 	// cleanup code there will take care of the team.
3750 	return kill_thread(id);
3751 }
3752 
3753 
3754 status_t
3755 _get_team_info(team_id id, team_info* info, size_t size)
3756 {
3757 	// get the team
3758 	Team* team = Team::Get(id);
3759 	if (team == NULL)
3760 		return B_BAD_TEAM_ID;
3761 	BReference<Team> teamReference(team, true);
3762 
3763 	// fill in the info
3764 	return fill_team_info(team, info, size);
3765 }
3766 
3767 
3768 status_t
3769 _get_next_team_info(int32* cookie, team_info* info, size_t size)
3770 {
3771 	int32 slot = *cookie;
3772 	if (slot < 1)
3773 		slot = 1;
3774 
3775 	InterruptsSpinLocker locker(sTeamHashLock);
3776 
3777 	team_id lastTeamID = peek_next_thread_id();
3778 		// TODO: This is broken, since the id can wrap around!
3779 
3780 	// get next valid team
3781 	Team* team = NULL;
3782 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3783 		slot++;
3784 
3785 	if (team == NULL)
3786 		return B_BAD_TEAM_ID;
3787 
3788 	// get a reference to the team and unlock
3789 	BReference<Team> teamReference(team);
3790 	locker.Unlock();
3791 
3792 	// fill in the info
3793 	*cookie = ++slot;
3794 	return fill_team_info(team, info, size);
3795 }
3796 
3797 
/*!	Kernel counterpart of _user_get_team_usage_info(); in-kernel callers are
	not subjected to a permission check (flags 0 instead of
	\c B_CHECK_PERMISSION).
*/
status_t
_get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
{
	if (size != sizeof(team_usage_info))
		return B_BAD_VALUE;

	return common_get_team_usage_info(id, who, info, 0);
}
3806 
3807 
/*!	In-kernel implementation of getpid(): returns the calling team's ID. */
pid_t
getpid(void)
{
	return thread_get_current_thread()->team->id;
}
3813 
3814 
/*!	In-kernel implementation of getppid(): returns the ID of the calling
	team's parent. The team lock keeps the parent pointer stable while it is
	dereferenced.
*/
pid_t
getppid(void)
{
	Team* team = thread_get_current_thread()->team;

	TeamLocker teamLocker(team);

	return team->parent->id;
}
3824 
3825 
3826 pid_t
3827 getpgid(pid_t id)
3828 {
3829 	if (id < 0) {
3830 		errno = EINVAL;
3831 		return -1;
3832 	}
3833 
3834 	if (id == 0) {
3835 		// get process group of the calling process
3836 		Team* team = thread_get_current_thread()->team;
3837 		TeamLocker teamLocker(team);
3838 		return team->group_id;
3839 	}
3840 
3841 	// get the team
3842 	Team* team = Team::GetAndLock(id);
3843 	if (team == NULL) {
3844 		errno = ESRCH;
3845 		return -1;
3846 	}
3847 
3848 	// get the team's process group ID
3849 	pid_t groupID = team->group_id;
3850 
3851 	team->UnlockAndReleaseReference();
3852 
3853 	return groupID;
3854 }
3855 
3856 
3857 pid_t
3858 getsid(pid_t id)
3859 {
3860 	if (id < 0) {
3861 		errno = EINVAL;
3862 		return -1;
3863 	}
3864 
3865 	if (id == 0) {
3866 		// get session of the calling process
3867 		Team* team = thread_get_current_thread()->team;
3868 		TeamLocker teamLocker(team);
3869 		return team->session_id;
3870 	}
3871 
3872 	// get the team
3873 	Team* team = Team::GetAndLock(id);
3874 	if (team == NULL) {
3875 		errno = ESRCH;
3876 		return -1;
3877 	}
3878 
3879 	// get the team's session ID
3880 	pid_t sessionID = team->session_id;
3881 
3882 	team->UnlockAndReleaseReference();
3883 
3884 	return sessionID;
3885 }
3886 
3887 
3888 //	#pragma mark - User syscalls
3889 
3890 
/*!	Syscall: performs an exec*() for the calling team, replacing it with the
	image at \a userPath. Returns only in case of error.
*/
status_t
_user_exec(const char* userPath, const char* const* userFlatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char path[B_PATH_NAME_LENGTH];

	// validate the userland pointers and copy the path to the kernel stack
	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
		return B_BAD_ADDRESS;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);

	if (error == B_OK) {
		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
			envCount, umask);
			// this one only returns in case of error
	}

	free(flatArgs);
	return error;
}
3917 
3918 
/*!	Syscall backing fork(); the actual work happens in fork_team(). */
thread_id
_user_fork(void)
{
	return fork_team();
}
3924 
3925 
3926 pid_t
3927 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo,
3928 	team_usage_info* usageInfo)
3929 {
3930 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
3931 		return B_BAD_ADDRESS;
3932 	if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo))
3933 		return B_BAD_ADDRESS;
3934 
3935 	siginfo_t info;
3936 	team_usage_info usage_info;
3937 	pid_t foundChild = wait_for_child(child, flags, info, usage_info);
3938 	if (foundChild < 0)
3939 		return syscall_restart_handle_post(foundChild);
3940 
3941 	// copy info back to userland
3942 	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
3943 		return B_BAD_ADDRESS;
3944 	// copy usage_info back to userland
3945 	if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info,
3946 		sizeof(usage_info)) != B_OK) {
3947 		return B_BAD_ADDRESS;
3948 	}
3949 
3950 	return foundChild;
3951 }
3952 
3953 
3954 pid_t
3955 _user_process_info(pid_t process, int32 which)
3956 {
3957 	// we only allow to return the parent of the current process
3958 	if (which == PARENT_ID
3959 		&& process != 0 && process != thread_get_current_thread()->team->id)
3960 		return B_BAD_VALUE;
3961 
3962 	pid_t result;
3963 	switch (which) {
3964 		case SESSION_ID:
3965 			result = getsid(process);
3966 			break;
3967 		case GROUP_ID:
3968 			result = getpgid(process);
3969 			break;
3970 		case PARENT_ID:
3971 			result = getppid();
3972 			break;
3973 		default:
3974 			return B_BAD_VALUE;
3975 	}
3976 
3977 	return result >= 0 ? result : errno;
3978 }
3979 
3980 
/*!	Syscall backing setpgid(): moves the team given by \a processID (0 == the
	calling team) into the process group \a groupID (0 == the target team's
	ID), creating the group if necessary.
	\return The resulting group ID on success, an error code otherwise.
*/
pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the target
	//   process as group leader.
	// * Set the target process' process group to an already existing one in the
	//   same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			// (a brand-new group isn't published yet, so no one else can be
			// holding its lock)
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock in
			// the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child of
			// the calling team and in the same session. (If that's the case it
			// isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child, when the child has
			// already executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different session.
			// That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll();

		return group->id;
	}
}
4132 
4133 
/*!	Syscall backing setsid(): makes the calling team the leader of a new
	session and of a new process group with the team's ID. Fails if the team
	is already a process group leader.
	\return The new group/session ID on success, an error code otherwise.
*/
pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll();

	return group->id;
}
4173 
4174 
4175 status_t
4176 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4177 {
4178 	status_t returnCode;
4179 	status_t status;
4180 
4181 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4182 		return B_BAD_ADDRESS;
4183 
4184 	status = wait_for_team(id, &returnCode);
4185 	if (status >= B_OK && _userReturnCode != NULL) {
4186 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4187 				!= B_OK)
4188 			return B_BAD_ADDRESS;
4189 		return B_OK;
4190 	}
4191 
4192 	return syscall_restart_handle_post(status);
4193 }
4194 
4195 
/*!	Syscall backing load_image(): copies the flattened argument/environment
	block from userland and creates a new team via load_image_internal().
	\return The ID of the new team's main thread, or an error code.
*/
thread_id
_user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, int32 priority, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));

	// at least one argument (the program to load) is required
	if (argCount < 1)
		return B_BAD_VALUE;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);
	if (error != B_OK)
		return error;

	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
		errorToken);

	free(flatArgs);
		// load_image_internal() unset our variable if it took over ownership

	return thread;
}
4222 
4223 
/*!	Syscall: terminates the calling team with the given exit status. The
	calling thread does not return to userland.
*/
void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status
	TeamLocker teamLocker(team);

	// only the first recorded exit status sticks (e.g. a fatal signal may
	// have initialized it already)
	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();

	// Send this thread a SIGKILL. This makes sure the thread will not return to
	// userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}
4257 
4258 
/*!	Syscall: kills the given team. Simply forwards to kill_team(), which does
	all validation itself.
*/
status_t
_user_kill_team(team_id team)
{
	return kill_team(team);
}
4264 
4265 
4266 status_t
4267 _user_get_team_info(team_id id, team_info* userInfo)
4268 {
4269 	status_t status;
4270 	team_info info;
4271 
4272 	if (!IS_USER_ADDRESS(userInfo))
4273 		return B_BAD_ADDRESS;
4274 
4275 	status = _get_team_info(id, &info, sizeof(team_info));
4276 	if (status == B_OK) {
4277 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4278 			return B_BAD_ADDRESS;
4279 	}
4280 
4281 	return status;
4282 }
4283 
4284 
4285 status_t
4286 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
4287 {
4288 	status_t status;
4289 	team_info info;
4290 	int32 cookie;
4291 
4292 	if (!IS_USER_ADDRESS(userCookie)
4293 		|| !IS_USER_ADDRESS(userInfo)
4294 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4295 		return B_BAD_ADDRESS;
4296 
4297 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
4298 	if (status != B_OK)
4299 		return status;
4300 
4301 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4302 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4303 		return B_BAD_ADDRESS;
4304 
4305 	return status;
4306 }
4307 
4308 
/*!	Syscall: returns the calling team's ID. */
team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}
4314 
4315 
4316 status_t
4317 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4318 	size_t size)
4319 {
4320 	if (size != sizeof(team_usage_info))
4321 		return B_BAD_VALUE;
4322 
4323 	team_usage_info info;
4324 	status_t status = common_get_team_usage_info(team, who, &info,
4325 		B_CHECK_PERMISSION);
4326 
4327 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4328 		|| user_memcpy(userInfo, &info, size) != B_OK) {
4329 		return B_BAD_ADDRESS;
4330 	}
4331 
4332 	return status;
4333 }
4334 
4335 
/*!	Syscall entry point: gathers extended information about team \a teamID
	into a flattened KMessage and copies it to the userland \a buffer.
	\param flags Selects which info groups to include; currently only
		B_TEAM_INFO_BASIC is implemented (see TODO below).
	\param buffer Userland destination for the flattened message; may be
		NULL only if \a size is 0.
	\param size Capacity of \a buffer in bytes.
	\param _sizeNeeded Userland out-parameter; always set to the flattened
		message size when reachable, even when B_BUFFER_OVERFLOW is returned,
		so the caller can retry with a large enough buffer.
*/
status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// allocate memory for a copy of the needed team data
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		};

		ExtendedTeamData* teamClone
			= (ExtendedTeamData*)malloc(sizeof(ExtendedTeamData));
			// It would be nicer to use new, but then we'd have to use
			// ObjectDeleter and declare the structure outside of the function
			// due to template parameter restrictions.
		if (teamClone == NULL)
			return B_NO_MEMORY;
		MemoryDeleter teamCloneDeleter(teamClone);

		io_context* ioContext;
		{
			// get the team structure
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			// Both the reference and the lock are released when these
			// stack objects go out of scope at the end of this block.
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);

			// copy the data
			// The snapshot is taken under the team lock, so the fields are
			// mutually consistent; the KMessage is built after unlocking.
			teamClone->id = team->id;
			strlcpy(teamClone->name, team->Name(), sizeof(teamClone->name));
			teamClone->group_id = team->group_id;
			teamClone->session_id = team->session_id;
			teamClone->real_uid = team->real_uid;
			teamClone->real_gid = team->real_gid;
			teamClone->effective_uid = team->effective_uid;
			teamClone->effective_gid = team->effective_gid;

			// also fetch a reference to the I/O context
			// The reference keeps ioContext alive after the team lock is
			// dropped, so the cwd can be read below without holding it.
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		CObjectDeleter<io_context> ioContextPutter(ioContext,
			&vfs_put_io_context);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone->id) != B_OK
			|| info.AddString("name", teamClone->name) != B_OK
			|| info.AddInt32("process group", teamClone->group_id) != B_OK
			|| info.AddInt32("session", teamClone->session_id) != B_OK
			|| info.AddInt32("uid", teamClone->real_uid) != B_OK
			|| info.AddInt32("gid", teamClone->real_gid) != B_OK
			|| info.AddInt32("euid", teamClone->effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone->effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			// The context's mutex guards the cwd vnode pointer while the
			// node ref is extracted.
			MutexLocker ioContextLocker(ioContext->io_mutex);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
4438