xref: /haiku/src/system/kernel/team.cpp (revision d1f885b435e9892ac028f4be2b80536b9dd37413)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 /*!	Team functions */
13 
14 
15 #include <team.h>
16 
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22 
23 #include <OS.h>
24 
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27 
28 #include <extended_system_info_defs.h>
29 
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <sem.h>
47 #include <syscall_process_info.h>
48 #include <syscall_restart.h>
49 #include <syscalls.h>
50 #include <tls.h>
51 #include <tracing.h>
52 #include <user_runtime.h>
53 #include <user_thread.h>
54 #include <usergroup.h>
55 #include <vfs.h>
56 #include <vm/vm.h>
57 #include <vm/VMAddressSpace.h>
58 #include <util/AutoLock.h>
59 
60 #include "TeamThreadTables.h"
61 
62 
63 //#define TRACE_TEAM
64 #ifdef TRACE_TEAM
65 #	define TRACE(x) dprintf x
66 #else
67 #	define TRACE(x) ;
68 #endif
69 
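// A usage sketch (illustrative, not part of the original source): the
// double-parentheses form lets the macro forward a full printf-style
// argument list to dprintf() when TRACE_TEAM is defined, e.g.:
//
//	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
//		thread->id));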
70 
71 struct team_key {
72 	team_id id;
73 };
74 
75 struct team_arg {
76 	char	*path;
77 	char	**flat_args;
78 	size_t	flat_args_size;
79 	uint32	arg_count;
80 	uint32	env_count;
81 	mode_t	umask;
82 	uint32	flags;
83 	port_id	error_port;
84 	uint32	error_token;
85 };
86 
87 #define TEAM_ARGS_FLAG_NO_ASLR	0x01
88 
89 
90 namespace {
91 
92 
93 class TeamNotificationService : public DefaultNotificationService {
94 public:
95 							TeamNotificationService();
96 
97 			void			Notify(uint32 eventCode, Team* team);
98 };
99 
100 
101 // #pragma mark - TeamTable
102 
103 
104 typedef BKernel::TeamThreadTable<Team> TeamTable;
105 
106 
107 // #pragma mark - ProcessGroupHashDefinition
108 
109 
110 struct ProcessGroupHashDefinition {
111 	typedef pid_t			KeyType;
112 	typedef	ProcessGroup	ValueType;
113 
114 	size_t HashKey(pid_t key) const
115 	{
116 		return key;
117 	}
118 
119 	size_t Hash(ProcessGroup* value) const
120 	{
121 		return HashKey(value->id);
122 	}
123 
124 	bool Compare(pid_t key, ProcessGroup* value) const
125 	{
126 		return value->id == key;
127 	}
128 
129 	ProcessGroup*& GetLink(ProcessGroup* value) const
130 	{
131 		return value->next;
132 	}
133 };
134 
135 typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
136 
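// Usage sketch (illustrative; see ProcessGroup::Get() and PublishLocked()
// below): the table is keyed by pid_t and chains its entries through
// ProcessGroup::next, so lookups and insertions take the form:
//
//	ProcessGroup* group = sGroupHash.Lookup(id);
//	sGroupHash.InsertUnchecked(group);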
137 
138 }	// unnamed namespace
139 
140 
141 // #pragma mark -
142 
143 
144 // the team_id -> Team hash table and the lock protecting it
145 static TeamTable sTeamHash;
146 static spinlock sTeamHashLock = B_SPINLOCK_INITIALIZER;
147 
148 // the pid_t -> ProcessGroup hash table and the lock protecting it
149 static ProcessGroupHashTable sGroupHash;
150 static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;
151 
152 static Team* sKernelTeam = NULL;
153 
154 // A list of process groups of children of dying session leaders that need to
155 // be signalled, if they have become orphaned and contain stopped processes.
156 static ProcessGroupList sOrphanedCheckProcessGroups;
157 static mutex sOrphanedCheckLock
158 	= MUTEX_INITIALIZER("orphaned process group check");
159 
160 // some arbitrarily chosen limits -- should probably depend on the available
161 // memory (sMaxTeams is enforced on team creation; a memory-based limit isn't)
162 static int32 sMaxTeams = 2048;
163 static int32 sUsedTeams = 1;
164 
165 static TeamNotificationService sNotificationService;
166 
167 static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
168 static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
169 
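// Assuming the common 4 KiB B_PAGE_SIZE, the reservation amounts to 512 KiB
// of address space per team, of which 16 KiB are committed up front by
// create_team_user_data() below.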
170 
171 // #pragma mark - TeamListIterator
172 
173 
174 TeamListIterator::TeamListIterator()
175 {
176 	// queue the entry
177 	InterruptsSpinLocker locker(sTeamHashLock);
178 	sTeamHash.InsertIteratorEntry(&fEntry);
179 }
180 
181 
182 TeamListIterator::~TeamListIterator()
183 {
184 	// remove the entry
185 	InterruptsSpinLocker locker(sTeamHashLock);
186 	sTeamHash.RemoveIteratorEntry(&fEntry);
187 }
188 
189 
190 Team*
191 TeamListIterator::Next()
192 {
193 	// get the next team -- if there is one, get reference for it
194 	InterruptsSpinLocker locker(sTeamHashLock);
195 	Team* team = sTeamHash.NextElement(&fEntry);
196 	if (team != NULL)
197 		team->AcquireReference();
198 
199 	return team;
200 }
201 
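/*!	Example (an illustrative sketch, not part of the original source):
	iterating over all teams. Next() returns each team with a reference
	acquired, which the caller has to release again:

	\code
	TeamListIterator iterator;
	while (Team* team = iterator.Next()) {
		// ... inspect the team ...
		team->ReleaseReference();
	}
	\endcode
*/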
202 
203 // #pragma mark - Tracing
204 
205 
206 #if TEAM_TRACING
207 namespace TeamTracing {
208 
209 class TeamForked : public AbstractTraceEntry {
210 public:
211 	TeamForked(thread_id forkedThread)
212 		:
213 		fForkedThread(forkedThread)
214 	{
215 		Initialized();
216 	}
217 
218 	virtual void AddDump(TraceOutput& out)
219 	{
220 		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
221 	}
222 
223 private:
224 	thread_id			fForkedThread;
225 };
226 
227 
228 class ExecTeam : public AbstractTraceEntry {
229 public:
230 	ExecTeam(const char* path, int32 argCount, const char* const* args,
231 			int32 envCount, const char* const* env)
232 		:
233 		fArgCount(argCount),
234 		fArgs(NULL)
235 	{
236 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
237 			false);
238 
239 		// determine the buffer size we need for the args
240 		size_t argBufferSize = 0;
241 		for (int32 i = 0; i < argCount; i++)
242 			argBufferSize += strlen(args[i]) + 1;
243 
244 		// allocate a buffer
245 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
246 		if (fArgs) {
247 			char* buffer = fArgs;
248 			for (int32 i = 0; i < argCount; i++) {
249 				size_t argSize = strlen(args[i]) + 1;
250 				memcpy(buffer, args[i], argSize);
251 				buffer += argSize;
252 			}
253 		}
254 
255 		// ignore env for the time being
256 		(void)envCount;
257 		(void)env;
258 
259 		Initialized();
260 	}
261 
262 	virtual void AddDump(TraceOutput& out)
263 	{
264 		out.Print("team exec, \"%p\", args:", fPath);
265 
266 		if (fArgs != NULL) {
267 			char* args = fArgs;
268 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
269 				out.Print(" \"%s\"", args);
270 				args += strlen(args) + 1;
271 			}
272 		} else
273 			out.Print(" <too long>");
274 	}
275 
276 private:
277 	char*	fPath;
278 	int32	fArgCount;
279 	char*	fArgs;
280 };
281 
282 
283 static const char*
284 job_control_state_name(job_control_state state)
285 {
286 	switch (state) {
287 		case JOB_CONTROL_STATE_NONE:
288 			return "none";
289 		case JOB_CONTROL_STATE_STOPPED:
290 			return "stopped";
291 		case JOB_CONTROL_STATE_CONTINUED:
292 			return "continued";
293 		case JOB_CONTROL_STATE_DEAD:
294 			return "dead";
295 		default:
296 			return "invalid";
297 	}
298 }
299 
300 
301 class SetJobControlState : public AbstractTraceEntry {
302 public:
303 	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
304 		:
305 		fTeam(team),
306 		fNewState(newState),
307 		fSignal(signal != NULL ? signal->Number() : 0)
308 	{
309 		Initialized();
310 	}
311 
312 	virtual void AddDump(TraceOutput& out)
313 	{
314 		out.Print("team set job control state, team %" B_PRId32 ", "
315 			"new state: %s, signal: %d",
316 			fTeam, job_control_state_name(fNewState), fSignal);
317 	}
318 
319 private:
320 	team_id				fTeam;
321 	job_control_state	fNewState;
322 	int					fSignal;
323 };
324 
325 
326 class WaitForChild : public AbstractTraceEntry {
327 public:
328 	WaitForChild(pid_t child, uint32 flags)
329 		:
330 		fChild(child),
331 		fFlags(flags)
332 	{
333 		Initialized();
334 	}
335 
336 	virtual void AddDump(TraceOutput& out)
337 	{
338 		out.Print("team wait for child, child: %" B_PRId32 ", "
339 			"flags: %#" B_PRIx32, fChild, fFlags);
340 	}
341 
342 private:
343 	pid_t	fChild;
344 	uint32	fFlags;
345 };
346 
347 
348 class WaitForChildDone : public AbstractTraceEntry {
349 public:
350 	WaitForChildDone(const job_control_entry& entry)
351 		:
352 		fState(entry.state),
353 		fTeam(entry.thread),
354 		fStatus(entry.status),
355 		fReason(entry.reason),
356 		fSignal(entry.signal)
357 	{
358 		Initialized();
359 	}
360 
361 	WaitForChildDone(status_t error)
362 		:
363 		fTeam(error)
364 	{
365 		Initialized();
366 	}
367 
368 	virtual void AddDump(TraceOutput& out)
369 	{
370 		if (fTeam >= 0) {
371 			out.Print("team wait for child done, team: %" B_PRId32 ", "
372 				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
373 				fTeam, job_control_state_name(fState), fStatus, fReason,
374 				fSignal);
375 		} else {
376 			out.Print("team wait for child failed, error: "
377 				"%#" B_PRIx32 ", ", fTeam);
378 		}
379 	}
380 
381 private:
382 	job_control_state	fState;
383 	team_id				fTeam;
384 	status_t			fStatus;
385 	uint16				fReason;
386 	uint16				fSignal;
387 };
388 
389 }	// namespace TeamTracing
390 
391 #	define T(x) new(std::nothrow) TeamTracing::x;
392 #else
393 #	define T(x) ;
394 #endif
395 
396 
397 //	#pragma mark - TeamNotificationService
398 
399 
400 TeamNotificationService::TeamNotificationService()
401 	: DefaultNotificationService("teams")
402 {
403 }
404 
405 
406 void
407 TeamNotificationService::Notify(uint32 eventCode, Team* team)
408 {
409 	char eventBuffer[128];
410 	KMessage event;
411 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
412 	event.AddInt32("event", eventCode);
413 	event.AddInt32("team", team->id);
414 	event.AddPointer("teamStruct", team);
415 
416 	DefaultNotificationService::Notify(event, eventCode);
417 }
418 
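// Example (sketch): this is how the rest of this file publishes team
// lifecycle events to registered listeners, e.g. in load_image_internal()
// and exec_team() below:
//
//	sNotificationService.Notify(TEAM_ADDED, team);
//	sNotificationService.Notify(TEAM_EXEC, team);
//	sNotificationService.Notify(TEAM_REMOVED, team);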
419 
420 //	#pragma mark - Team
421 
422 
423 Team::Team(team_id id, bool kernel)
424 {
425 	// allocate an ID
426 	this->id = id;
427 	visible = true;
428 	serial_number = -1;
429 
430 	// init mutex
431 	if (kernel) {
432 		mutex_init(&fLock, "Team:kernel");
433 	} else {
434 		char lockName[16];
435 		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
436 		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
437 	}
438 
439 	hash_next = siblings_next = children = parent = NULL;
440 	fName[0] = '\0';
441 	fArgs[0] = '\0';
442 	num_threads = 0;
443 	io_context = NULL;
444 	address_space = NULL;
445 	realtime_sem_context = NULL;
446 	xsi_sem_context = NULL;
447 	thread_list = NULL;
448 	main_thread = NULL;
449 	loading_info = NULL;
450 	state = TEAM_STATE_BIRTH;
451 	flags = 0;
452 	death_entry = NULL;
453 	user_data_area = -1;
454 	user_data = 0;
455 	used_user_data = 0;
456 	user_data_size = 0;
457 	free_user_threads = NULL;
458 
459 	commpage_address = NULL;
460 
461 	supplementary_groups = NULL;
462 	supplementary_group_count = 0;
463 
464 	dead_threads_kernel_time = 0;
465 	dead_threads_user_time = 0;
466 	cpu_clock_offset = 0;
467 
468 	// dead threads
469 	list_init(&dead_threads);
470 	dead_threads_count = 0;
471 
472 	// dead children
473 	dead_children.count = 0;
474 	dead_children.kernel_time = 0;
475 	dead_children.user_time = 0;
476 
477 	// job control entry
478 	job_control_entry = new(nothrow) ::job_control_entry;
479 	if (job_control_entry != NULL) {
480 		job_control_entry->state = JOB_CONTROL_STATE_NONE;
481 		job_control_entry->thread = id;
482 		job_control_entry->team = this;
483 	}
484 
485 	// exit status -- setting initialized to false suffices
486 	exit.initialized = false;
487 
488 	list_init(&sem_list);
489 	list_init_etc(&port_list, port_team_link_offset());
490 	list_init(&image_list);
491 	list_init(&watcher_list);
492 
493 	clear_team_debug_info(&debug_info, true);
494 
495 	// init dead children condition variable
496 	dead_children.condition_variable.Init(&dead_children, "team children");
497 
498 	B_INITIALIZE_SPINLOCK(&time_lock);
499 	B_INITIALIZE_SPINLOCK(&signal_lock);
500 
501 	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
502 		kernel ? -1 : MAX_QUEUED_SIGNALS);
503 	memset(fSignalActions, 0, sizeof(fSignalActions));
504 
505 	fUserDefinedTimerCount = 0;
506 
507 	fCoreDumpCondition = NULL;
508 }
509 
510 
511 Team::~Team()
512 {
513 	// get rid of all associated data
514 	PrepareForDeletion();
515 
516 	if (io_context != NULL)
517 		vfs_put_io_context(io_context);
518 	delete_owned_ports(this);
519 	sem_delete_owned_sems(this);
520 
521 	DeleteUserTimers(false);
522 
523 	fPendingSignals.Clear();
524 
525 	if (fQueuedSignalsCounter != NULL)
526 		fQueuedSignalsCounter->ReleaseReference();
527 
528 	while (thread_death_entry* threadDeathEntry
529 			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
530 		free(threadDeathEntry);
531 	}
532 
533 	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
534 		delete entry;
535 
536 	while (free_user_thread* entry = free_user_threads) {
537 		free_user_threads = entry->next;
538 		free(entry);
539 	}
540 
541 	malloc_referenced_release(supplementary_groups);
542 
543 	delete job_control_entry;
544 		// usually already NULL and transferred to the parent
545 
546 	mutex_destroy(&fLock);
547 }
548 
549 
550 /*static*/ Team*
551 Team::Create(team_id id, const char* name, bool kernel)
552 {
553 	// create the team object
554 	Team* team = new(std::nothrow) Team(id, kernel);
555 	if (team == NULL)
556 		return NULL;
557 	ObjectDeleter<Team> teamDeleter(team);
558 
559 	if (name != NULL)
560 		team->SetName(name);
561 
562 	// check initialization
563 	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
564 		return NULL;
565 
566 	// finish initialization (arch specifics)
567 	if (arch_team_init_team_struct(team, kernel) != B_OK)
568 		return NULL;
569 
570 	if (!kernel) {
571 		status_t error = user_timer_create_team_timers(team);
572 		if (error != B_OK)
573 			return NULL;
574 	}
575 
576 	// everything went fine
577 	return teamDeleter.Detach();
578 }
579 
580 
581 /*!	\brief Returns the team with the given ID.
582 	Returns a reference to the team.
583 	Team and thread spinlock must not be held.
584 */
585 /*static*/ Team*
586 Team::Get(team_id id)
587 {
588 	if (id == B_CURRENT_TEAM) {
589 		Team* team = thread_get_current_thread()->team;
590 		team->AcquireReference();
591 		return team;
592 	}
593 
594 	InterruptsSpinLocker locker(sTeamHashLock);
595 	Team* team = sTeamHash.Lookup(id);
596 	if (team != NULL)
597 		team->AcquireReference();
598 	return team;
599 }
600 
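/*!	Example (an illustrative sketch): looking up a team by ID. The caller owns
	the returned reference; a BReference can take care of releasing it:

	\code
	Team* team = Team::Get(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);
	// ... use the team; the reference is released automatically ...
	\endcode
*/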
601 
602 /*!	\brief Returns the team with the given ID in a locked state.
603 	Returns a reference to the team.
604 	Team and thread spinlock must not be held.
605 */
606 /*static*/ Team*
607 Team::GetAndLock(team_id id)
608 {
609 	// get the team
610 	Team* team = Get(id);
611 	if (team == NULL)
612 		return NULL;
613 
614 	// lock it
615 	team->Lock();
616 
617 	// only return the team when it isn't already dying
618 	if (team->state >= TEAM_STATE_SHUTDOWN) {
619 		team->Unlock();
620 		team->ReleaseReference();
621 		return NULL;
622 	}
623 
624 	return team;
625 }
626 
627 
628 /*!	Locks the team and its parent team (if any).
629 	The caller must hold a reference to the team or otherwise make sure that
630 	it won't be deleted.
631 	If the team doesn't have a parent, only the team itself is locked. If the
632 	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
633 	only the team itself is locked.
634 
635 	\param dontLockParentIfKernel If \c true, the team's parent team is only
636 		locked, if it is not the kernel team.
637 */
638 void
639 Team::LockTeamAndParent(bool dontLockParentIfKernel)
640 {
641 	// The locking order is parent -> child. Since the parent can change as long
642 	// as we don't lock the team, we need to do a trial and error loop.
643 	Lock();
644 
645 	while (true) {
646 		// If the team doesn't have a parent, we're done. Otherwise try to lock
647 		// the parent. This will succeed in most cases, simplifying things.
648 		Team* parent = this->parent;
649 		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
650 			|| parent->TryLock()) {
651 			return;
652 		}
653 
654 		// get a temporary reference to the parent, unlock this team, lock the
655 		// parent, and re-lock this team
656 		BReference<Team> parentReference(parent);
657 
658 		Unlock();
659 		parent->Lock();
660 		Lock();
661 
662 		// If the parent hasn't changed in the meantime, we're done.
663 		if (this->parent == parent)
664 			return;
665 
666 		// The parent has changed -- unlock and retry.
667 		parent->Unlock();
668 	}
669 }
670 
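/*!	Example (an illustrative sketch): the method pairs with
	UnlockTeamAndParent(), as used by ProcessGroup::IsOrphaned() below:

	\code
	team->LockTeamAndParent(false);
	// ... both the team and its parent (if any) are locked ...
	team->UnlockTeamAndParent();
	\endcode
*/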
671 
672 /*!	Unlocks the team and its parent team (if any).
673 */
674 void
675 Team::UnlockTeamAndParent()
676 {
677 	if (parent != NULL)
678 		parent->Unlock();
679 
680 	Unlock();
681 }
682 
683 
684 /*!	Locks the team, its parent team (if any), and the team's process group.
685 	The caller must hold a reference to the team or otherwise make sure that
686 	it won't be deleted.
687 	If the team doesn't have a parent, only the team itself is locked.
688 */
689 void
690 Team::LockTeamParentAndProcessGroup()
691 {
692 	LockTeamAndProcessGroup();
693 
694 	// We hold the group's and the team's lock, but not the parent team's lock.
695 	// If we have a parent, try to lock it.
696 	if (this->parent == NULL || this->parent->TryLock())
697 		return;
698 
699 	// No success -- unlock the team and let LockTeamAndParent() do the rest of
700 	// the job.
701 	Unlock();
702 	LockTeamAndParent(false);
703 }
704 
705 
706 /*!	Unlocks the team, its parent team (if any), and the team's process group.
707 */
708 void
709 Team::UnlockTeamParentAndProcessGroup()
710 {
711 	group->Unlock();
712 
713 	if (parent != NULL)
714 		parent->Unlock();
715 
716 	Unlock();
717 }
718 
719 
720 void
721 Team::LockTeamAndProcessGroup()
722 {
723 	// The locking order is process group -> child. Since the process group can
724 	// change as long as we don't lock the team, we need to do a trial and error
725 	// loop.
726 	Lock();
727 
728 	while (true) {
729 		// Try to lock the group. This will succeed in most cases, simplifying
730 		// things.
731 		ProcessGroup* group = this->group;
732 		if (group->TryLock())
733 			return;
734 
735 		// get a temporary reference to the group, unlock this team, lock the
736 		// group, and re-lock this team
737 		BReference<ProcessGroup> groupReference(group);
738 
739 		Unlock();
740 		group->Lock();
741 		Lock();
742 
743 		// If the group hasn't changed in the meantime, we're done.
744 		if (this->group == group)
745 			return;
746 
747 		// The group has changed -- unlock and retry.
748 		group->Unlock();
749 	}
750 }
751 
752 
753 void
754 Team::UnlockTeamAndProcessGroup()
755 {
756 	group->Unlock();
757 	Unlock();
758 }
759 
760 
761 void
762 Team::SetName(const char* name)
763 {
764 	if (const char* lastSlash = strrchr(name, '/'))
765 		name = lastSlash + 1;
766 
767 	strlcpy(fName, name, B_OS_NAME_LENGTH);
768 }
769 
770 
771 void
772 Team::SetArgs(const char* args)
773 {
774 	strlcpy(fArgs, args, sizeof(fArgs));
775 }
776 
777 
778 void
779 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
780 {
781 	fArgs[0] = '\0';
782 	strlcpy(fArgs, path, sizeof(fArgs));
783 	for (int i = 0; i < otherArgCount; i++) {
784 		strlcat(fArgs, " ", sizeof(fArgs));
785 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
786 	}
787 }
788 
789 
790 void
791 Team::ResetSignalsOnExec()
792 {
793 	// We are supposed to keep pending signals. Signal actions shall be reset
794 	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
795 	// (for SIGCHLD it's implementation-defined). Others shall be reset to
796 	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
797 	// flags, but since there aren't any handlers, they make little sense, so
798 	// we clear them.
799 
800 	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
801 		struct sigaction& action = SignalActionFor(i);
802 		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
803 			action.sa_handler = SIG_DFL;
804 
805 		action.sa_mask = 0;
806 		action.sa_flags = 0;
807 		action.sa_userdata = NULL;
808 	}
809 }
810 
811 
812 void
813 Team::InheritSignalActions(Team* parent)
814 {
815 	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
816 }
817 
818 
819 /*!	Adds the given user timer to the team and, if user-defined, assigns it an
820 	ID.
821 
822 	The caller must hold the team's lock.
823 
824 	\param timer The timer to be added. If it doesn't have an ID yet, it is
825 		considered user-defined and will be assigned an ID.
826 	\return \c B_OK, if the timer was added successfully, another error code
827 		otherwise.
828 */
829 status_t
830 Team::AddUserTimer(UserTimer* timer)
831 {
832 	// don't allow addition of timers when already shutting the team down
833 	if (state >= TEAM_STATE_SHUTDOWN)
834 		return B_BAD_TEAM_ID;
835 
836 	// If the timer is user-defined, check timer limit and increment
837 	// user-defined count.
838 	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
839 		return EAGAIN;
840 
841 	fUserTimers.AddTimer(timer);
842 
843 	return B_OK;
844 }
845 
846 
847 /*!	Removes the given user timer from the team.
848 
849 	The caller must hold the team's lock.
850 
851 	\param timer The timer to be removed.
852 
853 */
854 void
855 Team::RemoveUserTimer(UserTimer* timer)
856 {
857 	fUserTimers.RemoveTimer(timer);
858 
859 	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
860 		UserDefinedTimersRemoved(1);
861 }
862 
863 
864 /*!	Deletes all (or all user-defined) user timers of the team.
865 
866 	Timer's belonging to the team's threads are not affected.
867 	The caller must hold the team's lock.
868 
869 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
870 		otherwise all timers are deleted.
871 */
872 void
873 Team::DeleteUserTimers(bool userDefinedOnly)
874 {
875 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
876 	UserDefinedTimersRemoved(count);
877 }
878 
879 
880 /*!	If not at the limit yet, increments the team's user-defined timer count.
881 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
882 */
883 bool
884 Team::CheckAddUserDefinedTimer()
885 {
886 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
887 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
888 		atomic_add(&fUserDefinedTimerCount, -1);
889 		return false;
890 	}
891 
892 	return true;
893 }
894 
895 
896 /*!	Subtracts the given count from the team's user-defined timer count.
897 	\param count The count to subtract.
898 */
899 void
900 Team::UserDefinedTimersRemoved(int32 count)
901 {
902 	atomic_add(&fUserDefinedTimerCount, -count);
903 }
904 
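// Sketch of the counting protocol (illustrative): AddUserTimer() above calls
// CheckAddUserDefinedTimer() for user-defined timers only (i.e. those without
// an ID yet), and RemoveUserTimer()/DeleteUserTimers() balance the counter
// again via UserDefinedTimersRemoved():
//
//	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
//		return EAGAIN;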
905 
906 void
907 Team::DeactivateCPUTimeUserTimers()
908 {
909 	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
910 		timer->Deactivate();
911 
912 	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
913 		timer->Deactivate();
914 }
915 
916 
917 /*!	Returns the team's current total CPU time (kernel + user + offset).
918 
919 	The caller must hold \c time_lock.
920 
921 	\param ignoreCurrentRun If \c true and the current thread is one of the
922 		team's threads, don't add the time since the last time \c last_time was
923 		updated. Should be used in "thread unscheduled" scheduler callbacks,
924 		since although the thread is still running at that time, its time has
925 		already been stopped.
926 	\return The team's current total CPU time.
927 */
928 bigtime_t
929 Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
930 {
931 	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
932 		+ dead_threads_user_time;
933 
934 	Thread* currentThread = thread_get_current_thread();
935 	bigtime_t now = system_time();
936 
937 	for (Thread* thread = thread_list; thread != NULL;
938 			thread = thread->team_next) {
939 		bool alreadyLocked = thread == lockedThread;
940 		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
941 		time += thread->kernel_time + thread->user_time;
942 
943 		if (thread->last_time != 0) {
944 			if (!ignoreCurrentRun || thread != currentThread)
945 				time += now - thread->last_time;
946 		}
947 
948 		if (alreadyLocked)
949 			threadTimeLocker.Detach();
950 	}
951 
952 	return time;
953 }
954 
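/*!	Example (an illustrative sketch): reading a team's total CPU time. The
	team's \c time_lock must be held across the call:

	\code
	InterruptsSpinLocker timeLocker(team->time_lock);
	bigtime_t total = team->CPUTime(false, NULL);
	\endcode
*/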
955 
956 /*!	Returns the team's current user CPU time.
957 
958 	The caller must hold \c time_lock.
959 
960 	\return The team's current user CPU time.
961 */
962 bigtime_t
963 Team::UserCPUTime() const
964 {
965 	bigtime_t time = dead_threads_user_time;
966 
967 	bigtime_t now = system_time();
968 
969 	for (Thread* thread = thread_list; thread != NULL;
970 			thread = thread->team_next) {
971 		SpinLocker threadTimeLocker(thread->time_lock);
972 		time += thread->user_time;
973 
974 		if (thread->last_time != 0 && !thread->in_kernel)
975 			time += now - thread->last_time;
976 	}
977 
978 	return time;
979 }
980 
981 
982 //	#pragma mark - ProcessGroup
983 
984 
985 ProcessGroup::ProcessGroup(pid_t id)
986 	:
987 	id(id),
988 	teams(NULL),
989 	fSession(NULL),
990 	fInOrphanedCheckList(false)
991 {
992 	char lockName[32];
993 	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
994 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
995 }
996 
997 
998 ProcessGroup::~ProcessGroup()
999 {
1000 	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));
1001 
1002 	// If the group is in the orphaned check list, remove it.
1003 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1004 
1005 	if (fInOrphanedCheckList)
1006 		sOrphanedCheckProcessGroups.Remove(this);
1007 
1008 	orphanedCheckLocker.Unlock();
1009 
1010 	// remove group from the hash table and from the session
1011 	if (fSession != NULL) {
1012 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1013 		sGroupHash.RemoveUnchecked(this);
1014 		groupHashLocker.Unlock();
1015 
1016 		fSession->ReleaseReference();
1017 	}
1018 
1019 	mutex_destroy(&fLock);
1020 }
1021 
1022 
1023 /*static*/ ProcessGroup*
1024 ProcessGroup::Get(pid_t id)
1025 {
1026 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1027 	ProcessGroup* group = sGroupHash.Lookup(id);
1028 	if (group != NULL)
1029 		group->AcquireReference();
1030 	return group;
1031 }
1032 
1033 
1034 /*!	Adds the group to the given session and makes it publicly accessible.
1035 	The caller must not hold the process group hash lock.
1036 */
1037 void
1038 ProcessGroup::Publish(ProcessSession* session)
1039 {
1040 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1041 	PublishLocked(session);
1042 }
1043 
1044 
1045 /*!	Adds the group to the given session and makes it publicly accessible.
1046 	The caller must hold the process group hash lock.
1047 */
1048 void
1049 ProcessGroup::PublishLocked(ProcessSession* session)
1050 {
1051 	ASSERT(sGroupHash.Lookup(this->id) == NULL);
1052 
1053 	fSession = session;
1054 	fSession->AcquireReference();
1055 
1056 	sGroupHash.InsertUnchecked(this);
1057 }
1058 
1059 
1060 /*!	Checks whether the process group is orphaned.
1061 	The caller must hold the group's lock.
1062 	\return \c true, if the group is orphaned, \c false otherwise.
1063 */
1064 bool
1065 ProcessGroup::IsOrphaned() const
1066 {
1067 	// Orphaned Process Group: "A process group in which the parent of every
1068 	// member is either itself a member of the group or is not a member of the
1069 	// group's session." (Open Group Base Specs Issue 7)
1070 	bool orphaned = true;
1071 
1072 	Team* team = teams;
1073 	while (orphaned && team != NULL) {
1074 		team->LockTeamAndParent(false);
1075 
1076 		Team* parent = team->parent;
1077 		if (parent != NULL && parent->group_id != id
1078 			&& parent->session_id == fSession->id) {
1079 			orphaned = false;
1080 		}
1081 
1082 		team->UnlockTeamAndParent();
1083 
1084 		team = team->group_next;
1085 	}
1086 
1087 	return orphaned;
1088 }
1089 
1090 
1091 void
1092 ProcessGroup::ScheduleOrphanedCheck()
1093 {
1094 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1095 
1096 	if (!fInOrphanedCheckList) {
1097 		sOrphanedCheckProcessGroups.Add(this);
1098 		fInOrphanedCheckList = true;
1099 	}
1100 }
1101 
1102 
1103 void
1104 ProcessGroup::UnsetOrphanedCheck()
1105 {
1106 	fInOrphanedCheckList = false;
1107 }
1108 
1109 
1110 //	#pragma mark - ProcessSession
1111 
1112 
1113 ProcessSession::ProcessSession(pid_t id)
1114 	:
1115 	id(id),
1116 	controlling_tty(-1),
1117 	foreground_group(-1)
1118 {
1119 	char lockName[32];
1120 	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
1121 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
1122 }
1123 
1124 
1125 ProcessSession::~ProcessSession()
1126 {
1127 	mutex_destroy(&fLock);
1128 }
1129 
1130 
1131 //	#pragma mark - KDL functions
1132 
1133 
1134 static void
1135 _dump_team_info(Team* team)
1136 {
1137 	kprintf("TEAM: %p\n", team);
1138 	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
1139 		team->id);
1140 	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
1141 	kprintf("name:             '%s'\n", team->Name());
1142 	kprintf("args:             '%s'\n", team->Args());
1143 	kprintf("hash_next:        %p\n", team->hash_next);
1144 	kprintf("parent:           %p", team->parent);
1145 	if (team->parent != NULL) {
1146 		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
1147 	} else
1148 		kprintf("\n");
1149 
1150 	kprintf("children:         %p\n", team->children);
1151 	kprintf("num_threads:      %d\n", team->num_threads);
1152 	kprintf("state:            %d\n", team->state);
1153 	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
1154 	kprintf("io_context:       %p\n", team->io_context);
1155 	if (team->address_space)
1156 		kprintf("address_space:    %p\n", team->address_space);
1157 	kprintf("user data:        %p (area %" B_PRId32 ")\n",
1158 		(void*)team->user_data, team->user_data_area);
1159 	kprintf("free user thread: %p\n", team->free_user_threads);
1160 	kprintf("main_thread:      %p\n", team->main_thread);
1161 	kprintf("thread_list:      %p\n", team->thread_list);
1162 	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
1163 	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
1164 }
1165 
1166 
1167 static int
1168 dump_team_info(int argc, char** argv)
1169 {
1170 	ulong arg;
1171 	bool found = false;
1172 
1173 	if (argc < 2) {
1174 		Thread* thread = thread_get_current_thread();
1175 		if (thread != NULL && thread->team != NULL)
1176 			_dump_team_info(thread->team);
1177 		else
1178 			kprintf("No current team!\n");
1179 		return 0;
1180 	}
1181 
1182 	arg = strtoul(argv[1], NULL, 0);
1183 	if (IS_KERNEL_ADDRESS(arg)) {
1184 		// semi-hack
1185 		_dump_team_info((Team*)arg);
1186 		return 0;
1187 	}
1188 
1189 	// walk through the team table, trying to match name or id
1190 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1191 		Team* team = it.Next();) {
1192 		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
1193 			|| team->id == (team_id)arg) {
1194 			_dump_team_info(team);
1195 			found = true;
1196 			break;
1197 		}
1198 	}
1199 
1200 	if (!found)
1201 		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
1202 	return 0;
1203 }
1204 
1205 
1206 static int
1207 dump_teams(int argc, char** argv)
1208 {
1209 	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
1210 		B_PRINTF_POINTER_WIDTH, "parent");
1211 
1212 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1213 		Team* team = it.Next();) {
1214 		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
1215 	}
1216 
1217 	return 0;
1218 }
1219 
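// Usage sketch (illustrative): these functions presumably back the KDL
// debugger commands for teams, so that in the kernel debugger one can type
// e.g. "teams" for the overview, or "team <id|name|address>" for a detailed
// dump of a single team.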
1220 
1221 //	#pragma mark - Private functions
1222 
1223 
1224 /*!	Inserts team \a team into the child list of team \a parent.
1225 
1226 	The caller must hold the lock of both \a parent and \a team.
1227 
1228 	\param parent The parent team.
1229 	\param team The team to be inserted into \a parent's child list.
1230 */
1231 static void
1232 insert_team_into_parent(Team* parent, Team* team)
1233 {
1234 	ASSERT(parent != NULL);
1235 
1236 	team->siblings_next = parent->children;
1237 	parent->children = team;
1238 	team->parent = parent;
1239 }
1240 
1241 
1242 /*!	Removes team \a team from the child list of team \a parent.
1243 
1244 	The caller must hold the lock of both \a parent and \a team.
1245 
1246 	\param parent The parent team.
1247 	\param team The team to be removed from \a parent's child list.
1248 */
1249 static void
1250 remove_team_from_parent(Team* parent, Team* team)
1251 {
1252 	Team* child;
1253 	Team* last = NULL;
1254 
1255 	for (child = parent->children; child != NULL;
1256 			child = child->siblings_next) {
1257 		if (child == team) {
1258 			if (last == NULL)
1259 				parent->children = child->siblings_next;
1260 			else
1261 				last->siblings_next = child->siblings_next;
1262 
1263 			team->parent = NULL;
1264 			break;
1265 		}
1266 		last = child;
1267 	}
1268 }
1269 
1270 
1271 /*!	Returns whether the given team is a session leader.
1272 	The caller must hold the team's lock or its process group's lock.
1273 */
1274 static bool
1275 is_session_leader(Team* team)
1276 {
1277 	return team->session_id == team->id;
1278 }
1279 
1280 
1281 /*!	Returns whether the given team is a process group leader.
1282 	The caller must hold the team's lock or its process group's lock.
1283 */
1284 static bool
1285 is_process_group_leader(Team* team)
1286 {
1287 	return team->group_id == team->id;
1288 }
1289 
1290 
1291 /*!	Inserts the given team into the given process group.
1292 	The caller must hold the process group's lock, the team's lock, and the
1293 	team's parent's lock.
1294 */
1295 static void
1296 insert_team_into_group(ProcessGroup* group, Team* team)
1297 {
1298 	team->group = group;
1299 	team->group_id = group->id;
1300 	team->session_id = group->Session()->id;
1301 
1302 	team->group_next = group->teams;
1303 	group->teams = team;
1304 	group->AcquireReference();
1305 }
1306 
1307 
1308 /*!	Removes the given team from its process group.
1309 
1310 	The caller must hold the process group's lock, the team's lock, and the
1311 	team's parent's lock. Interrupts must be enabled.
1312 
1313 	\param team The team that'll be removed from its process group.
1314 */
1315 static void
1316 remove_team_from_group(Team* team)
1317 {
1318 	ProcessGroup* group = team->group;
1319 	Team* current;
1320 	Team* last = NULL;
1321 
1322 	// the team must be in a process group to let this function have any effect
1323 	if (group == NULL)
1324 		return;
1325 
1326 	for (current = group->teams; current != NULL;
1327 			current = current->group_next) {
1328 		if (current == team) {
1329 			if (last == NULL)
1330 				group->teams = current->group_next;
1331 			else
1332 				last->group_next = current->group_next;
1333 
1334 			team->group = NULL;
1335 			break;
1336 		}
1337 		last = current;
1338 	}
1339 
1340 	team->group = NULL;
1341 	team->group_next = NULL;
1342 
1343 	group->ReleaseReference();
1344 }
1345 
1346 
1347 static status_t
1348 create_team_user_data(Team* team, void* exactAddress = NULL)
1349 {
1350 	void* address;
1351 	uint32 addressSpec;
1352 
1353 	if (exactAddress != NULL) {
1354 		address = exactAddress;
1355 		addressSpec = B_EXACT_ADDRESS;
1356 	} else {
1357 		address = (void*)KERNEL_USER_DATA_BASE;
1358 		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
1359 	}
1360 
1361 	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
1362 		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);
1363 
1364 	virtual_address_restrictions virtualRestrictions = {};
1365 	if (result == B_OK || exactAddress != NULL) {
1366 		if (exactAddress != NULL)
1367 			virtualRestrictions.address = exactAddress;
1368 		else
1369 			virtualRestrictions.address = address;
1370 		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
1371 	} else {
1372 		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
1373 		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
1374 	}
1375 
1376 	physical_address_restrictions physicalRestrictions = {};
1377 	team->user_data_area = create_area_etc(team->id, "user area",
1378 		kTeamUserDataInitialSize, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0,
1379 		&virtualRestrictions, &physicalRestrictions, &address);
1380 	if (team->user_data_area < 0)
1381 		return team->user_data_area;
1382 
1383 	team->user_data = (addr_t)address;
1384 	team->used_user_data = 0;
1385 	team->user_data_size = kTeamUserDataInitialSize;
1386 	team->free_user_threads = NULL;
1387 
1388 	return B_OK;
1389 }
1390 
1391 
1392 static void
1393 delete_team_user_data(Team* team)
1394 {
1395 	if (team->user_data_area >= 0) {
1396 		vm_delete_area(team->id, team->user_data_area, true);
1397 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1398 			kTeamUserDataReservedSize);
1399 
1400 		team->user_data = 0;
1401 		team->used_user_data = 0;
1402 		team->user_data_size = 0;
1403 		team->user_data_area = -1;
1404 		while (free_user_thread* entry = team->free_user_threads) {
1405 			team->free_user_threads = entry->next;
1406 			free(entry);
1407 		}
1408 	}
1409 }
1410 
1411 
1412 static status_t
1413 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1414 	int32 argCount, int32 envCount, char**& _flatArgs)
1415 {
1416 	if (argCount < 0 || envCount < 0)
1417 		return B_BAD_VALUE;
1418 
1419 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1420 		return B_TOO_MANY_ARGS;
1421 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1422 		return B_BAD_VALUE;
1423 
1424 	if (!IS_USER_ADDRESS(userFlatArgs))
1425 		return B_BAD_ADDRESS;
1426 
1427 	// allocate kernel memory
1428 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1429 	if (flatArgs == NULL)
1430 		return B_NO_MEMORY;
1431 
1432 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1433 		free(flatArgs);
1434 		return B_BAD_ADDRESS;
1435 	}
1436 
1437 	// check and relocate the array
1438 	status_t error = B_OK;
1439 	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
1440 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1441 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1442 		if (i == argCount || i == argCount + envCount + 1) {
1443 			// check array null termination
1444 			if (flatArgs[i] != NULL) {
1445 				error = B_BAD_VALUE;
1446 				break;
1447 			}
1448 		} else {
1449 			// check string
1450 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1451 			size_t maxLen = stringEnd - arg;
1452 			if (arg < stringBase || arg >= stringEnd
1453 					|| strnlen(arg, maxLen) == maxLen) {
1454 				error = B_BAD_VALUE;
1455 				break;
1456 			}
1457 
1458 			flatArgs[i] = arg;
1459 		}
1460 	}
1461 
1462 	if (error == B_OK)
1463 		_flatArgs = flatArgs;
1464 	else
1465 		free(flatArgs);
1466 
1467 	return error;
1468 }
1469 
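// Layout sketch of the flattened buffer this function validates
// (illustrative): a pointer array followed by the string data it points to:
//
//	flatArgs[0 .. argCount - 1]                      argument strings
//	flatArgs[argCount]                               NULL terminator
//	flatArgs[argCount + 1 .. argCount + envCount]    environment strings
//	flatArgs[argCount + envCount + 1]                NULL terminator
//	...string data (starting at stringBase)...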
1470 
1471 static void
1472 free_team_arg(struct team_arg* teamArg)
1473 {
1474 	if (teamArg != NULL) {
1475 		free(teamArg->flat_args);
1476 		free(teamArg->path);
1477 		free(teamArg);
1478 	}
1479 }
1480 
1481 
1482 static status_t
1483 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1484 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1485 	port_id port, uint32 token)
1486 {
1487 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1488 	if (teamArg == NULL)
1489 		return B_NO_MEMORY;
1490 
1491 	teamArg->path = strdup(path);
1492 	if (teamArg->path == NULL) {
1493 		free(teamArg);
1494 		return B_NO_MEMORY;
1495 	}
1496 
1497 	// copy the args over
1498 	teamArg->flat_args = flatArgs;
1499 	teamArg->flat_args_size = flatArgsSize;
1500 	teamArg->arg_count = argCount;
1501 	teamArg->env_count = envCount;
1502 	teamArg->flags = 0;
1503 	teamArg->umask = umask;
1504 	teamArg->error_port = port;
1505 	teamArg->error_token = token;
1506 
1507 	// determine the flags from the environment
1508 	const char* const* env = flatArgs + argCount + 1;
1509 	for (int32 i = 0; i < envCount; i++) {
1510 		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
1511 			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
1512 			break;
1513 		}
1514 	}
1515 
1516 	*_teamArg = teamArg;
1517 	return B_OK;
1518 }
1519 
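// Example (sketch): a team started with DISABLE_ASLR=1 in its environment
// gets TEAM_ARGS_FLAG_NO_ASLR set here, which load_image_internal() and
// exec_team() below translate into
// team->address_space->SetRandomizingEnabled(false).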
1520 
1521 static status_t
1522 team_create_thread_start_internal(void* args)
1523 {
1524 	status_t err;
1525 	Thread* thread;
1526 	Team* team;
1527 	struct team_arg* teamArgs = (struct team_arg*)args;
1528 	const char* path;
1529 	addr_t entry;
1530 	char** userArgs;
1531 	char** userEnv;
1532 	struct user_space_program_args* programArgs;
1533 	uint32 argCount, envCount;
1534 
1535 	thread = thread_get_current_thread();
1536 	team = thread->team;
1537 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
1538 
1539 	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
1540 		thread->id));
1541 
1542 	// Main stack area layout is currently as follows (starting from 0):
1543 	//
1544 	// size								| usage
1545 	// ---------------------------------+--------------------------------
1546 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
1547 	// TLS_SIZE							| TLS data
1548 	// sizeof(user_space_program_args)	| argument structure for the runtime
1549 	//									| loader
1550 	// flat arguments size				| flat process arguments and environment
1551 
1552 	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
1553 	// the heap
1554 	// TODO: we could reserve the whole USER_STACK_REGION upfront...
1555 
1556 	argCount = teamArgs->arg_count;
1557 	envCount = teamArgs->env_count;
1558 
1559 	programArgs = (struct user_space_program_args*)(thread->user_stack_base
1560 		+ thread->user_stack_size + TLS_SIZE);
1561 
1562 	userArgs = (char**)(programArgs + 1);
1563 	userEnv = userArgs + argCount + 1;
1564 	path = teamArgs->path;
1565 
1566 	if (user_strlcpy(programArgs->program_path, path,
1567 				sizeof(programArgs->program_path)) < B_OK
1568 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1569 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
1570 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1571 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
1572 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1573 				sizeof(port_id)) < B_OK
1574 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1575 				sizeof(uint32)) < B_OK
1576 		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
1577 		|| user_memcpy(userArgs, teamArgs->flat_args,
1578 				teamArgs->flat_args_size) < B_OK) {
1579 		// the team deletion process will clean this mess
1580 		free_team_arg(teamArgs);
1581 		return B_BAD_ADDRESS;
1582 	}
1583 
1584 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1585 
1586 	// set team args and update state
1587 	team->Lock();
1588 	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
1589 	team->state = TEAM_STATE_NORMAL;
1590 	team->Unlock();
1591 
1592 	free_team_arg(teamArgs);
1593 		// the arguments are already on the user stack, we no longer need
1594 		// them in this form
1595 
1596 	// Clone commpage area
1597 	area_id commPageArea = clone_commpage_area(team->id,
1598 		&team->commpage_address);
1599 	if (commPageArea < B_OK) {
1600 		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
1601 			strerror(commPageArea)));
1602 		return commPageArea;
1603 	}
1604 
1605 	// Register commpage image
1606 	image_id commPageImage = get_commpage_image();
1607 	extended_image_info imageInfo;
1608 	err = get_image_info(commPageImage, &imageInfo.basic_info);
1609 	if (err != B_OK) {
1610 		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
1611 			strerror(err)));
1612 		return err;
1613 	}
1614 	imageInfo.basic_info.text = team->commpage_address;
1615 	imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
1616 	imageInfo.symbol_table = NULL;
1617 	imageInfo.symbol_hash = NULL;
1618 	imageInfo.string_table = NULL;
1619 	image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
1620 	if (image < 0) {
1621 		TRACE(("team_create_thread_start: register_image() failed: %s\n",
1622 			strerror(image)));
1623 		return image;
1624 	}
1625 
1626 	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
1627 	// automatic variables with function scope will never be destroyed.
1628 	{
1629 		// find runtime_loader path
1630 		KPath runtimeLoaderPath;
1631 		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
1632 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1633 		if (err < B_OK) {
1634 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1635 				strerror(err)));
1636 			return err;
1637 		}
1638 		runtimeLoaderPath.UnlockBuffer();
1639 		err = runtimeLoaderPath.Append("runtime_loader");
1640 
1641 		if (err == B_OK) {
1642 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
1643 				&entry);
1644 		}
1645 	}
1646 
1647 	if (err < B_OK) {
1648 		// Luckily, we don't have to clean up the mess we created - that's
1649 		// done for us by the normal team deletion process
1650 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1651 			"%s\n", strerror(err)));
1652 		return err;
1653 	}
1654 
1655 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1656 
1657 	// enter userspace -- returns only in case of error
1658 	return thread_enter_userspace_new_team(thread, (addr_t)entry,
1659 		programArgs, team->commpage_address);
1660 }
1661 
1662 
1663 static status_t
1664 team_create_thread_start(void* args)
1665 {
1666 	team_create_thread_start_internal(args);
1667 	team_init_exit_info_on_error(thread_get_current_thread()->team);
1668 	thread_exit();
1669 		// does not return
1670 	return B_OK;
1671 }
1672 
1673 
1674 static thread_id
1675 load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1676 	int32 envCount, int32 priority, team_id parentID, uint32 flags,
1677 	port_id errorPort, uint32 errorToken)
1678 {
1679 	char** flatArgs = _flatArgs;
1680 	thread_id thread;
1681 	status_t status;
1682 	struct team_arg* teamArgs;
1683 	struct team_loading_info loadingInfo;
1684 	ConditionVariableEntry loadingWaitEntry;
1685 	io_context* parentIOContext = NULL;
1686 	team_id teamID;
1687 	bool teamLimitReached = false;
1688 
1689 	if (flatArgs == NULL || argCount == 0)
1690 		return B_BAD_VALUE;
1691 
1692 	const char* path = flatArgs[0];
1693 
1694 	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
1695 		"\n", path, flatArgs, argCount));
1696 
1697 	// cut the path from the main thread name
1698 	const char* threadName = strrchr(path, '/');
1699 	if (threadName != NULL)
1700 		threadName++;
1701 	else
1702 		threadName = path;
1703 
1704 	// create the main thread object
1705 	Thread* mainThread;
1706 	status = Thread::Create(threadName, mainThread);
1707 	if (status != B_OK)
1708 		return status;
1709 	BReference<Thread> mainThreadReference(mainThread, true);
1710 
1711 	// create team object
1712 	Team* team = Team::Create(mainThread->id, path, false);
1713 	if (team == NULL)
1714 		return B_NO_MEMORY;
1715 	BReference<Team> teamReference(team, true);
1716 
1717 	if ((flags & B_WAIT_TILL_LOADED) != 0) {
1718 		loadingInfo.condition.Init(team, "image load");
1719 		loadingInfo.condition.Add(&loadingWaitEntry);
1720 		loadingInfo.result = B_ERROR;
1721 		team->loading_info = &loadingInfo;
1722 	}
1723 
1724 	// get the parent team
1725 	Team* parent = Team::Get(parentID);
1726 	if (parent == NULL)
1727 		return B_BAD_TEAM_ID;
1728 	BReference<Team> parentReference(parent, true);
1729 
1730 	parent->LockTeamAndProcessGroup();
1731 	team->Lock();
1732 
1733 	// inherit the parent's user/group
1734 	inherit_parent_user_and_group(team, parent);
1735 
1736 	// get a reference to the parent's I/O context -- we need it to create ours
1737 	parentIOContext = parent->io_context;
1738 	vfs_get_io_context(parentIOContext);
1739 
1740 	team->Unlock();
1741 	parent->UnlockTeamAndProcessGroup();
1742 
1743 	// check the executable's set-user/group-id permission
1744 	update_set_id_user_and_group(team, path);
1745 
1746 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1747 		envCount, (mode_t)-1, errorPort, errorToken);
1748 	if (status != B_OK)
1749 		goto err1;
1750 
1751 	_flatArgs = NULL;
1752 		// args are owned by the team_arg structure now
1753 
1754 	// create a new io_context for this team
1755 	team->io_context = vfs_new_io_context(parentIOContext, true);
1756 	if (!team->io_context) {
1757 		status = B_NO_MEMORY;
1758 		goto err2;
1759 	}
1760 
1761 	// We don't need the parent's I/O context any longer.
1762 	vfs_put_io_context(parentIOContext);
1763 	parentIOContext = NULL;
1764 
1765 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1766 	vfs_exec_io_context(team->io_context);
1767 
1768 	// create an address space for this team
1769 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
1770 		&team->address_space);
1771 	if (status != B_OK)
1772 		goto err2;
1773 
1774 	team->address_space->SetRandomizingEnabled(
1775 		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);
1776 
1777 	// create the user data area
1778 	status = create_team_user_data(team);
1779 	if (status != B_OK)
1780 		goto err4;
1781 
1782 	// insert the team into its parent and the teams hash
1783 	parent->LockTeamAndProcessGroup();
1784 	team->Lock();
1785 
1786 	{
1787 		InterruptsSpinLocker teamsLocker(sTeamHashLock);
1788 
1789 		sTeamHash.Insert(team);
1790 		teamLimitReached = sUsedTeams >= sMaxTeams;
1791 		if (!teamLimitReached)
1792 			sUsedTeams++;
1793 	}
1794 
1795 	insert_team_into_parent(parent, team);
1796 	insert_team_into_group(parent->group, team);
1797 
1798 	team->Unlock();
1799 	parent->UnlockTeamAndProcessGroup();
1800 
1801 	// notify team listeners
1802 	sNotificationService.Notify(TEAM_ADDED, team);
1803 
1804 	if (teamLimitReached) {
1805 		status = B_NO_MORE_TEAMS;
1806 		goto err6;
1807 	}
1808 
1809 	// Once the main thread has been started, we shouldn't access the team
1810 	// object anymore, so cache the team's ID.
1811 	teamID = team->id;
1812 
1813 	// Create a kernel thread, but under the context of the new team
1814 	// The new thread will take over ownership of teamArgs.
1815 	{
1816 		ThreadCreationAttributes threadAttributes(team_create_thread_start,
1817 			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
1818 		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
1819 			+ teamArgs->flat_args_size;
1820 		thread = thread_create_thread(threadAttributes, false);
1821 		if (thread < 0) {
1822 			status = thread;
1823 			goto err6;
1824 		}
1825 	}
1826 
1827 	// The team has been created successfully, so we keep the reference. Or
1828 	// more precisely: It's owned by the team's main thread, now.
1829 	teamReference.Detach();
1830 
1831 	// wait for the loader of the new team to finish its work
1832 	if ((flags & B_WAIT_TILL_LOADED) != 0) {
1833 		if (mainThread != NULL) {
1834 			// resume the team's main thread
1835 			thread_continue(mainThread);
1836 		}
1837 
1838 		// Now wait until loading is finished. We will be woken either by the
1839 		// thread, when it finished or aborted loading, or when the team is
1840 		// going to die (e.g. is killed). In either case the one notifying is
1841 		// responsible for unsetting `loading_info` in the team structure.
1842 		loadingWaitEntry.Wait();
1843 
1844 		if (loadingInfo.result < B_OK)
1845 			return loadingInfo.result;
1846 	}
1847 
1848 	// notify the debugger
1849 	user_debug_team_created(teamID);
1850 
1851 	return thread;
1852 
1853 err6:
1854 	// Remove the team structure from the process group, the parent team, and
1855 	// the team hash table and delete the team structure.
1856 	parent->LockTeamAndProcessGroup();
1857 	team->Lock();
1858 
1859 	remove_team_from_group(team);
1860 	remove_team_from_parent(team->parent, team);
1861 
1862 	team->Unlock();
1863 	parent->UnlockTeamAndProcessGroup();
1864 
1865 	{
1866 		InterruptsSpinLocker teamsLocker(sTeamHashLock);
1867 		sTeamHash.Remove(team);
1868 		if (!teamLimitReached)
1869 			sUsedTeams--;
1870 	}
1871 
1872 	sNotificationService.Notify(TEAM_REMOVED, team);
1873 
1874 	delete_team_user_data(team);
1875 err4:
1876 	team->address_space->Put();
1877 err2:
1878 	free_team_arg(teamArgs);
1879 err1:
1880 	if (parentIOContext != NULL)
1881 		vfs_put_io_context(parentIOContext);
1882 
1883 	return status;
1884 }
1885 
1886 
1887 /*!	Almost shuts down the current team and loads a new image into it.
1888 	If successful, this function does not return and will take over ownership
1889 	of the arguments provided.
1890 	This function may only be called in a userland team (caused by one of the
1891 	exec*() syscalls).
1892 */
1893 static status_t
1894 exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
1895 	int32 argCount, int32 envCount, mode_t umask)
1896 {
1897 	// NOTE: Since this function normally doesn't return, don't use automatic
1898 	// variables that need destruction in the function scope.
1899 	char** flatArgs = _flatArgs;
1900 	Team* team = thread_get_current_thread()->team;
1901 	struct team_arg* teamArgs;
1902 	const char* threadName;
1903 	thread_id nubThreadID = -1;
1904 
1905 	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
1906 		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
1907 		team->id));
1908 
1909 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1910 
1911 	// switching the kernel at run time is probably not a good idea :)
1912 	if (team == team_get_kernel_team())
1913 		return B_NOT_ALLOWED;
1914 
1915 	// we currently need to be single threaded here
1916 	// TODO: maybe we should just kill all other threads and
1917 	//	make the current thread the team's main thread?
1918 	Thread* currentThread = thread_get_current_thread();
1919 	if (currentThread != team->main_thread)
1920 		return B_NOT_ALLOWED;
1921 
1922 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1923 	// We iterate through the thread list to make sure that there's no other
1924 	// thread.
1925 	TeamLocker teamLocker(team);
1926 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
1927 
1928 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1929 		nubThreadID = team->debug_info.nub_thread;
1930 
1931 	debugInfoLocker.Unlock();
1932 
1933 	for (Thread* thread = team->thread_list; thread != NULL;
1934 			thread = thread->team_next) {
1935 		if (thread != team->main_thread && thread->id != nubThreadID)
1936 			return B_NOT_ALLOWED;
1937 	}
1938 
1939 	team->DeleteUserTimers(true);
1940 	team->ResetSignalsOnExec();
1941 
1942 	teamLocker.Unlock();
1943 
1944 	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
1945 		argCount, envCount, umask, -1, 0);
1946 	if (status != B_OK)
1947 		return status;
1948 
1949 	_flatArgs = NULL;
1950 		// args are owned by the team_arg structure now
1951 
1952 	// TODO: remove team resources if there are any left
1953 	// thread_atkernel_exit() might not be called at all
1954 
1955 	thread_reset_for_exec();
1956 
1957 	user_debug_prepare_for_exec();
1958 
1959 	delete_team_user_data(team);
1960 	vm_delete_areas(team->address_space, false);
1961 	xsi_sem_undo(team);
1962 	delete_owned_ports(team);
1963 	sem_delete_owned_sems(team);
1964 	remove_images(team);
1965 	vfs_exec_io_context(team->io_context);
1966 	delete_realtime_sem_context(team->realtime_sem_context);
1967 	team->realtime_sem_context = NULL;
1968 
1969 	// update ASLR
1970 	team->address_space->SetRandomizingEnabled(
1971 		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);
1972 
1973 	status = create_team_user_data(team);
1974 	if (status != B_OK) {
1975 		// creating the user data failed -- we're toast
1976 		free_team_arg(teamArgs);
1977 		exit_thread(status);
1978 		return status;
1979 	}
1980 
1981 	user_debug_finish_after_exec();
1982 
1983 	// rename the team
1984 
1985 	team->Lock();
1986 	team->SetName(path);
1987 	team->Unlock();
1988 
1989 	// cut the path from the team name and rename the main thread, too
1990 	threadName = strrchr(path, '/');
1991 	if (threadName != NULL)
1992 		threadName++;
1993 	else
1994 		threadName = path;
1995 	rename_thread(thread_get_current_thread_id(), threadName);
1996 
1997 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1998 
1999 	// Update user/group according to the executable's set-user/group-id
2000 	// permission.
2001 	update_set_id_user_and_group(team, path);
2002 
2003 	user_debug_team_exec();
2004 
2005 	// notify team listeners
2006 	sNotificationService.Notify(TEAM_EXEC, team);
2007 
	// allocate a user_thread structure for the current thread
2009 	user_thread* userThread = team_allocate_user_thread(team);
2010 		// cannot fail (the allocation for the team would have failed already)
2011 	ThreadLocker currentThreadLocker(currentThread);
2012 	currentThread->user_thread = userThread;
2013 	currentThreadLocker.Unlock();
2014 
2015 	// create the user stack for the thread
2016 	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
2017 		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
2018 	if (status == B_OK) {
2019 		// prepare the stack, load the runtime loader, and enter userspace
2020 		team_create_thread_start(teamArgs);
			// never returns
2022 	} else
2023 		free_team_arg(teamArgs);
2024 
2025 	// Sorry, we have to kill ourselves, there is no way out anymore
2026 	// (without any areas left and all that).
2027 	exit_thread(status);
2028 
2029 	// We return a status here since the signal that is sent by the
2030 	// call above is not immediately handled.
2031 	return B_ERROR;
2032 }
2033 
2034 
2035 static thread_id
2036 fork_team(void)
2037 {
2038 	Thread* parentThread = thread_get_current_thread();
2039 	Team* parentTeam = parentThread->team;
2040 	Team* team;
2041 	arch_fork_arg* forkArgs;
2042 	struct area_info info;
2043 	thread_id threadID;
2044 	status_t status;
2045 	ssize_t areaCookie;
2046 	bool teamLimitReached = false;
2047 
2048 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2049 
2050 	if (parentTeam == team_get_kernel_team())
2051 		return B_NOT_ALLOWED;
2052 
2053 	// create a new team
2054 	// TODO: this is very similar to load_image_internal() - maybe we can do
2055 	// something about it :)
2056 
2057 	// create the main thread object
2058 	Thread* thread;
2059 	status = Thread::Create(parentThread->name, thread);
2060 	if (status != B_OK)
2061 		return status;
2062 	BReference<Thread> threadReference(thread, true);
2063 
2064 	// create the team object
2065 	team = Team::Create(thread->id, NULL, false);
2066 	if (team == NULL)
2067 		return B_NO_MEMORY;
2068 
2069 	parentTeam->LockTeamAndProcessGroup();
2070 	team->Lock();
2071 
2072 	team->SetName(parentTeam->Name());
2073 	team->SetArgs(parentTeam->Args());
2074 
2075 	team->commpage_address = parentTeam->commpage_address;
2076 
2077 	// Inherit the parent's user/group.
2078 	inherit_parent_user_and_group(team, parentTeam);
2079 
2080 	// inherit signal handlers
2081 	team->InheritSignalActions(parentTeam);
2082 
2083 	team->Unlock();
2084 	parentTeam->UnlockTeamAndProcessGroup();
2085 
2086 	// inherit some team debug flags
2087 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2088 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2089 
2090 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2091 	if (forkArgs == NULL) {
2092 		status = B_NO_MEMORY;
2093 		goto err1;
2094 	}
2095 
2096 	// create a new io_context for this team
2097 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2098 	if (!team->io_context) {
2099 		status = B_NO_MEMORY;
2100 		goto err2;
2101 	}
2102 
2103 	// duplicate the realtime sem context
2104 	if (parentTeam->realtime_sem_context) {
2105 		team->realtime_sem_context = clone_realtime_sem_context(
2106 			parentTeam->realtime_sem_context);
2107 		if (team->realtime_sem_context == NULL) {
2108 			status = B_NO_MEMORY;
2109 			goto err2;
2110 		}
2111 	}
2112 
2113 	// create an address space for this team
2114 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2115 		&team->address_space);
2116 	if (status < B_OK)
2117 		goto err3;
2118 
2119 	// copy all areas of the team
	// TODO: should be able to handle stack areas differently (i.e. don't have
2121 	// them copy-on-write)
2122 
2123 	areaCookie = 0;
2124 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2125 		if (info.area == parentTeam->user_data_area) {
2126 			// don't clone the user area; just create a new one
2127 			status = create_team_user_data(team, info.address);
2128 			if (status != B_OK)
2129 				break;
2130 
2131 			thread->user_thread = team_allocate_user_thread(team);
2132 		} else {
2133 			void* address;
2134 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2135 				&address, B_CLONE_ADDRESS, info.protection, info.area);
2136 			if (area < B_OK) {
2137 				status = area;
2138 				break;
2139 			}
2140 
2141 			if (info.area == parentThread->user_stack_area)
2142 				thread->user_stack_area = area;
2143 		}
2144 	}
2145 
2146 	if (status < B_OK)
2147 		goto err4;
2148 
2149 	if (thread->user_thread == NULL) {
2150 #if KDEBUG
2151 		panic("user data area not found, parent area is %" B_PRId32,
2152 			parentTeam->user_data_area);
2153 #endif
2154 		status = B_ERROR;
2155 		goto err4;
2156 	}
2157 
2158 	thread->user_stack_base = parentThread->user_stack_base;
2159 	thread->user_stack_size = parentThread->user_stack_size;
2160 	thread->user_local_storage = parentThread->user_local_storage;
2161 	thread->sig_block_mask = parentThread->sig_block_mask;
2162 	thread->signal_stack_base = parentThread->signal_stack_base;
2163 	thread->signal_stack_size = parentThread->signal_stack_size;
2164 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2165 
2166 	arch_store_fork_frame(forkArgs);
2167 
2168 	// copy image list
2169 	if (copy_images(parentTeam->id, team) != B_OK)
2170 		goto err5;
2171 
2172 	// insert the team into its parent and the teams hash
2173 	parentTeam->LockTeamAndProcessGroup();
2174 	team->Lock();
2175 
2176 	{
2177 		InterruptsSpinLocker teamsLocker(sTeamHashLock);
2178 
2179 		sTeamHash.Insert(team);
2180 		teamLimitReached = sUsedTeams >= sMaxTeams;
2181 		if (!teamLimitReached)
2182 			sUsedTeams++;
2183 	}
2184 
2185 	insert_team_into_parent(parentTeam, team);
2186 	insert_team_into_group(parentTeam->group, team);
2187 
2188 	team->Unlock();
2189 	parentTeam->UnlockTeamAndProcessGroup();
2190 
2191 	// notify team listeners
2192 	sNotificationService.Notify(TEAM_ADDED, team);
2193 
2194 	if (teamLimitReached) {
2195 		status = B_NO_MORE_TEAMS;
2196 		goto err6;
2197 	}
2198 
2199 	// create the main thread
2200 	{
2201 		ThreadCreationAttributes threadCreationAttributes(NULL,
2202 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2203 		threadCreationAttributes.forkArgs = forkArgs;
2204 		threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS;
2205 		threadID = thread_create_thread(threadCreationAttributes, false);
2206 		if (threadID < 0) {
2207 			status = threadID;
2208 			goto err6;
2209 		}
2210 	}
2211 
2212 	// notify the debugger
2213 	user_debug_team_created(team->id);
2214 
2215 	T(TeamForked(threadID));
2216 
2217 	resume_thread(threadID);
2218 	return threadID;
2219 
2220 err6:
2221 	// Remove the team structure from the process group, the parent team, and
2222 	// the team hash table and delete the team structure.
2223 	parentTeam->LockTeamAndProcessGroup();
2224 	team->Lock();
2225 
2226 	remove_team_from_group(team);
2227 	remove_team_from_parent(team->parent, team);
2228 
2229 	team->Unlock();
2230 	parentTeam->UnlockTeamAndProcessGroup();
2231 
2232 	{
2233 		InterruptsSpinLocker teamsLocker(sTeamHashLock);
2234 		sTeamHash.Remove(team);
2235 		if (!teamLimitReached)
2236 			sUsedTeams--;
2237 	}
2238 
2239 	sNotificationService.Notify(TEAM_REMOVED, team);
2240 err5:
2241 	remove_images(team);
2242 err4:
2243 	team->address_space->RemoveAndPut();
2244 err3:
2245 	delete_realtime_sem_context(team->realtime_sem_context);
2246 err2:
2247 	free(forkArgs);
2248 err1:
2249 	team->ReleaseReference();
2250 
2251 	return status;
2252 }
2253 
2254 
/*!	Returns whether the specified team \a parent has any children belonging to
2256 	process group with the specified ID \a groupID.
2257 	The caller must hold \a parent's lock.
2258 */
2259 static bool
2260 has_children_in_group(Team* parent, pid_t groupID)
2261 {
2262 	for (Team* child = parent->children; child != NULL;
2263 			child = child->siblings_next) {
2264 		TeamLocker childLocker(child);
2265 		if (child->group_id == groupID)
2266 			return true;
2267 	}
2268 
2269 	return false;
2270 }
2271 
2272 
2273 /*!	Returns the first job control entry from \a children, which matches \a id.
2274 	\a id can be:
2275 	- \code > 0 \endcode: Matching an entry with that team ID.
2276 	- \code == -1 \endcode: Matching any entry.
2277 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2278 	\c 0 is an invalid value for \a id.
2279 
2280 	The caller must hold the lock of the team that \a children belongs to.
2281 
2282 	\param children The job control entry list to check.
2283 	\param id The match criterion.
2284 	\return The first matching entry or \c NULL, if none matches.
2285 */
2286 static job_control_entry*
2287 get_job_control_entry(team_job_control_children& children, pid_t id)
2288 {
2289 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2290 		 job_control_entry* entry = it.Next();) {
2291 
2292 		if (id > 0) {
2293 			if (entry->thread == id)
2294 				return entry;
2295 		} else if (id == -1) {
2296 			return entry;
2297 		} else {
2298 			pid_t processGroup
2299 				= (entry->team ? entry->team->group_id : entry->group_id);
2300 			if (processGroup == -id)
2301 				return entry;
2302 		}
2303 	}
2304 
2305 	return NULL;
2306 }
2307 
2308 
/*!	Returns the first job control entry from one of the team's dead, continued,
	or stopped children which matches \a id.
2311 	\a id can be:
2312 	- \code > 0 \endcode: Matching an entry with that team ID.
2313 	- \code == -1 \endcode: Matching any entry.
2314 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2315 	\c 0 is an invalid value for \a id.
2316 
2317 	The caller must hold \a team's lock.
2318 
2319 	\param team The team whose dead, stopped, and continued child lists shall be
2320 		checked.
2321 	\param id The match criterion.
	\param flags Specifies which children shall be considered. Dead children
		are considered when \a flags contains \c WEXITED, stopped children
		when it contains \c WUNTRACED or \c WSTOPPED, and continued children
		when it contains \c WCONTINUED.
2327 	\return The first matching entry or \c NULL, if none matches.
2328 */
2329 static job_control_entry*
2330 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2331 {
2332 	job_control_entry* entry = NULL;
2333 
2334 	if ((flags & WEXITED) != 0)
2335 		entry = get_job_control_entry(team->dead_children, id);
2336 
2337 	if (entry == NULL && (flags & WCONTINUED) != 0)
2338 		entry = get_job_control_entry(team->continued_children, id);
2339 
2340 	if (entry == NULL && (flags & (WUNTRACED | WSTOPPED)) != 0)
2341 		entry = get_job_control_entry(team->stopped_children, id);
2342 
2343 	return entry;
2344 }
2345 
2346 
2347 job_control_entry::job_control_entry()
2348 	:
2349 	has_group_ref(false)
2350 {
2351 }
2352 
2353 
2354 job_control_entry::~job_control_entry()
2355 {
2356 	if (has_group_ref) {
2357 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2358 
2359 		ProcessGroup* group = sGroupHash.Lookup(group_id);
2360 		if (group == NULL) {
2361 			panic("job_control_entry::~job_control_entry(): unknown group "
2362 				"ID: %" B_PRId32, group_id);
2363 			return;
2364 		}
2365 
2366 		groupHashLocker.Unlock();
2367 
2368 		group->ReleaseReference();
2369 	}
2370 }
2371 
2372 
2373 /*!	Invoked when the owning team is dying, initializing the entry according to
2374 	the dead state.
2375 
2376 	The caller must hold the owning team's lock and the scheduler lock.
2377 */
2378 void
2379 job_control_entry::InitDeadState()
2380 {
2381 	if (team != NULL) {
2382 		ASSERT(team->exit.initialized);
2383 
2384 		group_id = team->group_id;
2385 		team->group->AcquireReference();
2386 		has_group_ref = true;
2387 
2388 		thread = team->id;
2389 		status = team->exit.status;
2390 		reason = team->exit.reason;
2391 		signal = team->exit.signal;
2392 		signaling_user = team->exit.signaling_user;
2393 		user_time = team->dead_threads_user_time
2394 			+ team->dead_children.user_time;
2395 		kernel_time = team->dead_threads_kernel_time
2396 			+ team->dead_children.kernel_time;
2397 
2398 		team = NULL;
2399 	}
2400 }
2401 
2402 
2403 job_control_entry&
2404 job_control_entry::operator=(const job_control_entry& other)
2405 {
2406 	state = other.state;
2407 	thread = other.thread;
2408 	signal = other.signal;
2409 	has_group_ref = false;
2410 	signaling_user = other.signaling_user;
2411 	team = other.team;
2412 	group_id = other.group_id;
2413 	status = other.status;
2414 	reason = other.reason;
2415 	user_time = other.user_time;
2416 	kernel_time = other.kernel_time;
2417 
2418 	return *this;
2419 }
2420 
2421 
2422 /*! This is the kernel backend for waitid().
2423 */
2424 static thread_id
2425 wait_for_child(pid_t child, uint32 flags, siginfo_t& _info,
2426 	team_usage_info& _usage_info)
2427 {
2428 	Thread* thread = thread_get_current_thread();
2429 	Team* team = thread->team;
2430 	struct job_control_entry foundEntry;
2431 	struct job_control_entry* freeDeathEntry = NULL;
2432 	status_t status = B_OK;
2433 
2434 	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
2435 		child, flags));
2436 
2437 	T(WaitForChild(child, flags));
2438 
2439 	if ((flags & (WEXITED | WUNTRACED | WSTOPPED | WCONTINUED)) == 0) {
2440 		T(WaitForChildDone(B_BAD_VALUE));
2441 		return B_BAD_VALUE;
2442 	}
2443 
2444 	pid_t originalChild = child;
2445 
2446 	bool ignoreFoundEntries = false;
2447 	bool ignoreFoundEntriesChecked = false;
2448 
2449 	while (true) {
2450 		// lock the team
2451 		TeamLocker teamLocker(team);
2452 
2453 		// A 0 child argument means to wait for all children in the process
2454 		// group of the calling team.
2455 		child = originalChild == 0 ? -team->group_id : originalChild;
2456 
2457 		// check whether any condition holds
2458 		job_control_entry* entry = get_job_control_entry(team, child, flags);
2459 
2460 		// If we don't have an entry yet, check whether there are any children
2461 		// complying to the process group specification at all.
2462 		if (entry == NULL) {
2465 			bool childrenExist = false;
2466 			if (child == -1) {
2467 				childrenExist = team->children != NULL;
2468 			} else if (child < -1) {
2469 				childrenExist = has_children_in_group(team, -child);
2470 			} else if (child != team->id) {
2471 				if (Team* childTeam = Team::Get(child)) {
2472 					BReference<Team> childTeamReference(childTeam, true);
2473 					TeamLocker childTeamLocker(childTeam);
2474 					childrenExist = childTeam->parent == team;
2475 				}
2476 			}
2477 
2478 			if (!childrenExist) {
2479 				// there is no child we could wait for
2480 				status = ECHILD;
2481 			} else {
2482 				// the children we're waiting for are still running
2483 				status = B_WOULD_BLOCK;
2484 			}
2485 		} else {
2486 			// got something
2487 			foundEntry = *entry;
2488 
2489 			// unless WNOWAIT has been specified, "consume" the wait state
2490 			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
2491 				if (entry->state == JOB_CONTROL_STATE_DEAD) {
2492 					// The child is dead. Reap its death entry.
2493 					freeDeathEntry = entry;
2494 					team->dead_children.entries.Remove(entry);
2495 					team->dead_children.count--;
2496 				} else {
2497 					// The child is well. Reset its job control state.
2498 					team_set_job_control_state(entry->team,
2499 						JOB_CONTROL_STATE_NONE, NULL);
2500 				}
2501 			}
2502 		}
2503 
2504 		// If we haven't got anything yet, prepare for waiting for the
2505 		// condition variable.
2506 		ConditionVariableEntry deadWaitEntry;
2507 
2508 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
2509 			team->dead_children.condition_variable.Add(&deadWaitEntry);
2510 
2511 		teamLocker.Unlock();
2512 
2513 		// we got our entry and can return to our caller
2514 		if (status == B_OK) {
2515 			if (ignoreFoundEntries) {
2516 				// ... unless we shall ignore found entries
2517 				delete freeDeathEntry;
2518 				freeDeathEntry = NULL;
2519 				continue;
2520 			}
2521 
2522 			break;
2523 		}
2524 
2525 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
2526 			T(WaitForChildDone(status));
2527 			return status;
2528 		}
2529 
2530 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
2531 		if (status == B_INTERRUPTED) {
2532 			T(WaitForChildDone(status));
2533 			return status;
2534 		}
2535 
2536 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
2537 		// all our children are dead and fail with ECHILD. We check the
2538 		// condition at this point.
2539 		if (!ignoreFoundEntriesChecked) {
2540 			teamLocker.Lock();
2541 
2542 			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
2543 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
2544 				|| handler.sa_handler == SIG_IGN) {
2545 				ignoreFoundEntries = true;
2546 			}
2547 
2548 			teamLocker.Unlock();
2549 
2550 			ignoreFoundEntriesChecked = true;
2551 		}
2552 	}
2553 
2554 	delete freeDeathEntry;
2555 
	// When we get here, we have a valid entry, and it has already been
	// unregistered from the team or group. Fill in the returned info.
2558 	memset(&_info, 0, sizeof(_info));
2559 	_info.si_signo = SIGCHLD;
2560 	_info.si_pid = foundEntry.thread;
2561 	_info.si_uid = foundEntry.signaling_user;
2562 	// TODO: Fill in si_errno?
2563 
2564 	switch (foundEntry.state) {
2565 		case JOB_CONTROL_STATE_DEAD:
2566 			_info.si_code = foundEntry.reason;
2567 			_info.si_status = foundEntry.reason == CLD_EXITED
2568 				? foundEntry.status : foundEntry.signal;
2569 			_usage_info.user_time = foundEntry.user_time;
2570 			_usage_info.kernel_time = foundEntry.kernel_time;
2571 			break;
2572 		case JOB_CONTROL_STATE_STOPPED:
2573 			_info.si_code = CLD_STOPPED;
2574 			_info.si_status = foundEntry.signal;
2575 			break;
2576 		case JOB_CONTROL_STATE_CONTINUED:
2577 			_info.si_code = CLD_CONTINUED;
2578 			_info.si_status = 0;
2579 			break;
2580 		case JOB_CONTROL_STATE_NONE:
2581 			// can't happen
2582 			break;
2583 	}
2584 
2585 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
2586 	// status is available.
2587 	TeamLocker teamLocker(team);
2588 	InterruptsSpinLocker signalLocker(team->signal_lock);
2589 	SpinLocker threadCreationLocker(gThreadCreationLock);
2590 
2591 	if (is_team_signal_blocked(team, SIGCHLD)) {
2592 		if (get_job_control_entry(team, child, flags) == NULL)
2593 			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
2594 	}
2595 
2596 	threadCreationLocker.Unlock();
2597 	signalLocker.Unlock();
2598 	teamLocker.Unlock();
2599 
2600 	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we'd
	// rather wait until the thread is really gone.
2603 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
2604 		wait_for_thread(foundEntry.thread, NULL);
2605 
2606 	T(WaitForChildDone(foundEntry));
2607 
2608 	return foundEntry.thread;
2609 }
2610 
2611 
2612 /*! Fills the team_info structure with information from the specified team.
2613 	Interrupts must be enabled. The team must not be locked.
2614 */
2615 static status_t
2616 fill_team_info(Team* team, team_info* info, size_t size)
2617 {
2618 	if (size != sizeof(team_info))
2619 		return B_BAD_VALUE;
2620 
	// TODO: Set more information for team_info
2622 	memset(info, 0, size);
2623 
2624 	info->team = team->id;
2625 		// immutable
2626 	info->image_count = count_images(team);
2627 		// protected by sImageMutex
2628 
2629 	TeamLocker teamLocker(team);
2630 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2631 
2632 	info->thread_count = team->num_threads;
2633 	//info->area_count =
2634 	info->debugger_nub_thread = team->debug_info.nub_thread;
2635 	info->debugger_nub_port = team->debug_info.nub_port;
2636 	info->uid = team->effective_uid;
2637 	info->gid = team->effective_gid;
2638 
2639 	strlcpy(info->args, team->Args(), sizeof(info->args));
2640 	info->argc = 1;
2641 
2642 	return B_OK;
2643 }
2644 
2645 
2646 /*!	Returns whether the process group contains stopped processes.
2647 	The caller must hold the process group's lock.
2648 */
2649 static bool
2650 process_group_has_stopped_processes(ProcessGroup* group)
2651 {
2652 	Team* team = group->teams;
2653 	while (team != NULL) {
2654 		// the parent team's lock guards the job control entry -- acquire it
2655 		team->LockTeamAndParent(false);
2656 
2657 		if (team->job_control_entry != NULL
2658 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2659 			team->UnlockTeamAndParent();
2660 			return true;
2661 		}
2662 
2663 		team->UnlockTeamAndParent();
2664 
2665 		team = team->group_next;
2666 	}
2667 
2668 	return false;
2669 }
2670 
2671 
2672 /*!	Iterates through all process groups queued in team_remove_team() and signals
2673 	those that are orphaned and have stopped processes.
2674 	The caller must not hold any team or process group locks.
2675 */
2676 static void
2677 orphaned_process_group_check()
2678 {
2679 	// process as long as there are groups in the list
2680 	while (true) {
2681 		// remove the head from the list
2682 		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
2683 
2684 		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
2685 		if (group == NULL)
2686 			return;
2687 
2688 		group->UnsetOrphanedCheck();
2689 		BReference<ProcessGroup> groupReference(group);
2690 
2691 		orphanedCheckLocker.Unlock();
2692 
2693 		AutoLocker<ProcessGroup> groupLocker(group);
2694 
2695 		// If the group is orphaned and contains stopped processes, we're
2696 		// supposed to send SIGHUP + SIGCONT.
2697 		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
2698 			Thread* currentThread = thread_get_current_thread();
2699 
2700 			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
2701 			send_signal_to_process_group_locked(group, signal, 0);
2702 
2703 			signal.SetNumber(SIGCONT);
2704 			send_signal_to_process_group_locked(group, signal, 0);
2705 		}
2706 	}
2707 }
2708 
2709 
2710 static status_t
2711 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2712 	uint32 flags)
2713 {
2714 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2715 		return B_BAD_VALUE;
2716 
2717 	// get the team
2718 	Team* team = Team::GetAndLock(id);
2719 	if (team == NULL)
2720 		return B_BAD_TEAM_ID;
2721 	BReference<Team> teamReference(team, true);
2722 	TeamLocker teamLocker(team, true);
2723 
2724 	if ((flags & B_CHECK_PERMISSION) != 0) {
2725 		uid_t uid = geteuid();
2726 		if (uid != 0 && uid != team->effective_uid)
2727 			return B_NOT_ALLOWED;
2728 	}
2729 
2730 	bigtime_t kernelTime = 0;
2731 	bigtime_t userTime = 0;
2732 
2733 	switch (who) {
2734 		case B_TEAM_USAGE_SELF:
2735 		{
2736 			Thread* thread = team->thread_list;
2737 
2738 			for (; thread != NULL; thread = thread->team_next) {
2739 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2740 				kernelTime += thread->kernel_time;
2741 				userTime += thread->user_time;
2742 			}
2743 
2744 			kernelTime += team->dead_threads_kernel_time;
2745 			userTime += team->dead_threads_user_time;
2746 			break;
2747 		}
2748 
2749 		case B_TEAM_USAGE_CHILDREN:
2750 		{
2751 			Team* child = team->children;
2752 			for (; child != NULL; child = child->siblings_next) {
2753 				TeamLocker childLocker(child);
2754 
				// sum up the child's live threads (not the parent's)
				Thread* thread = child->thread_list;
2756 
2757 				for (; thread != NULL; thread = thread->team_next) {
2758 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2759 					kernelTime += thread->kernel_time;
2760 					userTime += thread->user_time;
2761 				}
2762 
2763 				kernelTime += child->dead_threads_kernel_time;
2764 				userTime += child->dead_threads_user_time;
2765 			}
2766 
2767 			kernelTime += team->dead_children.kernel_time;
2768 			userTime += team->dead_children.user_time;
2769 			break;
2770 		}
2771 	}
2772 
2773 	info->kernel_time = kernelTime;
2774 	info->user_time = userTime;
2775 
2776 	return B_OK;
2777 }
2778 
2779 
2780 //	#pragma mark - Private kernel API
2781 
2782 
2783 status_t
2784 team_init(kernel_args* args)
2785 {
2786 	// create the team hash table
2787 	new(&sTeamHash) TeamTable;
2788 	if (sTeamHash.Init(64) != B_OK)
2789 		panic("Failed to init team hash table!");
2790 
2791 	new(&sGroupHash) ProcessGroupHashTable;
2792 	if (sGroupHash.Init() != B_OK)
2793 		panic("Failed to init process group hash table!");
2794 
2795 	// create initial session and process groups
2796 
2797 	ProcessSession* session = new(std::nothrow) ProcessSession(1);
2798 	if (session == NULL)
2799 		panic("Could not create initial session.\n");
2800 	BReference<ProcessSession> sessionReference(session, true);
2801 
2802 	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
2803 	if (group == NULL)
2804 		panic("Could not create initial process group.\n");
2805 	BReference<ProcessGroup> groupReference(group, true);
2806 
2807 	group->Publish(session);
2808 
2809 	// create the kernel team
2810 	sKernelTeam = Team::Create(1, "kernel_team", true);
2811 	if (sKernelTeam == NULL)
2812 		panic("could not create kernel team!\n");
2813 	sKernelTeam->SetArgs(sKernelTeam->Name());
2814 	sKernelTeam->state = TEAM_STATE_NORMAL;
2815 
2816 	sKernelTeam->saved_set_uid = 0;
2817 	sKernelTeam->real_uid = 0;
2818 	sKernelTeam->effective_uid = 0;
2819 	sKernelTeam->saved_set_gid = 0;
2820 	sKernelTeam->real_gid = 0;
2821 	sKernelTeam->effective_gid = 0;
2822 	sKernelTeam->supplementary_groups = NULL;
2823 	sKernelTeam->supplementary_group_count = 0;
2824 
2825 	insert_team_into_group(group, sKernelTeam);
2826 
2827 	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
2828 	if (sKernelTeam->io_context == NULL)
2829 		panic("could not create io_context for kernel team!\n");
2830 
2831 	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
2832 		dprintf("Failed to resize FD table for kernel team!\n");
2833 
2834 	// stick it in the team hash
2835 	sTeamHash.Insert(sKernelTeam);
2836 
2837 	add_debugger_command_etc("team", &dump_team_info,
2838 		"Dump info about a particular team",
2839 		"[ <id> | <address> | <name> ]\n"
2840 		"Prints information about the specified team. If no argument is given\n"
2841 		"the current team is selected.\n"
2842 		"  <id>       - The ID of the team.\n"
2843 		"  <address>  - The address of the team structure.\n"
2844 		"  <name>     - The team's name.\n", 0);
2845 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2846 		"\n"
2847 		"Prints a list of all existing teams.\n", 0);
2848 
2849 	new(&sNotificationService) TeamNotificationService();
2850 
2851 	sNotificationService.Register();
2852 
2853 	return B_OK;
2854 }
2855 
2856 
2857 int32
2858 team_max_teams(void)
2859 {
2860 	return sMaxTeams;
2861 }
2862 
2863 
2864 int32
2865 team_used_teams(void)
2866 {
2867 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2868 	return sUsedTeams;
2869 }
2870 
2871 
2872 /*! Returns a death entry of a child team specified by ID (if any).
2873 	The caller must hold the team's lock.
2874 
2875 	\param team The team whose dead children list to check.
	\param child The ID of the child whose death entry to look for. Must be > 0.
2877 	\param _deleteEntry Return variable, indicating whether the caller needs to
2878 		delete the returned entry.
2879 	\return The death entry of the matching team, or \c NULL, if no death entry
2880 		for the team was found.
2881 */
2882 job_control_entry*
2883 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2884 {
2885 	if (child <= 0)
2886 		return NULL;
2887 
2888 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2889 		child);
2890 	if (entry) {
		// remove the entry only if the caller is the parent of the found team
2892 		if (team_get_current_team_id() == entry->thread) {
2893 			team->dead_children.entries.Remove(entry);
2894 			team->dead_children.count--;
2895 			*_deleteEntry = true;
2896 		} else {
2897 			*_deleteEntry = false;
2898 		}
2899 	}
2900 
2901 	return entry;
2902 }
2903 
2904 
2905 /*! Quick check to see if we have a valid team ID. */
2906 bool
2907 team_is_valid(team_id id)
2908 {
2909 	if (id <= 0)
2910 		return false;
2911 
2912 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2913 
2914 	return team_get_team_struct_locked(id) != NULL;
2915 }
2916 
2917 
2918 Team*
2919 team_get_team_struct_locked(team_id id)
2920 {
2921 	return sTeamHash.Lookup(id);
2922 }
2923 
2924 
2925 void
2926 team_set_controlling_tty(int32 ttyIndex)
2927 {
2928 	// lock the team, so its session won't change while we're playing with it
2929 	Team* team = thread_get_current_thread()->team;
2930 	TeamLocker teamLocker(team);
2931 
2932 	// get and lock the session
2933 	ProcessSession* session = team->group->Session();
2934 	AutoLocker<ProcessSession> sessionLocker(session);
2935 
2936 	// set the session's fields
2937 	session->controlling_tty = ttyIndex;
2938 	session->foreground_group = -1;
2939 }
2940 
2941 
2942 int32
2943 team_get_controlling_tty()
2944 {
2945 	// lock the team, so its session won't change while we're playing with it
2946 	Team* team = thread_get_current_thread()->team;
2947 	TeamLocker teamLocker(team);
2948 
2949 	// get and lock the session
2950 	ProcessSession* session = team->group->Session();
2951 	AutoLocker<ProcessSession> sessionLocker(session);
2952 
2953 	// get the session's field
2954 	return session->controlling_tty;
2955 }
2956 
2957 
2958 status_t
2959 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2960 {
2961 	// lock the team, so its session won't change while we're playing with it
2962 	Thread* thread = thread_get_current_thread();
2963 	Team* team = thread->team;
2964 	TeamLocker teamLocker(team);
2965 
2966 	// get and lock the session
2967 	ProcessSession* session = team->group->Session();
2968 	AutoLocker<ProcessSession> sessionLocker(session);
2969 
2970 	// check given TTY -- must be the controlling tty of the calling process
2971 	if (session->controlling_tty != ttyIndex)
2972 		return ENOTTY;
2973 
2974 	// check given process group -- must belong to our session
2975 	{
2976 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2977 		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
2978 		if (group == NULL || group->Session() != session)
2979 			return B_BAD_VALUE;
2980 	}
2981 
2982 	// If we are a background group, we can do that unharmed only when we
2983 	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
2984 	if (session->foreground_group != -1
2985 		&& session->foreground_group != team->group_id
2986 		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
2987 		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
2988 		InterruptsSpinLocker signalLocker(team->signal_lock);
2989 
2990 		if (!is_team_signal_blocked(team, SIGTTOU)) {
2991 			pid_t groupID = team->group_id;
2992 
2993 			signalLocker.Unlock();
2994 			sessionLocker.Unlock();
2995 			teamLocker.Unlock();
2996 
2997 			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
2998 			send_signal_to_process_group(groupID, signal, 0);
2999 			return B_INTERRUPTED;
3000 		}
3001 	}
3002 
3003 	session->foreground_group = processGroupID;
3004 
3005 	return B_OK;
3006 }
3007 
3008 
3009 /*!	Removes the specified team from the global team hash, from its process
3010 	group, and from its parent.
3011 	It also moves all of its children to the kernel team.
3012 
3013 	The caller must hold the following locks:
3014 	- \a team's process group's lock,
3015 	- the kernel team's lock,
3016 	- \a team's parent team's lock (might be the kernel team), and
3017 	- \a team's lock.
3018 */
3019 void
3020 team_remove_team(Team* team, pid_t& _signalGroup)
3021 {
3022 	Team* parent = team->parent;
3023 
3024 	// remember how long this team lasted
3025 	parent->dead_children.kernel_time += team->dead_threads_kernel_time
3026 		+ team->dead_children.kernel_time;
3027 	parent->dead_children.user_time += team->dead_threads_user_time
3028 		+ team->dead_children.user_time;
3029 
3030 	// remove the team from the hash table
3031 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3032 	sTeamHash.Remove(team);
3033 	sUsedTeams--;
3034 	teamsLocker.Unlock();
3035 
3036 	// The team can no longer be accessed by ID. Navigation to it is still
3037 	// possible from its process group and its parent and children, but that
3038 	// will be rectified shortly.
3039 	team->state = TEAM_STATE_DEATH;
3040 
3041 	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signaling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine whom to signal.
3045 	_signalGroup = -1;
3046 	bool isSessionLeader = false;
3047 	if (team->session_id == team->id
3048 		&& team->group->Session()->controlling_tty >= 0) {
3049 		isSessionLeader = true;
3050 
3051 		ProcessSession* session = team->group->Session();
3052 
3053 		AutoLocker<ProcessSession> sessionLocker(session);
3054 
3055 		session->controlling_tty = -1;
3056 		_signalGroup = session->foreground_group;
3057 	}
3058 
3059 	// remove us from our process group
3060 	remove_team_from_group(team);
3061 
3062 	// move the team's children to the kernel team
3063 	while (Team* child = team->children) {
3064 		// remove the child from the current team and add it to the kernel team
3065 		TeamLocker childLocker(child);
3066 
3067 		remove_team_from_parent(team, child);
3068 		insert_team_into_parent(sKernelTeam, child);
3069 
3070 		// move job control entries too
3071 		sKernelTeam->stopped_children.entries.MoveFrom(
3072 			&team->stopped_children.entries);
3073 		sKernelTeam->continued_children.entries.MoveFrom(
3074 			&team->continued_children.entries);
3075 
3076 		// If the team was a session leader with controlling terminal,
3077 		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
3078 		// groups with stopped processes. Due to locking complications we can't
3079 		// do that here, so we only check whether we were a reason for the
3080 		// child's process group not being an orphan and, if so, schedule a
3081 		// later check (cf. orphaned_process_group_check()).
3082 		if (isSessionLeader) {
3083 			ProcessGroup* childGroup = child->group;
3084 			if (childGroup->Session()->id == team->session_id
3085 				&& childGroup->id != team->group_id) {
3086 				childGroup->ScheduleOrphanedCheck();
3087 			}
3088 		}
3089 
		// Note that we don't move the dead children entries; those will be
		// deleted when the team structure is deleted.
3092 	}
3093 
3094 	// remove us from our parent
3095 	remove_team_from_parent(parent, team);
3096 }
3097 
3098 
3099 /*!	Kills all threads but the main thread of the team and shuts down user
3100 	debugging for it.
3101 	To be called on exit of the team's main thread. No locks must be held.
3102 
3103 	\param team The team in question.
3104 	\return The port of the debugger for the team, -1 if none. To be passed to
3105 		team_delete_team().
3106 */
3107 port_id
3108 team_shutdown_team(Team* team)
3109 {
3110 	ASSERT(thread_get_current_thread() == team->main_thread);
3111 
3112 	TeamLocker teamLocker(team);
3113 
3114 	// Make sure debugging changes won't happen anymore.
3115 	port_id debuggerPort = -1;
3116 	while (true) {
3117 		// If a debugger change is in progress for the team, we'll have to
3118 		// wait until it is done.
3119 		ConditionVariableEntry waitForDebuggerEntry;
3120 		bool waitForDebugger = false;
3121 
3122 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
3123 
3124 		if (team->debug_info.debugger_changed_condition != NULL) {
3125 			team->debug_info.debugger_changed_condition->Add(
3126 				&waitForDebuggerEntry);
3127 			waitForDebugger = true;
3128 		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
3129 			// The team is being debugged. That will stop with the termination
3130 			// of the nub thread. Since we set the team state to death, no one
3131 			// can install a debugger anymore. We fetch the debugger's port to
3132 			// send it a message at the bitter end.
3133 			debuggerPort = team->debug_info.debugger_port;
3134 		}
3135 
3136 		debugInfoLocker.Unlock();
3137 
3138 		if (!waitForDebugger)
3139 			break;
3140 
3141 		// wait for the debugger change to be finished
3142 		teamLocker.Unlock();
3143 
3144 		waitForDebuggerEntry.Wait();
3145 
3146 		teamLocker.Lock();
3147 	}
3148 
3149 	// Mark the team as shutting down. That will prevent new threads from being
3150 	// created and debugger changes from taking place.
3151 	team->state = TEAM_STATE_SHUTDOWN;
3152 
3153 	// delete all timers
3154 	team->DeleteUserTimers(false);
3155 
3156 	// deactivate CPU time user timers for the team
3157 	InterruptsSpinLocker timeLocker(team->time_lock);
3158 
3159 	if (team->HasActiveCPUTimeUserTimers())
3160 		team->DeactivateCPUTimeUserTimers();
3161 
3162 	timeLocker.Unlock();
3163 
3164 	// kill all threads but the main thread
3165 	team_death_entry deathEntry;
3166 	deathEntry.condition.Init(team, "team death");
3167 
3168 	while (true) {
3169 		team->death_entry = &deathEntry;
3170 		deathEntry.remaining_threads = 0;
3171 
3172 		Thread* thread = team->thread_list;
3173 		while (thread != NULL) {
3174 			if (thread != team->main_thread) {
3175 				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
3176 				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
3177 				deathEntry.remaining_threads++;
3178 			}
3179 
3180 			thread = thread->team_next;
3181 		}
3182 
3183 		if (deathEntry.remaining_threads == 0)
3184 			break;
3185 
3186 		// there are threads to wait for
3187 		ConditionVariableEntry entry;
3188 		deathEntry.condition.Add(&entry);
3189 
3190 		teamLocker.Unlock();
3191 
3192 		entry.Wait();
3193 
3194 		teamLocker.Lock();
3195 	}
3196 
3197 	team->death_entry = NULL;
3198 
3199 	return debuggerPort;
3200 }
3201 
3202 
3203 /*!	Called on team exit to notify threads waiting on the team and free most
3204 	resources associated with it.
3205 	The caller shouldn't hold any locks.
3206 */
3207 void
3208 team_delete_team(Team* team, port_id debuggerPort)
3209 {
3210 	// Not quite in our job description, but work that has been left by
3211 	// team_remove_team() and that can be done now that we're not holding any
3212 	// locks.
3213 	orphaned_process_group_check();
3214 
3215 	team_id teamID = team->id;
3216 
3217 	ASSERT(team->num_threads == 0);
3218 
3219 	// If someone is waiting for this team to be loaded, but it dies
3220 	// unexpectedly before being done, we need to notify the waiting
3221 	// thread now.
3222 
3223 	TeamLocker teamLocker(team);
3224 
3225 	if (team->loading_info) {
3226 		// there's indeed someone waiting
3227 		struct team_loading_info* loadingInfo = team->loading_info;
3228 		team->loading_info = NULL;
3229 
3230 		loadingInfo->result = B_ERROR;
3231 
3232 		// wake up the waiting thread
3233 		loadingInfo->condition.NotifyAll();
3234 	}
3235 
3236 	// notify team watchers
3237 
3238 	{
3239 		// we're not reachable from anyone anymore at this point, so we
3240 		// can safely access the list without any locking
3241 		struct team_watcher* watcher;
3242 		while ((watcher = (struct team_watcher*)list_remove_head_item(
3243 				&team->watcher_list)) != NULL) {
3244 			watcher->hook(teamID, watcher->data);
3245 			free(watcher);
3246 		}
3247 	}
3248 
3249 	teamLocker.Unlock();
3250 
3251 	sNotificationService.Notify(TEAM_REMOVED, team);
3252 
3253 	// free team resources
3254 
3255 	delete_realtime_sem_context(team->realtime_sem_context);
3256 	xsi_sem_undo(team);
3257 	remove_images(team);
3258 	team->address_space->RemoveAndPut();
3259 
3260 	team->ReleaseReference();
3261 
	// notify the debugger that the team is gone
3263 	user_debug_team_deleted(teamID, debuggerPort);
3264 }
3265 
3266 
3267 Team*
3268 team_get_kernel_team(void)
3269 {
3270 	return sKernelTeam;
3271 }
3272 
3273 
3274 team_id
3275 team_get_kernel_team_id(void)
3276 {
3277 	if (!sKernelTeam)
3278 		return 0;
3279 
3280 	return sKernelTeam->id;
3281 }
3282 
3283 
3284 team_id
3285 team_get_current_team_id(void)
3286 {
3287 	return thread_get_current_thread()->team->id;
3288 }
3289 
3290 
3291 status_t
3292 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3293 {
3294 	if (id == sKernelTeam->id) {
3295 		// we're the kernel team, so we don't have to go through all
3296 		// the hassle (locking and hash lookup)
3297 		*_addressSpace = VMAddressSpace::GetKernel();
3298 		return B_OK;
3299 	}
3300 
3301 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3302 
3303 	Team* team = team_get_team_struct_locked(id);
3304 	if (team == NULL)
3305 		return B_BAD_VALUE;
3306 
3307 	team->address_space->Get();
3308 	*_addressSpace = team->address_space;
3309 	return B_OK;
3310 }
3311 
3312 
3313 /*!	Sets the team's job control state.
3314 	The caller must hold the parent team's lock. Interrupts are allowed to be
3315 	enabled or disabled.
	\param team The team whose job control state shall be set.
	\param newState The new state to be set.
	\param signal The signal the new state was caused by. Can be \c NULL, if
		none. Then the caller is responsible for filling in the following
		fields of the entry before releasing the parent team's lock, unless
		the new state is \c JOB_CONTROL_STATE_NONE:
3322 		- \c signal: The number of the signal causing the state change.
3323 		- \c signaling_user: The real UID of the user sending the signal.
3324 */
3325 void
3326 team_set_job_control_state(Team* team, job_control_state newState,
3327 	Signal* signal)
3328 {
3329 	if (team == NULL || team->job_control_entry == NULL)
3330 		return;
3331 
	// don't touch anything if the state stays the same or the team is already
	// dead
3334 	job_control_entry* entry = team->job_control_entry;
3335 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
3336 		return;
3337 
3338 	T(SetJobControlState(team->id, newState, signal));
3339 
3340 	// remove from the old list
3341 	switch (entry->state) {
3342 		case JOB_CONTROL_STATE_NONE:
3343 			// entry is in no list ATM
3344 			break;
3345 		case JOB_CONTROL_STATE_DEAD:
3346 			// can't get here
3347 			break;
3348 		case JOB_CONTROL_STATE_STOPPED:
3349 			team->parent->stopped_children.entries.Remove(entry);
3350 			break;
3351 		case JOB_CONTROL_STATE_CONTINUED:
3352 			team->parent->continued_children.entries.Remove(entry);
3353 			break;
3354 	}
3355 
3356 	entry->state = newState;
3357 
3358 	if (signal != NULL) {
3359 		entry->signal = signal->Number();
3360 		entry->signaling_user = signal->SendingUser();
3361 	}
3362 
3363 	// add to new list
3364 	team_job_control_children* childList = NULL;
3365 	switch (entry->state) {
3366 		case JOB_CONTROL_STATE_NONE:
3367 			// entry doesn't get into any list
3368 			break;
3369 		case JOB_CONTROL_STATE_DEAD:
3370 			childList = &team->parent->dead_children;
3371 			team->parent->dead_children.count++;
3372 			break;
3373 		case JOB_CONTROL_STATE_STOPPED:
3374 			childList = &team->parent->stopped_children;
3375 			break;
3376 		case JOB_CONTROL_STATE_CONTINUED:
3377 			childList = &team->parent->continued_children;
3378 			break;
3379 	}
3380 
3381 	if (childList != NULL) {
3382 		childList->entries.Add(entry);
3383 		team->parent->dead_children.condition_variable.NotifyAll();
3384 	}
3385 }
3386 
3387 
3388 /*!	Inits the given team's exit information, if not yet initialized, to some
3389 	generic "killed" status.
3390 	The caller must not hold the team's lock. Interrupts must be enabled.
3391 
3392 	\param team The team whose exit info shall be initialized.
3393 */
3394 void
3395 team_init_exit_info_on_error(Team* team)
3396 {
3397 	TeamLocker teamLocker(team);
3398 
3399 	if (!team->exit.initialized) {
3400 		team->exit.reason = CLD_KILLED;
3401 		team->exit.signal = SIGKILL;
3402 		team->exit.signaling_user = geteuid();
3403 		team->exit.status = 0;
3404 		team->exit.initialized = true;
3405 	}
3406 }
3407 
3408 
3409 /*! Adds a hook to the team that is called as soon as this team goes away.
3410 	This call might get public in the future.
3411 */
3412 status_t
3413 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3414 {
3415 	if (hook == NULL || teamID < B_OK)
3416 		return B_BAD_VALUE;
3417 
3418 	// create the watcher object
3419 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3420 	if (watcher == NULL)
3421 		return B_NO_MEMORY;
3422 
3423 	watcher->hook = hook;
3424 	watcher->data = data;
3425 
	// add the watcher, if the team isn't already dying; first get the team
3428 	Team* team = Team::GetAndLock(teamID);
3429 	if (team == NULL) {
3430 		free(watcher);
3431 		return B_BAD_TEAM_ID;
3432 	}
3433 
3434 	list_add_item(&team->watcher_list, watcher);
3435 
3436 	team->UnlockAndReleaseReference();
3437 
3438 	return B_OK;
3439 }
3440 
3441 
3442 status_t
3443 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3444 {
3445 	if (hook == NULL || teamID < 0)
3446 		return B_BAD_VALUE;
3447 
3448 	// get team and remove watcher (if present)
3449 	Team* team = Team::GetAndLock(teamID);
3450 	if (team == NULL)
3451 		return B_BAD_TEAM_ID;
3452 
3453 	// search for watcher
3454 	team_watcher* watcher = NULL;
3455 	while ((watcher = (team_watcher*)list_get_next_item(
3456 			&team->watcher_list, watcher)) != NULL) {
3457 		if (watcher->hook == hook && watcher->data == data) {
3458 			// got it!
3459 			list_remove_item(&team->watcher_list, watcher);
3460 			break;
3461 		}
3462 	}
3463 
3464 	team->UnlockAndReleaseReference();
3465 
3466 	if (watcher == NULL)
3467 		return B_ENTRY_NOT_FOUND;
3468 
3469 	free(watcher);
3470 	return B_OK;
3471 }
3472 
3473 
3474 /*!	Allocates a user_thread structure from the team.
3475 	The team lock must be held, unless the function is called for the team's
3476 	main thread. Interrupts must be enabled.
3477 */
3478 struct user_thread*
3479 team_allocate_user_thread(Team* team)
3480 {
3481 	if (team->user_data == 0)
3482 		return NULL;
3483 
3484 	// take an entry from the free list, if any
3485 	if (struct free_user_thread* entry = team->free_user_threads) {
3486 		user_thread* thread = entry->thread;
3487 		team->free_user_threads = entry->next;
3488 		free(entry);
3489 		return thread;
3490 	}
3491 
3492 	while (true) {
3493 		// enough space left?
3494 		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
3495 		if (team->user_data_size - team->used_user_data < needed) {
3496 			// try to resize the area
3497 			if (resize_area(team->user_data_area,
3498 					team->user_data_size + B_PAGE_SIZE) != B_OK) {
3499 				return NULL;
3500 			}
3501 
3502 			// resized user area successfully -- try to allocate the user_thread
3503 			// again
3504 			team->user_data_size += B_PAGE_SIZE;
3505 			continue;
3506 		}
3507 
3508 		// allocate the user_thread
3509 		user_thread* thread
3510 			= (user_thread*)(team->user_data + team->used_user_data);
3511 		team->used_user_data += needed;
3512 
3513 		return thread;
3514 	}
3515 }
3516 
3517 
3518 /*!	Frees the given user_thread structure.
3519 	The team's lock must not be held. Interrupts must be enabled.
3520 	\param team The team the user thread was allocated from.
3521 	\param userThread The user thread to free.
3522 */
3523 void
3524 team_free_user_thread(Team* team, struct user_thread* userThread)
3525 {
3526 	if (userThread == NULL)
3527 		return;
3528 
3529 	// create a free list entry
3530 	free_user_thread* entry
3531 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3532 	if (entry == NULL) {
3533 		// we have to leak the user thread :-/
3534 		return;
3535 	}
3536 
3537 	// add to free list
3538 	TeamLocker teamLocker(team);
3539 
3540 	entry->thread = userThread;
3541 	entry->next = team->free_user_threads;
3542 	team->free_user_threads = entry;
3543 }
3544 
3545 
3546 //	#pragma mark - Associated data interface
3547 
3548 
3549 AssociatedData::AssociatedData()
3550 	:
3551 	fOwner(NULL)
3552 {
3553 }
3554 
3555 
3556 AssociatedData::~AssociatedData()
3557 {
3558 }
3559 
3560 
3561 void
3562 AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
3563 {
3564 }
3565 
3566 
3567 AssociatedDataOwner::AssociatedDataOwner()
3568 {
3569 	mutex_init(&fLock, "associated data owner");
3570 }
3571 
3572 
3573 AssociatedDataOwner::~AssociatedDataOwner()
3574 {
3575 	mutex_destroy(&fLock);
3576 }
3577 
3578 
3579 bool
3580 AssociatedDataOwner::AddData(AssociatedData* data)
3581 {
3582 	MutexLocker locker(fLock);
3583 
3584 	if (data->Owner() != NULL)
3585 		return false;
3586 
3587 	data->AcquireReference();
3588 	fList.Add(data);
3589 	data->SetOwner(this);
3590 
3591 	return true;
3592 }
3593 
3594 
3595 bool
3596 AssociatedDataOwner::RemoveData(AssociatedData* data)
3597 {
3598 	MutexLocker locker(fLock);
3599 
3600 	if (data->Owner() != this)
3601 		return false;
3602 
3603 	data->SetOwner(NULL);
3604 	fList.Remove(data);
3605 
3606 	locker.Unlock();
3607 
3608 	data->ReleaseReference();
3609 
3610 	return true;
3611 }
3612 
3613 
3614 void
3615 AssociatedDataOwner::PrepareForDeletion()
3616 {
3617 	MutexLocker locker(fLock);
3618 
3619 	// move all data to a temporary list and unset the owner
3620 	DataList list;
3621 	list.MoveFrom(&fList);
3622 
3623 	for (DataList::Iterator it = list.GetIterator();
3624 		AssociatedData* data = it.Next();) {
3625 		data->SetOwner(NULL);
3626 	}
3627 
3628 	locker.Unlock();
3629 
3630 	// call the notification hooks and release our references
3631 	while (AssociatedData* data = list.RemoveHead()) {
3632 		data->OwnerDeleted(this);
3633 		data->ReleaseReference();
3634 	}
3635 }
3636 
3637 
3638 /*!	Associates data with the current team.
3639 	When the team is deleted, the data object is notified.
3640 	The team acquires a reference to the object.
3641 
3642 	\param data The data object.
3643 	\return \c true on success, \c false otherwise. Fails only when the supplied
3644 		data object is already associated with another owner.
3645 */
3646 bool
3647 team_associate_data(AssociatedData* data)
3648 {
3649 	return thread_get_current_thread()->team->AddData(data);
3650 }
3651 
3652 
3653 /*!	Dissociates data from the current team.
3654 	Balances an earlier call to team_associate_data().
3655 
3656 	\param data The data object.
3657 	\return \c true on success, \c false otherwise. Fails only when the data
3658 		object is not associated with the current team.
3659 */
3660 bool
3661 team_dissociate_data(AssociatedData* data)
3662 {
3663 	return thread_get_current_thread()->team->RemoveData(data);
3664 }
3665 
3666 
3667 //	#pragma mark - Public kernel API
3668 
3669 
3670 thread_id
3671 load_image(int32 argCount, const char** args, const char** env)
3672 {
3673 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
3674 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
3675 }
3676 
3677 
3678 thread_id
3679 load_image_etc(int32 argCount, const char* const* args,
3680 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3681 {
3682 	// we need to flatten the args and environment
3683 
3684 	if (args == NULL)
3685 		return B_BAD_VALUE;
3686 
3687 	// determine total needed size
3688 	int32 argSize = 0;
3689 	for (int32 i = 0; i < argCount; i++)
3690 		argSize += strlen(args[i]) + 1;
3691 
3692 	int32 envCount = 0;
3693 	int32 envSize = 0;
3694 	while (env != NULL && env[envCount] != NULL)
3695 		envSize += strlen(env[envCount++]) + 1;
3696 
3697 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3698 	if (size > MAX_PROCESS_ARGS_SIZE)
3699 		return B_TOO_MANY_ARGS;
3700 
3701 	// allocate space
3702 	char** flatArgs = (char**)malloc(size);
3703 	if (flatArgs == NULL)
3704 		return B_NO_MEMORY;
3705 
3706 	char** slot = flatArgs;
3707 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3708 
3709 	// copy arguments and environment
3710 	for (int32 i = 0; i < argCount; i++) {
3711 		int32 argSize = strlen(args[i]) + 1;
3712 		memcpy(stringSpace, args[i], argSize);
3713 		*slot++ = stringSpace;
3714 		stringSpace += argSize;
3715 	}
3716 
3717 	*slot++ = NULL;
3718 
3719 	for (int32 i = 0; i < envCount; i++) {
3720 		int32 envSize = strlen(env[i]) + 1;
3721 		memcpy(stringSpace, env[i], envSize);
3722 		*slot++ = stringSpace;
3723 		stringSpace += envSize;
3724 	}
3725 
3726 	*slot++ = NULL;
3727 
	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
		priority, parentID, flags, -1, 0);
3730 
3731 	free(flatArgs);
		// load_image_internal() has set our variable to NULL if it took over
		// ownership, making this free() a no-op in that case
3733 
3734 	return thread;
3735 }
3736 
3737 
3738 status_t
3739 wait_for_team(team_id id, status_t* _returnCode)
3740 {
3741 	// check whether the team exists
3742 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3743 
3744 	Team* team = team_get_team_struct_locked(id);
3745 	if (team == NULL)
3746 		return B_BAD_TEAM_ID;
3747 
3748 	id = team->id;
3749 
3750 	teamsLocker.Unlock();
3751 
3752 	// wait for the main thread (it has the same ID as the team)
3753 	return wait_for_thread(id, _returnCode);
3754 }
3755 
3756 
3757 status_t
3758 kill_team(team_id id)
3759 {
3760 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3761 
3762 	Team* team = team_get_team_struct_locked(id);
3763 	if (team == NULL)
3764 		return B_BAD_TEAM_ID;
3765 
3766 	id = team->id;
3767 
3768 	teamsLocker.Unlock();
3769 
3770 	if (team == sKernelTeam)
3771 		return B_NOT_ALLOWED;
3772 
	// Just kill the team's main thread (it has the same ID as the team). The
3774 	// cleanup code there will take care of the team.
3775 	return kill_thread(id);
3776 }
3777 
3778 
3779 status_t
3780 _get_team_info(team_id id, team_info* info, size_t size)
3781 {
3782 	// get the team
3783 	Team* team = Team::Get(id);
3784 	if (team == NULL)
3785 		return B_BAD_TEAM_ID;
3786 	BReference<Team> teamReference(team, true);
3787 
3788 	// fill in the info
3789 	return fill_team_info(team, info, size);
3790 }
3791 
3792 
3793 status_t
3794 _get_next_team_info(int32* cookie, team_info* info, size_t size)
3795 {
3796 	int32 slot = *cookie;
3797 	if (slot < 1)
3798 		slot = 1;
3799 
3800 	InterruptsSpinLocker locker(sTeamHashLock);
3801 
3802 	team_id lastTeamID = peek_next_thread_id();
3803 		// TODO: This is broken, since the id can wrap around!
3804 
3805 	// get next valid team
3806 	Team* team = NULL;
3807 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3808 		slot++;
3809 
3810 	if (team == NULL)
3811 		return B_BAD_TEAM_ID;
3812 
3813 	// get a reference to the team and unlock
3814 	BReference<Team> teamReference(team);
3815 	locker.Unlock();
3816 
3817 	// fill in the info
3818 	*cookie = ++slot;
3819 	return fill_team_info(team, info, size);
3820 }


status_t
_get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
{
	if (size != sizeof(team_usage_info))
		return B_BAD_VALUE;

	return common_get_team_usage_info(id, who, info, 0);
}


pid_t
getpid(void)
{
	return thread_get_current_thread()->team->id;
}


pid_t
getppid(void)
{
	Team* team = thread_get_current_thread()->team;

	TeamLocker teamLocker(team);

	return team->parent->id;
}


pid_t
getpgid(pid_t id)
{
	if (id < 0) {
		errno = EINVAL;
		return -1;
	}

	if (id == 0) {
		// get process group of the calling process
		Team* team = thread_get_current_thread()->team;
		TeamLocker teamLocker(team);
		return team->group_id;
	}

	// get the team
	Team* team = Team::GetAndLock(id);
	if (team == NULL) {
		errno = ESRCH;
		return -1;
	}

	// get the team's process group ID
	pid_t groupID = team->group_id;

	team->UnlockAndReleaseReference();

	return groupID;
}


pid_t
getsid(pid_t id)
{
	if (id < 0) {
		errno = EINVAL;
		return -1;
	}

	if (id == 0) {
		// get session of the calling process
		Team* team = thread_get_current_thread()->team;
		TeamLocker teamLocker(team);
		return team->session_id;
	}

	// get the team
	Team* team = Team::GetAndLock(id);
	if (team == NULL) {
		errno = ESRCH;
		return -1;
	}

	// get the team's session ID
	pid_t sessionID = team->session_id;

	team->UnlockAndReleaseReference();

	return sessionID;
}
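

/*	Both getpgid() and getsid() follow the POSIX convention that an ID of 0
	refers to the calling process, so these pairs are equivalent
	(illustrative):

		pid_t group = getpgid(0);		// same as getpgid(getpid())
		pid_t session = getsid(0);		// same as getsid(getpid())

	On failure they return -1 and set errno, matching the POSIX contract
	rather than returning a status_t.
*/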


//	#pragma mark - User syscalls


status_t
_user_exec(const char* userPath, const char* const* userFlatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char path[B_PATH_NAME_LENGTH];

	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
		return B_BAD_ADDRESS;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);

	if (error == B_OK) {
		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
			envCount, umask);
			// this one only returns in case of error
	}

	free(flatArgs);
	return error;
}


thread_id
_user_fork(void)
{
	return fork_team();
}


pid_t
_user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo,
	team_usage_info* usageInfo)
{
	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;
	if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo))
		return B_BAD_ADDRESS;

	siginfo_t info;
	team_usage_info usage_info;
	pid_t foundChild = wait_for_child(child, flags, info, usage_info);
	if (foundChild < 0)
		return syscall_restart_handle_post(foundChild);

	// copy info back to userland
	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
		return B_BAD_ADDRESS;
	// copy usage_info back to userland
	if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info,
		sizeof(usage_info)) != B_OK) {
		return B_BAD_ADDRESS;
	}

	return foundChild;
}
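

/*	Illustrative sketch (not part of the build): this syscall backs the
	POSIX wait()/waitpid() family in userland. Assuming a child that exits
	with code 7:

		pid_t child = fork();
		if (child == 0)
			_exit(7);

		int status;
		pid_t result = waitpid(child, &status, 0);
		if (result == child && WIFEXITED(status))
			printf("exit code: %d\n", WEXITSTATUS(status));

	The siginfo_t filled in here is the raw material userland translates
	into the wait status word.
*/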


pid_t
_user_process_info(pid_t process, int32 which)
{
	// we only allow returning the parent of the current process
	if (which == PARENT_ID
		&& process != 0 && process != thread_get_current_thread()->team->id)
		return B_BAD_VALUE;

	pid_t result;
	switch (which) {
		case SESSION_ID:
			result = getsid(process);
			break;
		case GROUP_ID:
			result = getpgid(process);
			break;
		case PARENT_ID:
			result = getppid();
			break;
		default:
			return B_BAD_VALUE;
	}

	return result >= 0 ? result : errno;
}


pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the target
	//   process as group leader.
	// * Set the target process' process group to an already existing one in the
	//   same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock in
			// the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child of
			// the calling team and in the same session. (If that's the case it
			// isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child that has already
			// executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different session.
			// That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll();

		return group->id;
	}
}
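

/*	Illustrative sketch (not part of the build): the classic job-control
	caller of this syscall is a shell that moves a forked child into its own
	process group. Both parent and child issue the call so the group exists
	no matter which of them runs first:

		pid_t child = fork();
		if (child == 0) {
			setpgid(0, 0);			// child: new group with ID == own PID
			execl("/bin/sleep", "sleep", "10", (char*)NULL);
			_exit(1);
		}
		setpgid(child, child);		// parent: same request; one call wins,
									// the other finds the group published

	The retry loop above copes with exactly the kind of race these duplicate
	calls can trigger.
*/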


pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll();

	return group->id;
}
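

/*	Illustrative sketch (not part of the build): the usual caller is a
	daemonizing process, which forks first precisely because a process group
	leader would fail the is_process_group_leader() check above:

		if (fork() > 0)
			exit(0);				// parent exits; child is not a leader

		pid_t session = setsid();	// child becomes leader of a new session
									// and group
		if (session < 0)
			perror("setsid");
*/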


status_t
_user_wait_for_team(team_id id, status_t* _userReturnCode)
{
	status_t returnCode;
	status_t status;

	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
		return B_BAD_ADDRESS;

	status = wait_for_team(id, &returnCode);
	if (status >= B_OK && _userReturnCode != NULL) {
		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
				!= B_OK)
			return B_BAD_ADDRESS;
		return B_OK;
	}

	return syscall_restart_handle_post(status);
}


thread_id
_user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, int32 priority, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));

	if (argCount < 1)
		return B_BAD_VALUE;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);
	if (error != B_OK)
		return error;

	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
		errorToken);

	free(flatArgs);
		// load_image_internal() has set our variable to NULL if it took over
		// ownership, so this free() is a no-op on success

	return thread;
}
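

/*	Illustrative sketch (not part of the build): userland normally reaches
	this syscall through the public load_image(), which flattens argv and
	the environment into the single buffer copy_user_process_args() expects:

		extern char** environ;

		const char* args[] = { "/bin/ls", "-l", NULL };
		thread_id thread = load_image(2, args, (const char**)environ);
		if (thread >= B_OK)
			resume_thread(thread);
			// the new team's main thread starts suspended
*/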


void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status
	TeamLocker teamLocker(team);

	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();

	// Send this thread a SIGKILL. This makes sure the thread will not return to
	// userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}


status_t
_user_kill_team(team_id team)
{
	return kill_team(team);
}


status_t
_user_get_team_info(team_id id, team_info* userInfo)
{
	status_t status;
	team_info info;

	if (!IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_team_info(id, &info, sizeof(team_info));
	if (status == B_OK) {
		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
			return B_BAD_ADDRESS;
	}

	return status;
}


status_t
_user_get_next_team_info(int32* userCookie, team_info* userInfo)
{
	status_t status;
	team_info info;
	int32 cookie;

	if (!IS_USER_ADDRESS(userCookie)
		|| !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
	if (status != B_OK)
		return status;

	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}


status_t
_user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
	size_t size)
{
	if (size != sizeof(team_usage_info))
		return B_BAD_VALUE;

	team_usage_info info;
	status_t status = common_get_team_usage_info(team, who, &info,
		B_CHECK_PERMISSION);

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(userInfo, &info, size) != B_OK) {
		return B_BAD_ADDRESS;
	}

	return status;
}


status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// allocate memory for a copy of the needed team data
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		};

		ExtendedTeamData* teamClone
			= (ExtendedTeamData*)malloc(sizeof(ExtendedTeamData));
			// It would be nicer to use new, but then we'd have to use
			// ObjectDeleter and declare the structure outside of the function
			// due to template parameter restrictions.
		if (teamClone == NULL)
			return B_NO_MEMORY;
		MemoryDeleter teamCloneDeleter(teamClone);

		io_context* ioContext;
		{
			// get the team structure
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);

			// copy the data
			teamClone->id = team->id;
			strlcpy(teamClone->name, team->Name(), sizeof(teamClone->name));
			teamClone->group_id = team->group_id;
			teamClone->session_id = team->session_id;
			teamClone->real_uid = team->real_uid;
			teamClone->real_gid = team->real_gid;
			teamClone->effective_uid = team->effective_uid;
			teamClone->effective_gid = team->effective_gid;

			// also fetch a reference to the I/O context
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		CObjectDeleter<io_context> ioContextPutter(ioContext,
			&vfs_put_io_context);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone->id) != B_OK
			|| info.AddString("name", teamClone->name) != B_OK
			|| info.AddInt32("process group", teamClone->group_id) != B_OK
			|| info.AddInt32("session", teamClone->session_id) != B_OK
			|| info.AddInt32("uid", teamClone->real_uid) != B_OK
			|| info.AddInt32("gid", teamClone->real_gid) != B_OK
			|| info.AddInt32("euid", teamClone->effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone->effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			MutexLocker ioContextLocker(ioContext->io_mutex);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
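

/*	Illustrative sketch (not part of the build): callers are expected to use
	the usual two-step size negotiation, passing a NULL buffer first. The
	_kern_* name follows the syscall naming convention; teamID is a
	hypothetical ID:

		size_t sizeNeeded = 0;
		_kern_get_extended_team_info(teamID, B_TEAM_INFO_BASIC, NULL, 0,
			&sizeNeeded);
			// returns B_BUFFER_OVERFLOW, but sizeNeeded is now valid

		void* buffer = malloc(sizeNeeded);
		status_t status = _kern_get_extended_team_info(teamID,
			B_TEAM_INFO_BASIC, buffer, sizeNeeded, &sizeNeeded);
			// on B_OK, buffer holds the flattened KMessage built above
*/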
4463