xref: /haiku/src/system/kernel/team.cpp (revision 9295c1f645806eca5d7699c985f7b509528c9eaa)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 /*!	Team functions */
13 
14 
15 #include <team.h>
16 
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22 
23 #include <OS.h>
24 
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27 
28 #include <extended_system_info_defs.h>
29 
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <safemode.h>
47 #include <sem.h>
48 #include <syscall_process_info.h>
49 #include <syscall_load_image.h>
50 #include <syscall_restart.h>
51 #include <syscalls.h>
52 #include <tls.h>
53 #include <tracing.h>
54 #include <user_runtime.h>
55 #include <user_thread.h>
56 #include <usergroup.h>
57 #include <vfs.h>
58 #include <vm/vm.h>
59 #include <vm/VMAddressSpace.h>
60 #include <util/AutoLock.h>
61 
62 #include "TeamThreadTables.h"
63 
64 
65 //#define TRACE_TEAM
66 #ifdef TRACE_TEAM
67 #	define TRACE(x) dprintf x
68 #else
69 #	define TRACE(x) ;
70 #endif
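
// TRACE() takes a double-parenthesized argument list so that a complete
// variadic dprintf() call can be passed through the single macro parameter,
// e.g.:
//
//	TRACE(("team %" B_PRId32 " created\n", team->id));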
71 
72 
73 struct team_key {
74 	team_id id;
75 };
76 
77 struct team_arg {
78 	char	*path;
79 	char	**flat_args;
80 	size_t	flat_args_size;
81 	uint32	arg_count;
82 	uint32	env_count;
83 	mode_t	umask;
84 	uint32	flags;
85 	port_id	error_port;
86 	uint32	error_token;
87 };
88 
89 #define TEAM_ARGS_FLAG_NO_ASLR	0x01
90 
91 
92 namespace {
93 
94 
95 class TeamNotificationService : public DefaultNotificationService {
96 public:
97 							TeamNotificationService();
98 
99 			void			Notify(uint32 eventCode, Team* team);
100 };
101 
102 
103 // #pragma mark - TeamTable
104 
105 
106 typedef BKernel::TeamThreadTable<Team> TeamTable;
107 
108 
109 // #pragma mark - ProcessGroupHashDefinition
110 
111 
112 struct ProcessGroupHashDefinition {
113 	typedef pid_t			KeyType;
114 	typedef	ProcessGroup	ValueType;
115 
116 	size_t HashKey(pid_t key) const
117 	{
118 		return key;
119 	}
120 
121 	size_t Hash(ProcessGroup* value) const
122 	{
123 		return HashKey(value->id);
124 	}
125 
126 	bool Compare(pid_t key, ProcessGroup* value) const
127 	{
128 		return value->id == key;
129 	}
130 
131 	ProcessGroup*& GetLink(ProcessGroup* value) const
132 	{
133 		return value->next;
134 	}
135 };
136 
137 typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
138 
139 
140 }	// unnamed namespace
141 
142 
143 // #pragma mark -
144 
145 
146 // the team_id -> Team hash table and the lock protecting it
147 static TeamTable sTeamHash;
148 static rw_spinlock sTeamHashLock = B_RW_SPINLOCK_INITIALIZER;
149 
150 // the pid_t -> ProcessGroup hash table and the lock protecting it
151 static ProcessGroupHashTable sGroupHash;
152 static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;
153 
154 static Team* sKernelTeam = NULL;
155 static bool sDisableUserAddOns = false;
156 
157 // A list of process groups of children of dying session leaders that need to
158 // be signalled if they have become orphaned and contain stopped processes.
159 static ProcessGroupList sOrphanedCheckProcessGroups;
160 static mutex sOrphanedCheckLock
161 	= MUTEX_INITIALIZER("orphaned process group check");
162 
163 // some arbitrarily chosen limits -- should probably depend on the available
164 // memory (the limit is not yet enforced)
165 static int32 sMaxTeams = 2048;
166 static int32 sUsedTeams = 1;
167 
168 static TeamNotificationService sNotificationService;
169 
170 static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
171 static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
172 
173 
174 // #pragma mark - TeamListIterator
175 
176 
177 TeamListIterator::TeamListIterator()
178 {
179 	// queue the entry
180 	InterruptsWriteSpinLocker locker(sTeamHashLock);
181 	sTeamHash.InsertIteratorEntry(&fEntry);
182 }
183 
184 
185 TeamListIterator::~TeamListIterator()
186 {
187 	// remove the entry
188 	InterruptsWriteSpinLocker locker(sTeamHashLock);
189 	sTeamHash.RemoveIteratorEntry(&fEntry);
190 }
191 
192 
193 Team*
194 TeamListIterator::Next()
195 {
196 	// get the next team -- if there is one, get reference for it
197 	InterruptsWriteSpinLocker locker(sTeamHashLock);
198 	Team* team = sTeamHash.NextElement(&fEntry);
199 	if (team != NULL)
200 		team->AcquireReference();
201 
202 	return team;
203 }
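
// A minimal usage sketch (illustrative, not from this file): walking all
// teams with TeamListIterator. Next() returns each team with a reference
// already acquired, so the caller must release it when done.
//
//	TeamListIterator iterator;
//	while (Team* team = iterator.Next()) {
//		// ... inspect *team (lock it first where necessary) ...
//		team->ReleaseReference();
//	}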
204 
205 
206 // #pragma mark - Tracing
207 
208 
209 #if TEAM_TRACING
210 namespace TeamTracing {
211 
212 class TeamForked : public AbstractTraceEntry {
213 public:
214 	TeamForked(thread_id forkedThread)
215 		:
216 		fForkedThread(forkedThread)
217 	{
218 		Initialized();
219 	}
220 
221 	virtual void AddDump(TraceOutput& out)
222 	{
223 		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
224 	}
225 
226 private:
227 	thread_id			fForkedThread;
228 };
229 
230 
231 class ExecTeam : public AbstractTraceEntry {
232 public:
233 	ExecTeam(const char* path, int32 argCount, const char* const* args,
234 			int32 envCount, const char* const* env)
235 		:
236 		fArgCount(argCount),
237 		fArgs(NULL)
238 	{
239 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
240 			false);
241 
242 		// determine the buffer size we need for the args
243 		size_t argBufferSize = 0;
244 		for (int32 i = 0; i < argCount; i++)
245 			argBufferSize += strlen(args[i]) + 1;
246 
247 		// allocate a buffer
248 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
249 		if (fArgs) {
250 			char* buffer = fArgs;
251 			for (int32 i = 0; i < argCount; i++) {
252 				size_t argSize = strlen(args[i]) + 1;
253 				memcpy(buffer, args[i], argSize);
254 				buffer += argSize;
255 			}
256 		}
257 
258 		// ignore env for the time being
259 		(void)envCount;
260 		(void)env;
261 
262 		Initialized();
263 	}
264 
265 	virtual void AddDump(TraceOutput& out)
266 	{
267 		out.Print("team exec, \"%s\", args:", fPath);
268 
269 		if (fArgs != NULL) {
270 			char* args = fArgs;
271 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
272 				out.Print(" \"%s\"", args);
273 				args += strlen(args) + 1;
274 			}
275 		} else
276 			out.Print(" <too long>");
277 	}
278 
279 private:
280 	char*	fPath;
281 	int32	fArgCount;
282 	char*	fArgs;
283 };
284 
285 
286 static const char*
287 job_control_state_name(job_control_state state)
288 {
289 	switch (state) {
290 		case JOB_CONTROL_STATE_NONE:
291 			return "none";
292 		case JOB_CONTROL_STATE_STOPPED:
293 			return "stopped";
294 		case JOB_CONTROL_STATE_CONTINUED:
295 			return "continued";
296 		case JOB_CONTROL_STATE_DEAD:
297 			return "dead";
298 		default:
299 			return "invalid";
300 	}
301 }
302 
303 
304 class SetJobControlState : public AbstractTraceEntry {
305 public:
306 	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
307 		:
308 		fTeam(team),
309 		fNewState(newState),
310 		fSignal(signal != NULL ? signal->Number() : 0)
311 	{
312 		Initialized();
313 	}
314 
315 	virtual void AddDump(TraceOutput& out)
316 	{
317 		out.Print("team set job control state, team %" B_PRId32 ", "
318 			"new state: %s, signal: %d",
319 			fTeam, job_control_state_name(fNewState), fSignal);
320 	}
321 
322 private:
323 	team_id				fTeam;
324 	job_control_state	fNewState;
325 	int					fSignal;
326 };
327 
328 
329 class WaitForChild : public AbstractTraceEntry {
330 public:
331 	WaitForChild(pid_t child, uint32 flags)
332 		:
333 		fChild(child),
334 		fFlags(flags)
335 	{
336 		Initialized();
337 	}
338 
339 	virtual void AddDump(TraceOutput& out)
340 	{
341 		out.Print("team wait for child, child: %" B_PRId32 ", "
342 			"flags: %#" B_PRIx32, fChild, fFlags);
343 	}
344 
345 private:
346 	pid_t	fChild;
347 	uint32	fFlags;
348 };
349 
350 
351 class WaitForChildDone : public AbstractTraceEntry {
352 public:
353 	WaitForChildDone(const job_control_entry& entry)
354 		:
355 		fState(entry.state),
356 		fTeam(entry.thread),
357 		fStatus(entry.status),
358 		fReason(entry.reason),
359 		fSignal(entry.signal)
360 	{
361 		Initialized();
362 	}
363 
364 	WaitForChildDone(status_t error)
365 		:
366 		fTeam(error)
367 	{
368 		Initialized();
369 	}
370 
371 	virtual void AddDump(TraceOutput& out)
372 	{
373 		if (fTeam >= 0) {
374 			out.Print("team wait for child done, team: %" B_PRId32 ", "
375 				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
376 				fTeam, job_control_state_name(fState), fStatus, fReason,
377 				fSignal);
378 		} else {
379 			out.Print("team wait for child failed, error: "
380 				"%#" B_PRIx32, fTeam);
381 		}
382 	}
383 
384 private:
385 	job_control_state	fState;
386 	team_id				fTeam;
387 	status_t			fStatus;
388 	uint16				fReason;
389 	uint16				fSignal;
390 };
391 
392 }	// namespace TeamTracing
393 
394 #	define T(x) new(std::nothrow) TeamTracing::x;
395 #else
396 #	define T(x) ;
397 #endif
398 
399 
400 //	#pragma mark - TeamNotificationService
401 
402 
403 TeamNotificationService::TeamNotificationService()
404 	: DefaultNotificationService("teams")
405 {
406 }
407 
408 
409 void
410 TeamNotificationService::Notify(uint32 eventCode, Team* team)
411 {
412 	char eventBuffer[128];
413 	KMessage event;
414 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
415 	event.AddInt32("event", eventCode);
416 	event.AddInt32("team", team->id);
417 	event.AddPointer("teamStruct", team);
418 
419 	DefaultNotificationService::Notify(event, eventCode);
420 }
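
// Consumers subscribe to these events via the kernel notification framework
// under the "teams" service. A hedged sketch of a listener -- the
// NotificationListener/KMessage API is assumed from <Notifications.h>, not
// defined in this file:
//
//	class MyTeamListener : public NotificationListener {
//		virtual void EventOccurred(NotificationService& service,
//			const KMessage* event)
//		{
//			int32 eventCode, teamID;
//			if (event->FindInt32("event", &eventCode) == B_OK
//				&& event->FindInt32("team", &teamID) == B_OK) {
//				// react to TEAM_ADDED, TEAM_REMOVED, TEAM_EXEC, ...
//			}
//		}
//	};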
421 
422 
423 //	#pragma mark - Team
424 
425 
426 Team::Team(team_id id, bool kernel)
427 {
428 	// adopt the ID allocated by the caller
429 	this->id = id;
430 	visible = true;
431 	serial_number = -1;
432 
433 	// init mutex
434 	if (kernel) {
435 		mutex_init(&fLock, "Team:kernel");
436 	} else {
437 		char lockName[16];
438 		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
439 		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
440 	}
441 
442 	hash_next = siblings_next = children = parent = NULL;
443 	fName[0] = '\0';
444 	fArgs[0] = '\0';
445 	num_threads = 0;
446 	io_context = NULL;
447 	address_space = NULL;
448 	realtime_sem_context = NULL;
449 	xsi_sem_context = NULL;
450 	thread_list = NULL;
451 	main_thread = NULL;
452 	loading_info = NULL;
453 	state = TEAM_STATE_BIRTH;
454 	flags = 0;
455 	death_entry = NULL;
456 	user_data_area = -1;
457 	user_data = 0;
458 	used_user_data = 0;
459 	user_data_size = 0;
460 	free_user_threads = NULL;
461 
462 	commpage_address = NULL;
463 
464 	supplementary_groups = NULL;
465 	supplementary_group_count = 0;
466 
467 	dead_threads_kernel_time = 0;
468 	dead_threads_user_time = 0;
469 	cpu_clock_offset = 0;
470 
471 	// dead threads
472 	list_init(&dead_threads);
473 	dead_threads_count = 0;
474 
475 	// dead children
476 	dead_children.count = 0;
477 	dead_children.kernel_time = 0;
478 	dead_children.user_time = 0;
479 
480 	// job control entry
481 	job_control_entry = new(nothrow) ::job_control_entry;
482 	if (job_control_entry != NULL) {
483 		job_control_entry->state = JOB_CONTROL_STATE_NONE;
484 		job_control_entry->thread = id;
485 		job_control_entry->team = this;
486 	}
487 
488 	// exit status -- setting initialized to false suffices
489 	exit.initialized = false;
490 
491 	list_init(&sem_list);
492 	list_init_etc(&port_list, port_team_link_offset());
493 	list_init(&image_list);
494 	list_init(&watcher_list);
495 
496 	clear_team_debug_info(&debug_info, true);
497 
498 	// init dead/stopped/continued children condition vars
499 	dead_children.condition_variable.Init(&dead_children, "team children");
500 
501 	B_INITIALIZE_SPINLOCK(&time_lock);
502 	B_INITIALIZE_SPINLOCK(&signal_lock);
503 
504 	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
505 		kernel ? -1 : MAX_QUEUED_SIGNALS);
506 	memset(fSignalActions, 0, sizeof(fSignalActions));
507 
508 	fUserDefinedTimerCount = 0;
509 
510 	fCoreDumpCondition = NULL;
511 }
512 
513 
514 Team::~Team()
515 {
516 	// get rid of all associated data
517 	PrepareForDeletion();
518 
519 	if (io_context != NULL)
520 		vfs_put_io_context(io_context);
521 	delete_owned_ports(this);
522 	sem_delete_owned_sems(this);
523 
524 	DeleteUserTimers(false);
525 
526 	fPendingSignals.Clear();
527 
528 	if (fQueuedSignalsCounter != NULL)
529 		fQueuedSignalsCounter->ReleaseReference();
530 
531 	while (thread_death_entry* threadDeathEntry
532 			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
533 		free(threadDeathEntry);
534 	}
535 
536 	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
537 		delete entry;
538 
539 	while (free_user_thread* entry = free_user_threads) {
540 		free_user_threads = entry->next;
541 		free(entry);
542 	}
543 
544 	malloc_referenced_release(supplementary_groups);
545 
546 	delete job_control_entry;
547 		// usually already NULL and transferred to the parent
548 
549 	mutex_destroy(&fLock);
550 }
551 
552 
553 /*static*/ Team*
554 Team::Create(team_id id, const char* name, bool kernel)
555 {
556 	// create the team object
557 	Team* team = new(std::nothrow) Team(id, kernel);
558 	if (team == NULL)
559 		return NULL;
560 	ObjectDeleter<Team> teamDeleter(team);
561 
562 	if (name != NULL)
563 		team->SetName(name);
564 
565 	// check initialization
566 	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
567 		return NULL;
568 
569 	// finish initialization (arch specifics)
570 	if (arch_team_init_team_struct(team, kernel) != B_OK)
571 		return NULL;
572 
573 	if (!kernel) {
574 		status_t error = user_timer_create_team_timers(team);
575 		if (error != B_OK)
576 			return NULL;
577 	}
578 
579 	// everything went fine
580 	return teamDeleter.Detach();
581 }
582 
583 
584 /*!	\brief Returns the team with the given ID.
585 	The caller receives a reference to the team and must release it.
586 	Team and thread spinlock must not be held.
587 */
588 /*static*/ Team*
589 Team::Get(team_id id)
590 {
591 	if (id == B_CURRENT_TEAM) {
592 		Team* team = thread_get_current_thread()->team;
593 		team->AcquireReference();
594 		return team;
595 	}
596 
597 	InterruptsReadSpinLocker locker(sTeamHashLock);
598 	Team* team = sTeamHash.Lookup(id);
599 	if (team != NULL)
600 		team->AcquireReference();
601 	return team;
602 }
603 
604 
605 /*!	\brief Returns the team with the given ID in a locked state.
606 	The caller receives a reference to the team and must release it.
607 	Team and thread spinlock must not be held.
608 */
609 /*static*/ Team*
610 Team::GetAndLock(team_id id)
611 {
612 	// get the team
613 	Team* team = Get(id);
614 	if (team == NULL)
615 		return NULL;
616 
617 	// lock it
618 	team->Lock();
619 
620 	// only return the team if it isn't already dying
621 	if (team->state >= TEAM_STATE_SHUTDOWN) {
622 		team->Unlock();
623 		team->ReleaseReference();
624 		return NULL;
625 	}
626 
627 	return team;
628 }
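
// Typical use (an illustrative sketch): look up a team by ID, work on it,
// then undo both the lock and the reference acquired by GetAndLock().
//
//	if (Team* team = Team::GetAndLock(id)) {
//		// ... the team is alive and locked here ...
//		team->Unlock();
//		team->ReleaseReference();
//	}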
629 
630 
631 /*!	Locks the team and its parent team (if any).
632 	The caller must hold a reference to the team or otherwise make sure that
633 	it won't be deleted.
634 	If the team doesn't have a parent, only the team itself is locked. If the
635 	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
636 	only the team itself is locked.
637 
638 	\param dontLockParentIfKernel If \c true, the team's parent team is
639 		locked only if it is not the kernel team.
640 */
641 void
642 Team::LockTeamAndParent(bool dontLockParentIfKernel)
643 {
644 	// The locking order is parent -> child. Since the parent can change as long
645 	// as we don't lock the team, we need to do a trial and error loop.
646 	Lock();
647 
648 	while (true) {
649 		// If the team doesn't have a parent, we're done. Otherwise try to lock
650 		// the parent. This will succeed in most cases, simplifying things.
651 		Team* parent = this->parent;
652 		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
653 			|| parent->TryLock()) {
654 			return;
655 		}
656 
657 		// get a temporary reference to the parent, unlock this team, lock the
658 		// parent, and re-lock this team
659 		BReference<Team> parentReference(parent);
660 
661 		Unlock();
662 		parent->Lock();
663 		Lock();
664 
665 		// If the parent hasn't changed in the meantime, we're done.
666 		if (this->parent == parent)
667 			return;
668 
669 		// The parent has changed -- unlock and retry.
670 		parent->Unlock();
671 	}
672 }
673 
674 
675 /*!	Unlocks the team and its parent team (if any).
676 */
677 void
678 Team::UnlockTeamAndParent()
679 {
680 	if (parent != NULL)
681 		parent->Unlock();
682 
683 	Unlock();
684 }
685 
686 
687 /*!	Locks the team, its parent team (if any), and the team's process group.
688 	The caller must hold a reference to the team or otherwise make sure that
689 	it won't be deleted.
690 	If the team doesn't have a parent, only the team itself is locked.
691 */
692 void
693 Team::LockTeamParentAndProcessGroup()
694 {
695 	LockTeamAndProcessGroup();
696 
697 	// We hold the group's and the team's lock, but not the parent team's lock.
698 	// If we have a parent, try to lock it.
699 	if (this->parent == NULL || this->parent->TryLock())
700 		return;
701 
702 	// No success -- unlock the team and let LockTeamAndParent() do the rest of
703 	// the job.
704 	Unlock();
705 	LockTeamAndParent(false);
706 }
707 
708 
709 /*!	Unlocks the team, its parent team (if any), and the team's process group.
710 */
711 void
712 Team::UnlockTeamParentAndProcessGroup()
713 {
714 	group->Unlock();
715 
716 	if (parent != NULL)
717 		parent->Unlock();
718 
719 	Unlock();
720 }
721 
722 
723 void
724 Team::LockTeamAndProcessGroup()
725 {
726 	// The locking order is process group -> child. Since the process group can
727 	// change as long as we don't lock the team, we need to do a trial and error
728 	// loop.
729 	Lock();
730 
731 	while (true) {
732 		// Try to lock the group. This will succeed in most cases, simplifying
733 		// things.
734 		ProcessGroup* group = this->group;
735 		if (group->TryLock())
736 			return;
737 
738 		// get a temporary reference to the group, unlock this team, lock the
739 		// group, and re-lock this team
740 		BReference<ProcessGroup> groupReference(group);
741 
742 		Unlock();
743 		group->Lock();
744 		Lock();
745 
746 		// If the group hasn't changed in the meantime, we're done.
747 		if (this->group == group)
748 			return;
749 
750 		// The group has changed -- unlock and retry.
751 		group->Unlock();
752 	}
753 }
754 
755 
756 void
757 Team::UnlockTeamAndProcessGroup()
758 {
759 	group->Unlock();
760 	Unlock();
761 }
762 
763 
764 void
765 Team::SetName(const char* name)
766 {
767 	if (const char* lastSlash = strrchr(name, '/'))
768 		name = lastSlash + 1;
769 
770 	strlcpy(fName, name, B_OS_NAME_LENGTH);
771 }
772 
773 
774 void
775 Team::SetArgs(const char* args)
776 {
777 	strlcpy(fArgs, args, sizeof(fArgs));
778 }
779 
780 
781 void
782 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
783 {
784 	fArgs[0] = '\0';
785 	strlcpy(fArgs, path, sizeof(fArgs));
786 	for (int i = 0; i < otherArgCount; i++) {
787 		strlcat(fArgs, " ", sizeof(fArgs));
788 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
789 	}
790 }
791 
792 
793 void
794 Team::ResetSignalsOnExec()
795 {
796 	// We are supposed to keep pending signals. Signal actions shall be reset
797 	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
798 	// (for SIGCHLD it's implementation-defined). Others shall be reset to
799 	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
800 	// flags, but since there aren't any handlers, they make little sense, so
801 	// we clear them.
802 
803 	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
804 		struct sigaction& action = SignalActionFor(i);
805 		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
806 			action.sa_handler = SIG_DFL;
807 
808 		action.sa_mask = 0;
809 		action.sa_flags = 0;
810 		action.sa_userdata = NULL;
811 	}
812 }
813 
814 
815 void
816 Team::InheritSignalActions(Team* parent)
817 {
818 	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
819 }
820 
821 
822 /*!	Adds the given user timer to the team and, if user-defined, assigns it an
823 	ID.
824 
825 	The caller must hold the team's lock.
826 
827 	\param timer The timer to be added. If it doesn't have an ID yet, it is
828 		considered user-defined and will be assigned an ID.
829 	\return \c B_OK, if the timer was added successfully, another error code
830 		otherwise.
831 */
832 status_t
833 Team::AddUserTimer(UserTimer* timer)
834 {
835 	// don't allow addition of timers when already shutting the team down
836 	if (state >= TEAM_STATE_SHUTDOWN)
837 		return B_BAD_TEAM_ID;
838 
839 	// If the timer is user-defined, check timer limit and increment
840 	// user-defined count.
841 	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
842 		return EAGAIN;
843 
844 	fUserTimers.AddTimer(timer);
845 
846 	return B_OK;
847 }
848 
849 
850 /*!	Removes the given user timer from the team.
851 
852 	The caller must hold the team's lock.
853 
854 	\param timer The timer to be removed.
855 
856 */
857 void
858 Team::RemoveUserTimer(UserTimer* timer)
859 {
860 	fUserTimers.RemoveTimer(timer);
861 
862 	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
863 		UserDefinedTimersRemoved(1);
864 }
865 
866 
867 /*!	Deletes all (or all user-defined) user timers of the team.
868 
869 	Timers belonging to the team's threads are not affected.
870 	The caller must hold the team's lock.
871 
872 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
873 		otherwise all timers are deleted.
874 */
875 void
876 Team::DeleteUserTimers(bool userDefinedOnly)
877 {
878 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
879 	UserDefinedTimersRemoved(count);
880 }
881 
882 
883 /*!	If not at the limit yet, increments the team's user-defined timer count.
884 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
885 */
886 bool
887 Team::CheckAddUserDefinedTimer()
888 {
889 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
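	// Optimistic increment: bump the counter first and roll back on
	// overshoot below. Concurrent callers may transiently push the counter
	// past MAX_USER_TIMERS_PER_TEAM, but only those that observed a
	// pre-increment value below the limit actually succeed.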
890 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
891 		atomic_add(&fUserDefinedTimerCount, -1);
892 		return false;
893 	}
894 
895 	return true;
896 }
897 
898 
899 /*!	Subtracts the given count from the team's user-defined timer count.
900 	\param count The count to subtract.
901 */
902 void
903 Team::UserDefinedTimersRemoved(int32 count)
904 {
905 	atomic_add(&fUserDefinedTimerCount, -count);
906 }
907 
908 
909 void
910 Team::DeactivateCPUTimeUserTimers()
911 {
912 	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
913 		timer->Deactivate();
914 
915 	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
916 		timer->Deactivate();
917 }
918 
919 
920 /*!	Returns the team's current total CPU time (kernel + user + offset).
921 
922 	The caller must hold \c time_lock.
923 
924 	\param ignoreCurrentRun If \c true and the current thread is one team's
925 	\param ignoreCurrentRun If \c true and the current thread is one of the
926 		team's threads, don't add the time since the last time \c last_time
927 		was updated. Should be used in "thread unscheduled" scheduler
928 		callbacks, since although the thread is still running at that time,
929 		its time has already been stopped.
	\param lockedThread A team thread whose \c time_lock the caller already
		holds, or \c NULL if no thread's \c time_lock is held.
930 */
931 bigtime_t
932 Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
933 {
934 	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
935 		+ dead_threads_user_time;
936 
937 	Thread* currentThread = thread_get_current_thread();
938 	bigtime_t now = system_time();
939 
940 	for (Thread* thread = thread_list; thread != NULL;
941 			thread = thread->team_next) {
942 		bool alreadyLocked = thread == lockedThread;
943 		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
944 		time += thread->kernel_time + thread->user_time;
945 
946 		if (thread->last_time != 0) {
947 			if (!ignoreCurrentRun || thread != currentThread)
948 				time += now - thread->last_time;
949 		}
950 
951 		if (alreadyLocked)
952 			threadTimeLocker.Detach();
953 	}
954 
955 	return time;
956 }
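
// Illustrative call site (a sketch; assumes `team` is a valid, referenced
// Team): per the documentation above, the team's \c time_lock must be held
// while the thread times are summed up.
//
//	InterruptsSpinLocker timeLocker(team->time_lock);
//	bigtime_t total = team->CPUTime(false, NULL);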
957 
958 
959 /*!	Returns the team's current user CPU time.
960 
961 	The caller must hold \c time_lock.
962 
963 	\return The team's current user CPU time.
964 */
965 bigtime_t
966 Team::UserCPUTime() const
967 {
968 	bigtime_t time = dead_threads_user_time;
969 
970 	bigtime_t now = system_time();
971 
972 	for (Thread* thread = thread_list; thread != NULL;
973 			thread = thread->team_next) {
974 		SpinLocker threadTimeLocker(thread->time_lock);
975 		time += thread->user_time;
976 
977 		if (thread->last_time != 0 && !thread->in_kernel)
978 			time += now - thread->last_time;
979 	}
980 
981 	return time;
982 }
983 
984 
985 //	#pragma mark - ProcessGroup
986 
987 
988 ProcessGroup::ProcessGroup(pid_t id)
989 	:
990 	id(id),
991 	teams(NULL),
992 	fSession(NULL),
993 	fInOrphanedCheckList(false)
994 {
995 	char lockName[32];
996 	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
997 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
998 }
999 
1000 
1001 ProcessGroup::~ProcessGroup()
1002 {
1003 	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));
1004 
1005 	// If the group is in the orphaned check list, remove it.
1006 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1007 
1008 	if (fInOrphanedCheckList)
1009 		sOrphanedCheckProcessGroups.Remove(this);
1010 
1011 	orphanedCheckLocker.Unlock();
1012 
1013 	// remove group from the hash table and from the session
1014 	if (fSession != NULL) {
1015 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1016 		sGroupHash.RemoveUnchecked(this);
1017 		groupHashLocker.Unlock();
1018 
1019 		fSession->ReleaseReference();
1020 	}
1021 
1022 	mutex_destroy(&fLock);
1023 }
1024 
1025 
1026 /*static*/ ProcessGroup*
1027 ProcessGroup::Get(pid_t id)
1028 {
1029 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1030 	ProcessGroup* group = sGroupHash.Lookup(id);
1031 	if (group != NULL)
1032 		group->AcquireReference();
1033 	return group;
1034 }
1035 
1036 
1037 /*!	Adds the group to the given session and makes it publicly accessible.
1038 	The caller must not hold the process group hash lock.
1039 */
1040 void
1041 ProcessGroup::Publish(ProcessSession* session)
1042 {
1043 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1044 	PublishLocked(session);
1045 }
1046 
1047 
1048 /*!	Adds the group to the given session and makes it publicly accessible.
1049 	The caller must hold the process group hash lock.
1050 */
1051 void
1052 ProcessGroup::PublishLocked(ProcessSession* session)
1053 {
1054 	ASSERT(sGroupHash.Lookup(this->id) == NULL);
1055 
1056 	fSession = session;
1057 	fSession->AcquireReference();
1058 
1059 	sGroupHash.InsertUnchecked(this);
1060 }
1061 
1062 
1063 /*!	Checks whether the process group is orphaned.
1064 	The caller must hold the group's lock.
1065 	\return \c true, if the group is orphaned, \c false otherwise.
1066 */
1067 bool
1068 ProcessGroup::IsOrphaned() const
1069 {
1070 	// Orphaned Process Group: "A process group in which the parent of every
1071 	// member is either itself a member of the group or is not a member of the
1072 	// group's session." (Open Group Base Specs Issue 7)
1073 	bool orphaned = true;
1074 
1075 	Team* team = teams;
1076 	while (orphaned && team != NULL) {
1077 		team->LockTeamAndParent(false);
1078 
1079 		Team* parent = team->parent;
1080 		if (parent != NULL && parent->group_id != id
1081 			&& parent->session_id == fSession->id) {
1082 			orphaned = false;
1083 		}
1084 
1085 		team->UnlockTeamAndParent();
1086 
1087 		team = team->group_next;
1088 	}
1089 
1090 	return orphaned;
1091 }
1092 
1093 
1094 void
1095 ProcessGroup::ScheduleOrphanedCheck()
1096 {
1097 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1098 
1099 	if (!fInOrphanedCheckList) {
1100 		sOrphanedCheckProcessGroups.Add(this);
1101 		fInOrphanedCheckList = true;
1102 	}
1103 }
1104 
1105 
1106 void
1107 ProcessGroup::UnsetOrphanedCheck()
1108 {
1109 	fInOrphanedCheckList = false;
1110 }
1111 
1112 
1113 //	#pragma mark - ProcessSession
1114 
1115 
1116 ProcessSession::ProcessSession(pid_t id)
1117 	:
1118 	id(id),
1119 	controlling_tty(-1),
1120 	foreground_group(-1)
1121 {
1122 	char lockName[32];
1123 	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
1124 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
1125 }
1126 
1127 
1128 ProcessSession::~ProcessSession()
1129 {
1130 	mutex_destroy(&fLock);
1131 }
1132 
1133 
1134 //	#pragma mark - KDL functions
1135 
1136 
1137 static void
1138 _dump_team_info(Team* team)
1139 {
1140 	kprintf("TEAM: %p\n", team);
1141 	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
1142 		team->id);
1143 	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
1144 	kprintf("name:             '%s'\n", team->Name());
1145 	kprintf("args:             '%s'\n", team->Args());
1146 	kprintf("hash_next:        %p\n", team->hash_next);
1147 	kprintf("parent:           %p", team->parent);
1148 	if (team->parent != NULL) {
1149 		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
1150 	} else
1151 		kprintf("\n");
1152 
1153 	kprintf("children:         %p\n", team->children);
1154 	kprintf("num_threads:      %d\n", team->num_threads);
1155 	kprintf("state:            %d\n", team->state);
1156 	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
1157 	kprintf("io_context:       %p\n", team->io_context);
1158 	if (team->address_space)
1159 		kprintf("address_space:    %p\n", team->address_space);
1160 	kprintf("user data:        %p (area %" B_PRId32 ")\n",
1161 		(void*)team->user_data, team->user_data_area);
1162 	kprintf("free user thread: %p\n", team->free_user_threads);
1163 	kprintf("main_thread:      %p\n", team->main_thread);
1164 	kprintf("thread_list:      %p\n", team->thread_list);
1165 	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
1166 	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
1167 }
1168 
1169 
1170 static int
1171 dump_team_info(int argc, char** argv)
1172 {
1173 	ulong arg;
1174 	bool found = false;
1175 
1176 	if (argc < 2) {
1177 		Thread* thread = thread_get_current_thread();
1178 		if (thread != NULL && thread->team != NULL)
1179 			_dump_team_info(thread->team);
1180 		else
1181 			kprintf("No current team!\n");
1182 		return 0;
1183 	}
1184 
1185 	arg = strtoul(argv[1], NULL, 0);
1186 	if (IS_KERNEL_ADDRESS(arg)) {
1187 		// semi-hack
1188 		_dump_team_info((Team*)arg);
1189 		return 0;
1190 	}
1191 
1192 	// walk through the team list, trying to match a name or ID
1193 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1194 		Team* team = it.Next();) {
1195 		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
1196 			|| team->id == (team_id)arg) {
1197 			_dump_team_info(team);
1198 			found = true;
1199 			break;
1200 		}
1201 	}
1202 
1203 	if (!found)
1204 		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
1205 	return 0;
1206 }
1207 
1208 
1209 static int
1210 dump_teams(int argc, char** argv)
1211 {
1212 	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
1213 		B_PRINTF_POINTER_WIDTH, "parent");
1214 
1215 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1216 		Team* team = it.Next();) {
1217 		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
1218 	}
1219 
1220 	return 0;
1221 }
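
// These two functions back the kernel debugger (KDL) commands for teams --
// presumably registered as "team" and "teams" elsewhere in this file.
// Example session (illustrative):
//
//	kdebug> teams                 list all teams
//	kdebug> team                  dump the current team
//	kdebug> team 1                dump a team by ID
//	kdebug> team app_server       dump a team by name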
1222 
1223 
1224 //	#pragma mark - Private functions
1225 
1226 
1227 /*!	Inserts team \a team into the child list of team \a parent.
1228 
1229 	The caller must hold the lock of both \a parent and \a team.
1230 
1231 	\param parent The parent team.
1232 	\param team The team to be inserted into \a parent's child list.
1233 */
1234 static void
1235 insert_team_into_parent(Team* parent, Team* team)
1236 {
1237 	ASSERT(parent != NULL);
1238 
1239 	team->siblings_next = parent->children;
1240 	parent->children = team;
1241 	team->parent = parent;
1242 }
1243 
1244 
1245 /*!	Removes team \a team from the child list of team \a parent.
1246 
1247 	The caller must hold the lock of both \a parent and \a team.
1248 
1249 	\param parent The parent team.
1250 	\param team The team to be removed from \a parent's child list.
1251 */
1252 static void
1253 remove_team_from_parent(Team* parent, Team* team)
1254 {
1255 	Team* child;
1256 	Team* last = NULL;
1257 
1258 	for (child = parent->children; child != NULL;
1259 			child = child->siblings_next) {
1260 		if (child == team) {
1261 			if (last == NULL)
1262 				parent->children = child->siblings_next;
1263 			else
1264 				last->siblings_next = child->siblings_next;
1265 
1266 			team->parent = NULL;
1267 			break;
1268 		}
1269 		last = child;
1270 	}
1271 }
1272 
1273 
1274 /*!	Returns whether the given team is a session leader.
1275 	The caller must hold the team's lock or its process group's lock.
1276 */
1277 static bool
1278 is_session_leader(Team* team)
1279 {
1280 	return team->session_id == team->id;
1281 }
1282 
1283 
1284 /*!	Returns whether the given team is a process group leader.
1285 	The caller must hold the team's lock or its process group's lock.
1286 */
1287 static bool
1288 is_process_group_leader(Team* team)
1289 {
1290 	return team->group_id == team->id;
1291 }
1292 
1293 
1294 /*!	Inserts the given team into the given process group.
1295 	The caller must hold the process group's lock, the team's lock, and the
1296 	team's parent's lock.
1297 */
1298 static void
1299 insert_team_into_group(ProcessGroup* group, Team* team)
1300 {
1301 	team->group = group;
1302 	team->group_id = group->id;
1303 	team->session_id = group->Session()->id;
1304 
1305 	team->group_next = group->teams;
1306 	group->teams = team;
1307 	group->AcquireReference();
1308 }
1309 
1310 
1311 /*!	Removes the given team from its process group.
1312 
1313 	The caller must hold the process group's lock, the team's lock, and the
1314 	team's parent's lock. Interrupts must be enabled.
1315 
1316 	\param team The team that'll be removed from its process group.
1317 */
1318 static void
1319 remove_team_from_group(Team* team)
1320 {
1321 	ProcessGroup* group = team->group;
1322 	Team* current;
1323 	Team* last = NULL;
1324 
1325 	// the team must be in a process group to let this function have any effect
1326 	if (group == NULL)
1327 		return;
1328 
1329 	for (current = group->teams; current != NULL;
1330 			current = current->group_next) {
1331 		if (current == team) {
1332 			if (last == NULL)
1333 				group->teams = current->group_next;
1334 			else
1335 				last->group_next = current->group_next;
1336 
1337 			team->group = NULL;
1338 			break;
1339 		}
1340 		last = current;
1341 	}
1342 
1343 	team->group = NULL;
1344 	team->group_next = NULL;
1345 
1346 	group->ReleaseReference();
1347 }
1348 
1349 
1350 static status_t
1351 create_team_user_data(Team* team, void* exactAddress = NULL)
1352 {
1353 	void* address;
1354 	uint32 addressSpec;
1355 
1356 	if (exactAddress != NULL) {
1357 		address = exactAddress;
1358 		addressSpec = B_EXACT_ADDRESS;
1359 	} else {
1360 		address = (void*)KERNEL_USER_DATA_BASE;
1361 		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
1362 	}
1363 
1364 	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
1365 		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);
1366 
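	// If the reservation succeeded (or an exact address was requested), the
	// area below is created at exactly that address; otherwise we fall back
	// to letting the VM pick a randomized base for just the initial area.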
1367 	virtual_address_restrictions virtualRestrictions = {};
1368 	if (result == B_OK || exactAddress != NULL) {
1369 		if (exactAddress != NULL)
1370 			virtualRestrictions.address = exactAddress;
1371 		else
1372 			virtualRestrictions.address = address;
1373 		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
1374 	} else {
1375 		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
1376 		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
1377 	}
1378 
1379 	physical_address_restrictions physicalRestrictions = {};
1380 	team->user_data_area = create_area_etc(team->id, "user area",
1381 		kTeamUserDataInitialSize, B_FULL_LOCK,
1382 		B_READ_AREA | B_WRITE_AREA | B_KERNEL_AREA, 0, 0,
1383 		&virtualRestrictions, &physicalRestrictions, &address);
1384 	if (team->user_data_area < 0)
1385 		return team->user_data_area;
1386 
1387 	team->user_data = (addr_t)address;
1388 	team->used_user_data = 0;
1389 	team->user_data_size = kTeamUserDataInitialSize;
1390 	team->free_user_threads = NULL;
1391 
1392 	return B_OK;
1393 }
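
// Note: the area starts out at kTeamUserDataInitialSize, while a range of
// kTeamUserDataReservedSize was reserved around it above. That headroom is
// what allows the area to be grown in place later when more user_thread
// structures are needed (presumably in team_allocate_user_thread()).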
1394 
1395 
1396 static void
1397 delete_team_user_data(Team* team)
1398 {
1399 	if (team->user_data_area >= 0) {
1400 		vm_delete_area(team->id, team->user_data_area, true);
1401 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1402 			kTeamUserDataReservedSize);
1403 
1404 		team->user_data = 0;
1405 		team->used_user_data = 0;
1406 		team->user_data_size = 0;
1407 		team->user_data_area = -1;
1408 		while (free_user_thread* entry = team->free_user_threads) {
1409 			team->free_user_threads = entry->next;
1410 			free(entry);
1411 		}
1412 	}
1413 }
1414 
1415 
1416 static status_t
1417 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1418 	int32 argCount, int32 envCount, char**& _flatArgs)
1419 {
1420 	if (argCount < 0 || envCount < 0)
1421 		return B_BAD_VALUE;
1422 
1423 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1424 		return B_TOO_MANY_ARGS;
1425 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1426 		return B_BAD_VALUE;
1427 
1428 	if (!IS_USER_ADDRESS(userFlatArgs))
1429 		return B_BAD_ADDRESS;
1430 
1431 	// allocate kernel memory
1432 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1433 	if (flatArgs == NULL)
1434 		return B_NO_MEMORY;
1435 
1436 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1437 		free(flatArgs);
1438 		return B_BAD_ADDRESS;
1439 	}
1440 
1441 	// check and relocate the array
1442 	status_t error = B_OK;
1443 	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
1444 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1445 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1446 		if (i == argCount || i == argCount + envCount + 1) {
1447 			// check array null termination
1448 			if (flatArgs[i] != NULL) {
1449 				error = B_BAD_VALUE;
1450 				break;
1451 			}
1452 		} else {
1453 			// check string
1454 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1455 			size_t maxLen = stringEnd - arg;
1456 			if (arg < stringBase || arg >= stringEnd
1457 					|| strnlen(arg, maxLen) == maxLen) {
1458 				error = B_BAD_VALUE;
1459 				break;
1460 			}
1461 
1462 			flatArgs[i] = arg;
1463 		}
1464 	}
1465 
1466 	if (error == B_OK)
1467 		_flatArgs = flatArgs;
1468 	else
1469 		free(flatArgs);
1470 
1471 	return error;
1472 }
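
// For reference, the flattened buffer this function expects is laid out as
// sketched below (produced by userland before the respective syscall; the
// exact producer is not shown in this file):
//
//	char*	args[argCount];		// pointers into the string area
//	char*	argsTerminator;		// NULL
//	char*	env[envCount];		// pointers into the string area
//	char*	envTerminator;		// NULL
//	char	strings[];			// the NUL-terminated strings themselves
//
// The pointers are only valid in the source address space, which is why the
// loop above rebases each of them into the kernel copy before use.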
1473 
1474 
1475 static void
1476 free_team_arg(struct team_arg* teamArg)
1477 {
1478 	if (teamArg != NULL) {
1479 		free(teamArg->flat_args);
1480 		free(teamArg->path);
1481 		free(teamArg);
1482 	}
1483 }
1484 
1485 
1486 static status_t
1487 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1488 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1489 	port_id port, uint32 token)
1490 {
1491 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1492 	if (teamArg == NULL)
1493 		return B_NO_MEMORY;
1494 
1495 	teamArg->path = strdup(path);
1496 	if (teamArg->path == NULL) {
1497 		free(teamArg);
1498 		return B_NO_MEMORY;
1499 	}
1500 
1501 	// copy the args over
1502 	teamArg->flat_args = flatArgs;
1503 	teamArg->flat_args_size = flatArgsSize;
1504 	teamArg->arg_count = argCount;
1505 	teamArg->env_count = envCount;
1506 	teamArg->flags = 0;
1507 	teamArg->umask = umask;
1508 	teamArg->error_port = port;
1509 	teamArg->error_token = token;
1510 
1511 	// determine the flags from the environment
1512 	const char* const* env = flatArgs + argCount + 1;
1513 	for (int32 i = 0; i < envCount; i++) {
1514 		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
1515 			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
1516 			break;
1517 		}
1518 	}
1519 
1520 	*_teamArg = teamArg;
1521 	return B_OK;
1522 }
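
// Usage note: since create_team_arg() scans the copied environment, starting
// a program with DISABLE_ASLR=1 in its environment sets
// TEAM_ARGS_FLAG_NO_ASLR, and the new team's address space is then created
// with randomization disabled (see the SetRandomizingEnabled() calls in
// load_image_internal() and exec_team()).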
1523 
1524 
1525 static status_t
1526 team_create_thread_start_internal(void* args)
1527 {
1528 	status_t err;
1529 	Thread* thread;
1530 	Team* team;
1531 	struct team_arg* teamArgs = (struct team_arg*)args;
1532 	const char* path;
1533 	addr_t entry;
1534 	char** userArgs;
1535 	char** userEnv;
1536 	struct user_space_program_args* programArgs;
1537 	uint32 argCount, envCount;
1538 
1539 	thread = thread_get_current_thread();
1540 	team = thread->team;
1541 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
1542 
1543 	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
1544 		thread->id));
1545 
1546 	// Main stack area layout is currently as follows (starting from 0):
1547 	//
1548 	// size								| usage
1549 	// ---------------------------------+--------------------------------
1550 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
1551 	// TLS_SIZE							| TLS data
1552 	// sizeof(user_space_program_args)	| argument structure for the runtime
1553 	//									| loader
1554 	// flat arguments size				| flat process arguments and environment
1555 
1556 	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
1557 	// the heap
1558 	// TODO: we could reserve the whole USER_STACK_REGION upfront...
1559 
1560 	argCount = teamArgs->arg_count;
1561 	envCount = teamArgs->env_count;
1562 
1563 	programArgs = (struct user_space_program_args*)(thread->user_stack_base
1564 		+ thread->user_stack_size + TLS_SIZE);
1565 
1566 	userArgs = (char**)(programArgs + 1);
1567 	userEnv = userArgs + argCount + 1;
1568 	path = teamArgs->path;
1569 
1570 	if (user_strlcpy(programArgs->program_path, path,
1571 				sizeof(programArgs->program_path)) < B_OK
1572 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1573 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
1574 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1575 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
1576 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1577 				sizeof(port_id)) < B_OK
1578 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1579 				sizeof(uint32)) < B_OK
1580 		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
1581 		|| user_memcpy(&programArgs->disable_user_addons,
1582 			&sDisableUserAddOns, sizeof(bool)) < B_OK
1583 		|| user_memcpy(userArgs, teamArgs->flat_args,
1584 				teamArgs->flat_args_size) < B_OK) {
1585 		// the team deletion process will clean this mess
1586 		free_team_arg(teamArgs);
1587 		return B_BAD_ADDRESS;
1588 	}
1589 
1590 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1591 
1592 	// set team args and update state
1593 	team->Lock();
1594 	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
1595 	team->state = TEAM_STATE_NORMAL;
1596 	team->Unlock();
1597 
1598 	free_team_arg(teamArgs);
1599 		// the arguments are already on the user stack, we no longer need
1600 		// them in this form
1601 
1602 	// Clone commpage area
1603 	area_id commPageArea = clone_commpage_area(team->id,
1604 		&team->commpage_address);
1605 	if (commPageArea < B_OK) {
1606 		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
1607 			strerror(commPageArea)));
1608 		return commPageArea;
1609 	}
1610 
1611 	// Register commpage image
1612 	image_id commPageImage = get_commpage_image();
1613 	extended_image_info imageInfo;
1614 	err = get_image_info(commPageImage, &imageInfo.basic_info);
1615 	if (err != B_OK) {
1616 		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
1617 			strerror(err)));
1618 		return err;
1619 	}
1620 	imageInfo.basic_info.text = team->commpage_address;
1621 	imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
1622 	imageInfo.symbol_table = NULL;
1623 	imageInfo.symbol_hash = NULL;
1624 	imageInfo.string_table = NULL;
1625 	image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
1626 	if (image < 0) {
1627 		TRACE(("team_create_thread_start: register_image() failed: %s\n",
1628 			strerror(image)));
1629 		return image;
1630 	}
1631 
1632 	// NOTE: Normally arch_thread_enter_userspace() never returns, that is,
1633 	// automatic variables with function scope will never be destroyed.
1634 	{
1635 		// find runtime_loader path
1636 		KPath runtimeLoaderPath;
1637 		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
1638 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1639 		if (err < B_OK) {
1640 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1641 				strerror(err)));
1642 			return err;
1643 		}
1644 		runtimeLoaderPath.UnlockBuffer();
1645 		err = runtimeLoaderPath.Append("runtime_loader");
1646 
1647 		if (err == B_OK) {
1648 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
1649 				&entry);
1650 		}
1651 	}
1652 
1653 	if (err < B_OK) {
1654 		// Luckily, we don't have to clean up the mess we created - that's
1655 		// done for us by the normal team deletion process
1656 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1657 			"%s\n", strerror(err)));
1658 		return err;
1659 	}
1660 
1661 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1662 
1663 	// enter userspace -- returns only in case of error
1664 	return thread_enter_userspace_new_team(thread, (addr_t)entry,
1665 		programArgs, team->commpage_address);
1666 }
1667 
1668 
1669 static status_t
1670 team_create_thread_start(void* args)
1671 {
1672 	team_create_thread_start_internal(args);
1673 	team_init_exit_info_on_error(thread_get_current_thread()->team);
1674 	thread_exit();
1675 		// does not return
1676 	return B_OK;
1677 }
1678 
1679 
1680 static thread_id
1681 load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1682 	int32 envCount, int32 priority, team_id parentID, uint32 flags,
1683 	port_id errorPort, uint32 errorToken)
1684 {
1685 	char** flatArgs = _flatArgs;
1686 	thread_id thread;
1687 	status_t status;
1688 	struct team_arg* teamArgs;
1689 	struct team_loading_info loadingInfo;
1690 	ConditionVariableEntry loadingWaitEntry;
1691 	io_context* parentIOContext = NULL;
1692 	team_id teamID;
1693 	bool teamLimitReached = false;
1694 
1695 	if (flatArgs == NULL || argCount == 0)
1696 		return B_BAD_VALUE;
1697 
1698 	const char* path = flatArgs[0];
1699 
1700 	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
1701 		"\n", path, flatArgs, argCount));
1702 
1703 	// cut the path from the main thread name
1704 	const char* threadName = strrchr(path, '/');
1705 	if (threadName != NULL)
1706 		threadName++;
1707 	else
1708 		threadName = path;
1709 
1710 	// create the main thread object
1711 	Thread* mainThread;
1712 	status = Thread::Create(threadName, mainThread);
1713 	if (status != B_OK)
1714 		return status;
1715 	BReference<Thread> mainThreadReference(mainThread, true);
1716 
1717 	// create team object
1718 	Team* team = Team::Create(mainThread->id, path, false);
1719 	if (team == NULL)
1720 		return B_NO_MEMORY;
1721 	BReference<Team> teamReference(team, true);
1722 
1723 	if ((flags & B_WAIT_TILL_LOADED) != 0) {
1724 		loadingInfo.condition.Init(team, "image load");
1725 		loadingInfo.condition.Add(&loadingWaitEntry);
1726 		loadingInfo.result = B_ERROR;
1727 		team->loading_info = &loadingInfo;
1728 	}
1729 
1730 	// get the parent team
1731 	Team* parent = Team::Get(parentID);
1732 	if (parent == NULL)
1733 		return B_BAD_TEAM_ID;
1734 	BReference<Team> parentReference(parent, true);
1735 
1736 	parent->LockTeamAndProcessGroup();
1737 	team->Lock();
1738 
1739 	// inherit the parent's user/group
1740 	inherit_parent_user_and_group(team, parent);
1741 
1742 	// get a reference to the parent's I/O context -- we need it to create ours
1743 	parentIOContext = parent->io_context;
1744 	vfs_get_io_context(parentIOContext);
1745 
1746 	team->Unlock();
1747 	parent->UnlockTeamAndProcessGroup();
1748 
1749 	// check the executable's set-user/group-id permission
1750 	update_set_id_user_and_group(team, path);
1751 
1752 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1753 		envCount, (mode_t)-1, errorPort, errorToken);
1754 	if (status != B_OK)
1755 		goto err1;
1756 
1757 	_flatArgs = NULL;
1758 		// args are owned by the team_arg structure now
1759 
1760 	// create a new io_context for this team
1761 	team->io_context = vfs_new_io_context(parentIOContext, true);
1762 	if (!team->io_context) {
1763 		status = B_NO_MEMORY;
1764 		goto err2;
1765 	}
1766 
1767 	// We don't need the parent's I/O context any longer.
1768 	vfs_put_io_context(parentIOContext);
1769 	parentIOContext = NULL;
1770 
1771 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1772 	vfs_exec_io_context(team->io_context);
1773 
1774 	// create an address space for this team
1775 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
1776 		&team->address_space);
1777 	if (status != B_OK)
1778 		goto err2;
1779 
1780 	team->address_space->SetRandomizingEnabled(
1781 		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);
1782 
1783 	// create the user data area
1784 	status = create_team_user_data(team);
1785 	if (status != B_OK)
1786 		goto err4;
1787 
1788 	// insert the team into its parent and the teams hash
1789 	parent->LockTeamAndProcessGroup();
1790 	team->Lock();
1791 
1792 	{
1793 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
1794 
1795 		sTeamHash.Insert(team);
1796 		teamLimitReached = sUsedTeams >= sMaxTeams;
1797 		if (!teamLimitReached)
1798 			sUsedTeams++;
1799 	}
1800 
1801 	insert_team_into_parent(parent, team);
1802 	insert_team_into_group(parent->group, team);
1803 
1804 	team->Unlock();
1805 	parent->UnlockTeamAndProcessGroup();
1806 
1807 	// notify team listeners
1808 	sNotificationService.Notify(TEAM_ADDED, team);
1809 
1810 	if (teamLimitReached) {
1811 		status = B_NO_MORE_TEAMS;
1812 		goto err6;
1813 	}
1814 
1815 	// Once the main thread has been started, we shouldn't access the team
1816 	// object anymore, so cache the team's ID now.
1817 	teamID = team->id;
1818 
1819 	// Create a kernel thread, but under the context of the new team
1820 	// The new thread will take over ownership of teamArgs.
1821 	{
1822 		ThreadCreationAttributes threadAttributes(team_create_thread_start,
1823 			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
1824 		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
1825 			+ teamArgs->flat_args_size;
1826 		thread = thread_create_thread(threadAttributes, false);
1827 		if (thread < 0) {
1828 			status = thread;
1829 			goto err6;
1830 		}
1831 	}
1832 
1833 	// The team has been created successfully, so we keep the reference. Or
1834 	// more precisely: It's owned by the team's main thread, now.
1835 	teamReference.Detach();
1836 
1837 	// wait for the loader of the new team to finish its work
1838 	if ((flags & B_WAIT_TILL_LOADED) != 0) {
1839 		if (mainThread != NULL) {
1840 			// resume the team's main thread
1841 			thread_continue(mainThread);
1842 		}
1843 
1844 		// Now wait until loading is finished. We will be woken either by the
1845 		// thread, when it finishes or aborts loading, or when the team is
1846 		// going to die (e.g. is killed). In either case the one notifying is
1847 		// responsible for unsetting `loading_info` in the team structure.
1848 		loadingWaitEntry.Wait();
1849 
1850 		if (loadingInfo.result < B_OK)
1851 			return loadingInfo.result;
1852 	}
1853 
1854 	// notify the debugger
1855 	user_debug_team_created(teamID);
1856 
1857 	return thread;
1858 
1859 err6:
1860 	// Remove the team structure from the process group, the parent team, and
1861 	// the team hash table and delete the team structure.
1862 	parent->LockTeamAndProcessGroup();
1863 	team->Lock();
1864 
1865 	remove_team_from_group(team);
1866 	remove_team_from_parent(team->parent, team);
1867 
1868 	team->Unlock();
1869 	parent->UnlockTeamAndProcessGroup();
1870 
1871 	{
1872 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
1873 		sTeamHash.Remove(team);
1874 		if (!teamLimitReached)
1875 			sUsedTeams--;
1876 	}
1877 
1878 	sNotificationService.Notify(TEAM_REMOVED, team);
1879 
1880 	delete_team_user_data(team);
1881 err4:
1882 	team->address_space->Put();
1883 err2:
1884 	free_team_arg(teamArgs);
1885 err1:
1886 	if (parentIOContext != NULL)
1887 		vfs_put_io_context(parentIOContext);
1888 
1889 	return status;
1890 }
1891 
1892 
1893 /*!	Almost shuts down the current team and loads a new image into it.
1894 	If successful, this function does not return and takes over ownership of
1895 	the arguments provided.
1896 	This function may only be called in a userland team (caused by one of the
1897 	exec*() syscalls).
1898 */
1899 static status_t
1900 exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
1901 	int32 argCount, int32 envCount, mode_t umask)
1902 {
1903 	// NOTE: Since this function normally doesn't return, don't use automatic
1904 	// variables that need destruction in the function scope.
1905 	char** flatArgs = _flatArgs;
1906 	Team* team = thread_get_current_thread()->team;
1907 	struct team_arg* teamArgs;
1908 	const char* threadName;
1909 	thread_id nubThreadID = -1;
1910 
1911 	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
1912 		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
1913 		team->id));
1914 
1915 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1916 
1917 	// switching the kernel at run time is probably not a good idea :)
1918 	if (team == team_get_kernel_team())
1919 		return B_NOT_ALLOWED;
1920 
1921 	// we currently need to be single threaded here
1922 	// TODO: maybe we should just kill all other threads and
1923 	//	make the current thread the team's main thread?
1924 	Thread* currentThread = thread_get_current_thread();
1925 	if (currentThread != team->main_thread)
1926 		return B_NOT_ALLOWED;
1927 
1928 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1929 	// We iterate through the thread list to make sure that there's no other
1930 	// thread.
1931 	TeamLocker teamLocker(team);
1932 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
1933 
1934 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1935 		nubThreadID = team->debug_info.nub_thread;
1936 
1937 	debugInfoLocker.Unlock();
1938 
1939 	for (Thread* thread = team->thread_list; thread != NULL;
1940 			thread = thread->team_next) {
1941 		if (thread != team->main_thread && thread->id != nubThreadID)
1942 			return B_NOT_ALLOWED;
1943 	}
1944 
1945 	team->DeleteUserTimers(true);
1946 	team->ResetSignalsOnExec();
1947 
1948 	teamLocker.Unlock();
1949 
1950 	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
1951 		argCount, envCount, umask, -1, 0);
1952 	if (status != B_OK)
1953 		return status;
1954 
1955 	_flatArgs = NULL;
1956 		// args are owned by the team_arg structure now
1957 
1958 	// TODO: remove team resources if there are any left
1959 	// thread_atkernel_exit() might not be called at all
1960 
1961 	thread_reset_for_exec();
1962 
1963 	user_debug_prepare_for_exec();
1964 
1965 	delete_team_user_data(team);
1966 	vm_delete_areas(team->address_space, false);
1967 	xsi_sem_undo(team);
1968 	delete_owned_ports(team);
1969 	sem_delete_owned_sems(team);
1970 	remove_images(team);
1971 	vfs_exec_io_context(team->io_context);
1972 	delete_realtime_sem_context(team->realtime_sem_context);
1973 	team->realtime_sem_context = NULL;
1974 
1975 	// update ASLR
1976 	team->address_space->SetRandomizingEnabled(
1977 		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);
1978 
1979 	status = create_team_user_data(team);
1980 	if (status != B_OK) {
1981 		// creating the user data failed -- we're toast
1982 		free_team_arg(teamArgs);
1983 		exit_thread(status);
1984 		return status;
1985 	}
1986 
1987 	user_debug_finish_after_exec();
1988 
1989 	// rename the team
1990 
1991 	team->Lock();
1992 	team->SetName(path);
1993 	team->Unlock();
1994 
1995 	// cut the path from the team name and rename the main thread, too
1996 	threadName = strrchr(path, '/');
1997 	if (threadName != NULL)
1998 		threadName++;
1999 	else
2000 		threadName = path;
2001 	rename_thread(thread_get_current_thread_id(), threadName);
2002 
2003 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
2004 
2005 	// Update user/group according to the executable's set-user/group-id
2006 	// permission.
2007 	update_set_id_user_and_group(team, path);
2008 
2009 	user_debug_team_exec();
2010 
2011 	// notify team listeners
2012 	sNotificationService.Notify(TEAM_EXEC, team);
2013 
2014 	// get a user thread for the thread
2015 	user_thread* userThread = team_allocate_user_thread(team);
2016 		// cannot fail (the allocation for the team would have failed already)
2017 	ThreadLocker currentThreadLocker(currentThread);
2018 	currentThread->user_thread = userThread;
2019 	currentThreadLocker.Unlock();
2020 
2021 	// create the user stack for the thread
2022 	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
2023 		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
2024 	if (status == B_OK) {
2025 		// prepare the stack, load the runtime loader, and enter userspace
2026 		team_create_thread_start(teamArgs);
2027 			// never returns
2028 	} else
2029 		free_team_arg(teamArgs);
2030 
2031 	// Sorry, we have to kill ourselves -- there is no way out anymore
2032 	// (without any areas left and all that).
2033 	exit_thread(status);
2034 
2035 	// We return a status here since the signal that is sent by the
2036 	// call above is not immediately handled.
2037 	return B_ERROR;
2038 }
2039 
2040 
2041 static thread_id
2042 fork_team(void)
2043 {
2044 	Thread* parentThread = thread_get_current_thread();
2045 	Team* parentTeam = parentThread->team;
2046 	Team* team;
2047 	arch_fork_arg* forkArgs;
2048 	struct area_info info;
2049 	thread_id threadID;
2050 	status_t status;
2051 	ssize_t areaCookie;
2052 	bool teamLimitReached = false;
2053 
2054 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2055 
2056 	if (parentTeam == team_get_kernel_team())
2057 		return B_NOT_ALLOWED;
2058 
2059 	// create a new team
2060 	// TODO: this is very similar to load_image_internal() - maybe we can do
2061 	// something about it :)
2062 
2063 	// create the main thread object
2064 	Thread* thread;
2065 	status = Thread::Create(parentThread->name, thread);
2066 	if (status != B_OK)
2067 		return status;
2068 	BReference<Thread> threadReference(thread, true);
2069 
2070 	// create the team object
2071 	team = Team::Create(thread->id, NULL, false);
2072 	if (team == NULL)
2073 		return B_NO_MEMORY;
2074 
2075 	parentTeam->LockTeamAndProcessGroup();
2076 	team->Lock();
2077 
2078 	team->SetName(parentTeam->Name());
2079 	team->SetArgs(parentTeam->Args());
2080 
2081 	team->commpage_address = parentTeam->commpage_address;
2082 
2083 	// Inherit the parent's user/group.
2084 	inherit_parent_user_and_group(team, parentTeam);
2085 
2086 	// inherit signal handlers
2087 	team->InheritSignalActions(parentTeam);
2088 
2089 	team->Unlock();
2090 	parentTeam->UnlockTeamAndProcessGroup();
2091 
2092 	// inherit some team debug flags
2093 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2094 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2095 
2096 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2097 	if (forkArgs == NULL) {
2098 		status = B_NO_MEMORY;
2099 		goto err1;
2100 	}
2101 
2102 	// create a new io_context for this team
2103 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2104 	if (!team->io_context) {
2105 		status = B_NO_MEMORY;
2106 		goto err2;
2107 	}
2108 
2109 	// duplicate the realtime sem context
2110 	if (parentTeam->realtime_sem_context) {
2111 		team->realtime_sem_context = clone_realtime_sem_context(
2112 			parentTeam->realtime_sem_context);
2113 		if (team->realtime_sem_context == NULL) {
2114 			status = B_NO_MEMORY;
2115 			goto err2;
2116 		}
2117 	}
2118 
2119 	// create an address space for this team
2120 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2121 		&team->address_space);
2122 	if (status < B_OK)
2123 		goto err3;
2124 
2125 	// copy all areas of the team
2126 	// TODO: should be able to handle stack areas differently (ie. don't have
2127 	// them copy-on-write)
2128 
2129 	areaCookie = 0;
2130 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2131 		if (info.area == parentTeam->user_data_area) {
2132 			// don't clone the user area; just create a new one
2133 			status = create_team_user_data(team, info.address);
2134 			if (status != B_OK)
2135 				break;
2136 
2137 			thread->user_thread = team_allocate_user_thread(team);
2138 		} else {
2139 			void* address;
2140 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2141 				&address, B_CLONE_ADDRESS, info.protection, info.area);
2142 			if (area < B_OK) {
2143 				status = area;
2144 				break;
2145 			}
2146 
2147 			if (info.area == parentThread->user_stack_area)
2148 				thread->user_stack_area = area;
2149 		}
2150 	}
2151 
2152 	if (status < B_OK)
2153 		goto err4;
2154 
2155 	if (thread->user_thread == NULL) {
2156 #if KDEBUG
2157 		panic("user data area not found, parent area is %" B_PRId32,
2158 			parentTeam->user_data_area);
2159 #endif
2160 		status = B_ERROR;
2161 		goto err4;
2162 	}
2163 
2164 	thread->user_stack_base = parentThread->user_stack_base;
2165 	thread->user_stack_size = parentThread->user_stack_size;
2166 	thread->user_local_storage = parentThread->user_local_storage;
2167 	thread->sig_block_mask = parentThread->sig_block_mask;
2168 	thread->signal_stack_base = parentThread->signal_stack_base;
2169 	thread->signal_stack_size = parentThread->signal_stack_size;
2170 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2171 
2172 	arch_store_fork_frame(forkArgs);
2173 
2174 	// copy image list
2175 	if ((status = copy_images(parentTeam->id, team)) != B_OK)
2176 		goto err5;
2177 
2178 	// insert the team into its parent and the teams hash
2179 	parentTeam->LockTeamAndProcessGroup();
2180 	team->Lock();
2181 
2182 	{
2183 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2184 
2185 		sTeamHash.Insert(team);
2186 		teamLimitReached = sUsedTeams >= sMaxTeams;
2187 		if (!teamLimitReached)
2188 			sUsedTeams++;
2189 	}
2190 
2191 	insert_team_into_parent(parentTeam, team);
2192 	insert_team_into_group(parentTeam->group, team);
2193 
2194 	team->Unlock();
2195 	parentTeam->UnlockTeamAndProcessGroup();
2196 
2197 	// notify team listeners
2198 	sNotificationService.Notify(TEAM_ADDED, team);
2199 
2200 	if (teamLimitReached) {
2201 		status = B_NO_MORE_TEAMS;
2202 		goto err6;
2203 	}
2204 
2205 	// create the main thread
2206 	{
2207 		ThreadCreationAttributes threadCreationAttributes(NULL,
2208 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2209 		threadCreationAttributes.forkArgs = forkArgs;
2210 		threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS;
2211 		threadID = thread_create_thread(threadCreationAttributes, false);
2212 		if (threadID < 0) {
2213 			status = threadID;
2214 			goto err6;
2215 		}
2216 	}
2217 
2218 	// notify the debugger
2219 	user_debug_team_created(team->id);
2220 
2221 	T(TeamForked(threadID));
2222 
2223 	resume_thread(threadID);
2224 	return threadID;
2225 
2226 err6:
2227 	// Remove the team structure from the process group, the parent team, and
2228 	// the team hash table and delete the team structure.
2229 	parentTeam->LockTeamAndProcessGroup();
2230 	team->Lock();
2231 
2232 	remove_team_from_group(team);
2233 	remove_team_from_parent(team->parent, team);
2234 
2235 	team->Unlock();
2236 	parentTeam->UnlockTeamAndProcessGroup();
2237 
2238 	{
2239 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2240 		sTeamHash.Remove(team);
2241 		if (!teamLimitReached)
2242 			sUsedTeams--;
2243 	}
2244 
2245 	sNotificationService.Notify(TEAM_REMOVED, team);
2246 err5:
2247 	remove_images(team);
2248 err4:
2249 	team->address_space->RemoveAndPut();
2250 err3:
2251 	delete_realtime_sem_context(team->realtime_sem_context);
2252 err2:
2253 	free(forkArgs);
2254 err1:
2255 	team->ReleaseReference();
2256 
2257 	return status;
2258 }
2259 
2260 
2261 /*!	Returns whether the specified team \a parent has any children belonging
2262 	to the process group with the specified ID \a groupID.
2263 	The caller must hold \a parent's lock.
2264 */
2265 static bool
2266 has_children_in_group(Team* parent, pid_t groupID)
2267 {
2268 	for (Team* child = parent->children; child != NULL;
2269 			child = child->siblings_next) {
2270 		TeamLocker childLocker(child);
2271 		if (child->group_id == groupID)
2272 			return true;
2273 	}
2274 
2275 	return false;
2276 }
2277 
2278 
2279 /*!	Returns the first job control entry from \a children, which matches \a id.
2280 	\a id can be:
2281 	- \code > 0 \endcode: Matching an entry with that team ID.
2282 	- \code == -1 \endcode: Matching any entry.
2283 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2284 	\c 0 is an invalid value for \a id.
2285 
2286 	The caller must hold the lock of the team that \a children belongs to.
2287 
2288 	\param children The job control entry list to check.
2289 	\param id The match criterion.
2290 	\return The first matching entry or \c NULL, if none matches.
2291 */
2292 static job_control_entry*
2293 get_job_control_entry(team_job_control_children& children, pid_t id)
2294 {
2295 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2296 		 job_control_entry* entry = it.Next();) {
2297 
2298 		if (id > 0) {
2299 			if (entry->thread == id)
2300 				return entry;
2301 		} else if (id == -1) {
2302 			return entry;
2303 		} else {
2304 			pid_t processGroup
2305 				= (entry->team ? entry->team->group_id : entry->group_id);
2306 			if (processGroup == -id)
2307 				return entry;
2308 		}
2309 	}
2310 
2311 	return NULL;
2312 }
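// Illustrative sketch (editorial addition, not original source): how the
// \a id convention documented above is typically exercised; all concrete
// values are hypothetical.
//
//   // match the dead child with team ID 123
//   job_control_entry* e = get_job_control_entry(team->dead_children, 123);
//   // match any dead child
//   e = get_job_control_entry(team->dead_children, -1);
//   // match any dead child in process group 42 (note the negated group ID)
//   e = get_job_control_entry(team->dead_children, -42);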
2313 
2314 
2315 /*!	Returns the first job control entry from one of team's dead, continued, or
2316 	stopped children which matches \a id.
2317 	\a id can be:
2318 	- \code > 0 \endcode: Matching an entry with that team ID.
2319 	- \code == -1 \endcode: Matching any entry.
2320 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2321 	\c 0 is an invalid value for \a id.
2322 
2323 	The caller must hold \a team's lock.
2324 
2325 	\param team The team whose dead, stopped, and continued child lists shall be
2326 		checked.
2327 	\param id The match criterion.
2328 	\param flags Specifies which children shall be considered. Dead children
2329 		are considered when \a flags contains \c WEXITED, stopped children
2330 		when it contains \c WUNTRACED or \c WSTOPPED, and continued children
2331 		when it contains \c WCONTINUED.
2333 	\return The first matching entry or \c NULL, if none matches.
2334 */
2335 static job_control_entry*
2336 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2337 {
2338 	job_control_entry* entry = NULL;
2339 
2340 	if ((flags & WEXITED) != 0)
2341 		entry = get_job_control_entry(team->dead_children, id);
2342 
2343 	if (entry == NULL && (flags & WCONTINUED) != 0)
2344 		entry = get_job_control_entry(team->continued_children, id);
2345 
2346 	if (entry == NULL && (flags & (WUNTRACED | WSTOPPED)) != 0)
2347 		entry = get_job_control_entry(team->stopped_children, id);
2348 
2349 	return entry;
2350 }
2351 
2352 
2353 job_control_entry::job_control_entry()
2354 	:
2355 	has_group_ref(false)
2356 {
2357 }
2358 
2359 
2360 job_control_entry::~job_control_entry()
2361 {
2362 	if (has_group_ref) {
2363 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2364 
2365 		ProcessGroup* group = sGroupHash.Lookup(group_id);
2366 		if (group == NULL) {
2367 			panic("job_control_entry::~job_control_entry(): unknown group "
2368 				"ID: %" B_PRId32, group_id);
2369 			return;
2370 		}
2371 
2372 		groupHashLocker.Unlock();
2373 
2374 		group->ReleaseReference();
2375 	}
2376 }
2377 
2378 
2379 /*!	Invoked when the owning team is dying, initializing the entry according to
2380 	the dead state.
2381 
2382 	The caller must hold the owning team's lock and the scheduler lock.
2383 */
2384 void
2385 job_control_entry::InitDeadState()
2386 {
2387 	if (team != NULL) {
2388 		ASSERT(team->exit.initialized);
2389 
2390 		group_id = team->group_id;
2391 		team->group->AcquireReference();
2392 		has_group_ref = true;
2393 
2394 		thread = team->id;
2395 		status = team->exit.status;
2396 		reason = team->exit.reason;
2397 		signal = team->exit.signal;
2398 		signaling_user = team->exit.signaling_user;
2399 		user_time = team->dead_threads_user_time
2400 			+ team->dead_children.user_time;
2401 		kernel_time = team->dead_threads_kernel_time
2402 			+ team->dead_children.kernel_time;
2403 
2404 		team = NULL;
2405 	}
2406 }
2407 
2408 
2409 job_control_entry&
2410 job_control_entry::operator=(const job_control_entry& other)
2411 {
2412 	state = other.state;
2413 	thread = other.thread;
2414 	signal = other.signal;
2415 	has_group_ref = false;
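	// Editorial note: has_group_ref is deliberately not copied from \a other;
	// only the original entry owns the reference to the process group, which
	// its destructor releases.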
2416 	signaling_user = other.signaling_user;
2417 	team = other.team;
2418 	group_id = other.group_id;
2419 	status = other.status;
2420 	reason = other.reason;
2421 	user_time = other.user_time;
2422 	kernel_time = other.kernel_time;
2423 
2424 	return *this;
2425 }
2426 
2427 
2428 /*! This is the kernel backend for waitid().
2429 */
2430 static thread_id
2431 wait_for_child(pid_t child, uint32 flags, siginfo_t& _info,
2432 	team_usage_info& _usage_info)
2433 {
2434 	Thread* thread = thread_get_current_thread();
2435 	Team* team = thread->team;
2436 	struct job_control_entry foundEntry;
2437 	struct job_control_entry* freeDeathEntry = NULL;
2438 	status_t status = B_OK;
2439 
2440 	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
2441 		child, flags));
2442 
2443 	T(WaitForChild(child, flags));
2444 
2445 	if ((flags & (WEXITED | WUNTRACED | WSTOPPED | WCONTINUED)) == 0) {
2446 		T(WaitForChildDone(B_BAD_VALUE));
2447 		return B_BAD_VALUE;
2448 	}
2449 
2450 	pid_t originalChild = child;
2451 
2452 	bool ignoreFoundEntries = false;
2453 	bool ignoreFoundEntriesChecked = false;
2454 
2455 	while (true) {
2456 		// lock the team
2457 		TeamLocker teamLocker(team);
2458 
2459 		// A 0 child argument means to wait for all children in the process
2460 		// group of the calling team.
2461 		child = originalChild == 0 ? -team->group_id : originalChild;
2462 
2463 		// check whether any condition holds
2464 		job_control_entry* entry = get_job_control_entry(team, child, flags);
2465 
2466 		// If we don't have an entry yet, check whether there are any children
2467 		// complying with the process group specification at all.
2468 		if (entry == NULL) {
2471 			bool childrenExist = false;
2472 			if (child == -1) {
2473 				childrenExist = team->children != NULL;
2474 			} else if (child < -1) {
2475 				childrenExist = has_children_in_group(team, -child);
2476 			} else if (child != team->id) {
2477 				if (Team* childTeam = Team::Get(child)) {
2478 					BReference<Team> childTeamReference(childTeam, true);
2479 					TeamLocker childTeamLocker(childTeam);
2480 					childrenExist = childTeam->parent == team;
2481 				}
2482 			}
2483 
2484 			if (!childrenExist) {
2485 				// there is no child we could wait for
2486 				status = ECHILD;
2487 			} else {
2488 				// the children we're waiting for are still running
2489 				status = B_WOULD_BLOCK;
2490 			}
2491 		} else {
2492 			// got something
2493 			foundEntry = *entry;
2494 
2495 			// unless WNOWAIT has been specified, "consume" the wait state
2496 			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
2497 				if (entry->state == JOB_CONTROL_STATE_DEAD) {
2498 					// The child is dead. Reap its death entry.
2499 					freeDeathEntry = entry;
2500 					team->dead_children.entries.Remove(entry);
2501 					team->dead_children.count--;
2502 				} else {
2503 					// The child is well. Reset its job control state.
2504 					team_set_job_control_state(entry->team,
2505 						JOB_CONTROL_STATE_NONE, NULL);
2506 				}
2507 			}
2508 		}
2509 
2510 		// If we haven't got anything yet, prepare for waiting for the
2511 		// condition variable.
2512 		ConditionVariableEntry deadWaitEntry;
2513 
2514 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
2515 			team->dead_children.condition_variable.Add(&deadWaitEntry);
2516 
2517 		teamLocker.Unlock();
2518 
2519 		// we got our entry and can return to our caller
2520 		if (status == B_OK) {
2521 			if (ignoreFoundEntries) {
2522 				// ... unless we shall ignore found entries
2523 				delete freeDeathEntry;
2524 				freeDeathEntry = NULL;
2525 				continue;
2526 			}
2527 
2528 			break;
2529 		}
2530 
2531 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
2532 			T(WaitForChildDone(status));
2533 			return status;
2534 		}
2535 
2536 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
2537 		if (status == B_INTERRUPTED) {
2538 			T(WaitForChildDone(status));
2539 			return status;
2540 		}
2541 
2542 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
2543 		// all our children are dead and fail with ECHILD. We check the
2544 		// condition at this point.
2545 		if (!ignoreFoundEntriesChecked) {
2546 			teamLocker.Lock();
2547 
2548 			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
2549 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
2550 				|| handler.sa_handler == SIG_IGN) {
2551 				ignoreFoundEntries = true;
2552 			}
2553 
2554 			teamLocker.Unlock();
2555 
2556 			ignoreFoundEntriesChecked = true;
2557 		}
2558 	}
2559 
2560 	delete freeDeathEntry;
2561 
2562 	// By the time we get here, we have a valid death entry and have already
2563 	// been unregistered from the team or group. Fill in the returned info.
2564 	memset(&_info, 0, sizeof(_info));
2565 	_info.si_signo = SIGCHLD;
2566 	_info.si_pid = foundEntry.thread;
2567 	_info.si_uid = foundEntry.signaling_user;
2568 	// TODO: Fill in si_errno?
2569 
2570 	switch (foundEntry.state) {
2571 		case JOB_CONTROL_STATE_DEAD:
2572 			_info.si_code = foundEntry.reason;
2573 			_info.si_status = foundEntry.reason == CLD_EXITED
2574 				? foundEntry.status : foundEntry.signal;
2575 			_usage_info.user_time = foundEntry.user_time;
2576 			_usage_info.kernel_time = foundEntry.kernel_time;
2577 			break;
2578 		case JOB_CONTROL_STATE_STOPPED:
2579 			_info.si_code = CLD_STOPPED;
2580 			_info.si_status = foundEntry.signal;
2581 			break;
2582 		case JOB_CONTROL_STATE_CONTINUED:
2583 			_info.si_code = CLD_CONTINUED;
2584 			_info.si_status = 0;
2585 			break;
2586 		case JOB_CONTROL_STATE_NONE:
2587 			// can't happen
2588 			break;
2589 	}
2590 
2591 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs if no other
2592 	// child status is available.
2593 	TeamLocker teamLocker(team);
2594 	InterruptsSpinLocker signalLocker(team->signal_lock);
2595 	SpinLocker threadCreationLocker(gThreadCreationLock);
2596 
2597 	if (is_team_signal_blocked(team, SIGCHLD)) {
2598 		if (get_job_control_entry(team, child, flags) == NULL)
2599 			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
2600 	}
2601 
2602 	threadCreationLocker.Unlock();
2603 	signalLocker.Unlock();
2604 	teamLocker.Unlock();
2605 
2606 	// When the team is dead, the main thread continues to live in the kernel
2607 	// team for a very short time. To avoid surprises for the caller we'd
2608 	// rather wait until the thread is really gone.
2609 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
2610 		wait_for_thread(foundEntry.thread, NULL);
2611 
2612 	T(WaitForChildDone(foundEntry));
2613 
2614 	return foundEntry.thread;
2615 }
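// Illustrative sketch (editorial addition, not original source): how a
// POSIX-style wait maps onto the backend above. The flag combination is an
// assumption based on the documented get_job_control_entry() semantics.
//
//   siginfo_t info;
//   team_usage_info usage;
//   // roughly "waitpid(-1, ..., WUNTRACED)": any child that exited or stopped
//   pid_t pid = wait_for_child(-1, WEXITED | WUNTRACED, info, usage);
//   if (pid >= 0)
//       dprintf("child %" B_PRId32 " changed state\n", pid);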
2616 
2617 
2618 /*! Fills the team_info structure with information from the specified team.
2619 	Interrupts must be enabled. The team must not be locked.
2620 */
2621 static status_t
2622 fill_team_info(Team* team, team_info* info, size_t size)
2623 {
2624 	if (size != sizeof(team_info))
2625 		return B_BAD_VALUE;
2626 
2627 	// TODO: Set more information for team_info
2628 	memset(info, 0, size);
2629 
2630 	info->team = team->id;
2631 		// immutable
2632 	info->image_count = count_images(team);
2633 		// protected by sImageMutex
2634 
2635 	TeamLocker teamLocker(team);
2636 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2637 
2638 	info->thread_count = team->num_threads;
2639 	//info->area_count =
2640 	info->debugger_nub_thread = team->debug_info.nub_thread;
2641 	info->debugger_nub_port = team->debug_info.nub_port;
2642 	info->uid = team->effective_uid;
2643 	info->gid = team->effective_gid;
2644 
2645 	strlcpy(info->args, team->Args(), sizeof(info->args));
2646 	info->argc = 1;
2647 
2648 	return B_OK;
2649 }
2650 
2651 
2652 /*!	Returns whether the process group contains stopped processes.
2653 	The caller must hold the process group's lock.
2654 */
2655 static bool
2656 process_group_has_stopped_processes(ProcessGroup* group)
2657 {
2658 	Team* team = group->teams;
2659 	while (team != NULL) {
2660 		// the parent team's lock guards the job control entry -- acquire it
2661 		team->LockTeamAndParent(false);
2662 
2663 		if (team->job_control_entry != NULL
2664 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2665 			team->UnlockTeamAndParent();
2666 			return true;
2667 		}
2668 
2669 		team->UnlockTeamAndParent();
2670 
2671 		team = team->group_next;
2672 	}
2673 
2674 	return false;
2675 }
2676 
2677 
2678 /*!	Iterates through all process groups queued in team_remove_team() and signals
2679 	those that are orphaned and have stopped processes.
2680 	The caller must not hold any team or process group locks.
2681 */
2682 static void
2683 orphaned_process_group_check()
2684 {
2685 	// process as long as there are groups in the list
2686 	while (true) {
2687 		// remove the head from the list
2688 		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
2689 
2690 		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
2691 		if (group == NULL)
2692 			return;
2693 
2694 		group->UnsetOrphanedCheck();
2695 		BReference<ProcessGroup> groupReference(group);
2696 
2697 		orphanedCheckLocker.Unlock();
2698 
2699 		AutoLocker<ProcessGroup> groupLocker(group);
2700 
2701 		// If the group is orphaned and contains stopped processes, we're
2702 		// supposed to send SIGHUP + SIGCONT.
2703 		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
2704 			Thread* currentThread = thread_get_current_thread();
2705 
2706 			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
2707 			send_signal_to_process_group_locked(group, signal, 0);
2708 
2709 			signal.SetNumber(SIGCONT);
2710 			send_signal_to_process_group_locked(group, signal, 0);
2711 		}
2712 	}
2713 }
2714 
2715 
2716 static status_t
2717 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2718 	uint32 flags)
2719 {
2720 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2721 		return B_BAD_VALUE;
2722 
2723 	// get the team
2724 	Team* team = Team::GetAndLock(id);
2725 	if (team == NULL)
2726 		return B_BAD_TEAM_ID;
2727 	BReference<Team> teamReference(team, true);
2728 	TeamLocker teamLocker(team, true);
2729 
2730 	if ((flags & B_CHECK_PERMISSION) != 0) {
2731 		uid_t uid = geteuid();
2732 		if (uid != 0 && uid != team->effective_uid)
2733 			return B_NOT_ALLOWED;
2734 	}
2735 
2736 	bigtime_t kernelTime = 0;
2737 	bigtime_t userTime = 0;
2738 
2739 	switch (who) {
2740 		case B_TEAM_USAGE_SELF:
2741 		{
2742 			Thread* thread = team->thread_list;
2743 
2744 			for (; thread != NULL; thread = thread->team_next) {
2745 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2746 				kernelTime += thread->kernel_time;
2747 				userTime += thread->user_time;
2748 			}
2749 
2750 			kernelTime += team->dead_threads_kernel_time;
2751 			userTime += team->dead_threads_user_time;
2752 			break;
2753 		}
2754 
2755 		case B_TEAM_USAGE_CHILDREN:
2756 		{
2757 			Team* child = team->children;
2758 			for (; child != NULL; child = child->siblings_next) {
2759 				TeamLocker childLocker(child);
2760 
2761 				Thread* thread = child->thread_list;
2762 
2763 				for (; thread != NULL; thread = thread->team_next) {
2764 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2765 					kernelTime += thread->kernel_time;
2766 					userTime += thread->user_time;
2767 				}
2768 
2769 				kernelTime += child->dead_threads_kernel_time;
2770 				userTime += child->dead_threads_user_time;
2771 			}
2772 
2773 			kernelTime += team->dead_children.kernel_time;
2774 			userTime += team->dead_children.user_time;
2775 			break;
2776 		}
2777 	}
2778 
2779 	info->kernel_time = kernelTime;
2780 	info->user_time = userTime;
2781 
2782 	return B_OK;
2783 }
2784 
2785 
2786 //	#pragma mark - Private kernel API
2787 
2788 
2789 status_t
2790 team_init(kernel_args* args)
2791 {
2792 	// create the team hash table
2793 	new(&sTeamHash) TeamTable;
2794 	if (sTeamHash.Init(64) != B_OK)
2795 		panic("Failed to init team hash table!");
2796 
2797 	new(&sGroupHash) ProcessGroupHashTable;
2798 	if (sGroupHash.Init() != B_OK)
2799 		panic("Failed to init process group hash table!");
2800 
2801 	// create initial session and process groups
2802 
2803 	ProcessSession* session = new(std::nothrow) ProcessSession(1);
2804 	if (session == NULL)
2805 		panic("Could not create initial session.\n");
2806 	BReference<ProcessSession> sessionReference(session, true);
2807 
2808 	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
2809 	if (group == NULL)
2810 		panic("Could not create initial process group.\n");
2811 	BReference<ProcessGroup> groupReference(group, true);
2812 
2813 	group->Publish(session);
2814 
2815 	// create the kernel team
2816 	sKernelTeam = Team::Create(1, "kernel_team", true);
2817 	if (sKernelTeam == NULL)
2818 		panic("could not create kernel team!\n");
2819 	sKernelTeam->SetArgs(sKernelTeam->Name());
2820 	sKernelTeam->state = TEAM_STATE_NORMAL;
2821 
2822 	sKernelTeam->saved_set_uid = 0;
2823 	sKernelTeam->real_uid = 0;
2824 	sKernelTeam->effective_uid = 0;
2825 	sKernelTeam->saved_set_gid = 0;
2826 	sKernelTeam->real_gid = 0;
2827 	sKernelTeam->effective_gid = 0;
2828 	sKernelTeam->supplementary_groups = NULL;
2829 	sKernelTeam->supplementary_group_count = 0;
2830 
2831 	insert_team_into_group(group, sKernelTeam);
2832 
2833 	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
2834 	if (sKernelTeam->io_context == NULL)
2835 		panic("could not create io_context for kernel team!\n");
2836 
2837 	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
2838 		dprintf("Failed to resize FD table for kernel team!\n");
2839 
2840 	// stick it in the team hash
2841 	sTeamHash.Insert(sKernelTeam);
2842 
2843 	// check safe mode settings
2844 	sDisableUserAddOns = get_safemode_boolean(B_SAFEMODE_DISABLE_USER_ADD_ONS,
2845 		false);
2846 
2847 	add_debugger_command_etc("team", &dump_team_info,
2848 		"Dump info about a particular team",
2849 		"[ <id> | <address> | <name> ]\n"
2850 		"Prints information about the specified team. If no argument is given\n"
2851 		"the current team is selected.\n"
2852 		"  <id>       - The ID of the team.\n"
2853 		"  <address>  - The address of the team structure.\n"
2854 		"  <name>     - The team's name.\n", 0);
2855 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2856 		"\n"
2857 		"Prints a list of all existing teams.\n", 0);
2858 
2859 	new(&sNotificationService) TeamNotificationService();
2860 
2861 	sNotificationService.Register();
2862 
2863 	return B_OK;
2864 }
2865 
2866 
2867 int32
2868 team_max_teams(void)
2869 {
2870 	return sMaxTeams;
2871 }
2872 
2873 
2874 int32
2875 team_used_teams(void)
2876 {
2877 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
2878 	return sUsedTeams;
2879 }
2880 
2881 
2882 /*! Returns a death entry of a child team specified by ID (if any).
2883 	The caller must hold the team's lock.
2884 
2885 	\param team The team whose dead children list to check.
2886 	\param child The ID of the child whose death entry to look for. Must be > 0.
2887 	\param _deleteEntry Return variable, indicating whether the caller needs to
2888 		delete the returned entry.
2889 	\return The death entry of the matching team, or \c NULL, if no death entry
2890 		for the team was found.
2891 */
2892 job_control_entry*
2893 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2894 {
2895 	if (child <= 0)
2896 		return NULL;
2897 
2898 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2899 		child);
2900 	if (entry) {
2901 		// only remove the entry if the caller is the parent of the found team
2902 		if (team_get_current_team_id() == entry->thread) {
2903 			team->dead_children.entries.Remove(entry);
2904 			team->dead_children.count--;
2905 			*_deleteEntry = true;
2906 		} else {
2907 			*_deleteEntry = false;
2908 		}
2909 	}
2910 
2911 	return entry;
2912 }
2913 
2914 
2915 /*! Quick check to see if we have a valid team ID. */
2916 bool
2917 team_is_valid(team_id id)
2918 {
2919 	if (id <= 0)
2920 		return false;
2921 
2922 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
2923 	return team_get_team_struct_locked(id) != NULL;
2924 }
2925 
2926 
2927 Team*
2928 team_get_team_struct_locked(team_id id)
2929 {
2930 	return sTeamHash.Lookup(id);
2931 }
2932 
2933 
2934 void
2935 team_set_controlling_tty(int32 ttyIndex)
2936 {
2937 	// lock the team, so its session won't change while we're playing with it
2938 	Team* team = thread_get_current_thread()->team;
2939 	TeamLocker teamLocker(team);
2940 
2941 	// get and lock the session
2942 	ProcessSession* session = team->group->Session();
2943 	AutoLocker<ProcessSession> sessionLocker(session);
2944 
2945 	// set the session's fields
2946 	session->controlling_tty = ttyIndex;
2947 	session->foreground_group = -1;
2948 }
2949 
2950 
2951 int32
2952 team_get_controlling_tty()
2953 {
2954 	// lock the team, so its session won't change while we're playing with it
2955 	Team* team = thread_get_current_thread()->team;
2956 	TeamLocker teamLocker(team);
2957 
2958 	// get and lock the session
2959 	ProcessSession* session = team->group->Session();
2960 	AutoLocker<ProcessSession> sessionLocker(session);
2961 
2962 	// get the session's field
2963 	return session->controlling_tty;
2964 }
2965 
2966 
2967 status_t
2968 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2969 {
2970 	// lock the team, so its session won't change while we're playing with it
2971 	Thread* thread = thread_get_current_thread();
2972 	Team* team = thread->team;
2973 	TeamLocker teamLocker(team);
2974 
2975 	// get and lock the session
2976 	ProcessSession* session = team->group->Session();
2977 	AutoLocker<ProcessSession> sessionLocker(session);
2978 
2979 	// check given TTY -- must be the controlling tty of the calling process
2980 	if (session->controlling_tty != ttyIndex)
2981 		return ENOTTY;
2982 
2983 	// check given process group -- must belong to our session
2984 	{
2985 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2986 		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
2987 		if (group == NULL || group->Session() != session)
2988 			return B_BAD_VALUE;
2989 	}
2990 
2991 	// If we are a background group, we can do that unharmed only when we
2992 	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
2993 	if (session->foreground_group != -1
2994 		&& session->foreground_group != team->group_id
2995 		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
2996 		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
2997 		InterruptsSpinLocker signalLocker(team->signal_lock);
2998 
2999 		if (!is_team_signal_blocked(team, SIGTTOU)) {
3000 			pid_t groupID = team->group_id;
3001 
3002 			signalLocker.Unlock();
3003 			sessionLocker.Unlock();
3004 			teamLocker.Unlock();
3005 
3006 			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
3007 			send_signal_to_process_group(groupID, signal, 0);
3008 			return B_INTERRUPTED;
3009 		}
3010 	}
3011 
3012 	session->foreground_group = processGroupID;
3013 
3014 	return B_OK;
3015 }
3016 
3017 
3018 uid_t
3019 team_geteuid(team_id id)
3020 {
3021 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3022 	Team* team = team_get_team_struct_locked(id);
3023 	if (team == NULL)
3024 		return (uid_t)-1;
3025 	return team->effective_uid;
3026 }
3027 
3028 
3029 /*!	Removes the specified team from the global team hash, from its process
3030 	group, and from its parent.
3031 	It also moves all of its children to the kernel team.
3032 
3033 	The caller must hold the following locks:
3034 	- \a team's process group's lock,
3035 	- the kernel team's lock,
3036 	- \a team's parent team's lock (might be the kernel team), and
3037 	- \a team's lock.
3038 */
3039 void
3040 team_remove_team(Team* team, pid_t& _signalGroup)
3041 {
3042 	Team* parent = team->parent;
3043 
3044 	// remember how long this team lasted
3045 	parent->dead_children.kernel_time += team->dead_threads_kernel_time
3046 		+ team->dead_children.kernel_time;
3047 	parent->dead_children.user_time += team->dead_threads_user_time
3048 		+ team->dead_children.user_time;
3049 
3050 	// remove the team from the hash table
3051 	InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
3052 	sTeamHash.Remove(team);
3053 	sUsedTeams--;
3054 	teamsLocker.Unlock();
3055 
3056 	// The team can no longer be accessed by ID. Navigation to it is still
3057 	// possible from its process group and its parent and children, but that
3058 	// will be rectified shortly.
3059 	team->state = TEAM_STATE_DEATH;
3060 
3061 	// If we're a controlling process (i.e. a session leader with controlling
3062 	// terminal), there's a bit of signaling we have to do. We can't do any of
3063 	// the signaling here due to the bunch of locks we're holding, but we need
3064 	// to determine whom to signal.
3065 	_signalGroup = -1;
3066 	bool isSessionLeader = false;
3067 	if (team->session_id == team->id
3068 		&& team->group->Session()->controlling_tty >= 0) {
3069 		isSessionLeader = true;
3070 
3071 		ProcessSession* session = team->group->Session();
3072 
3073 		AutoLocker<ProcessSession> sessionLocker(session);
3074 
3075 		session->controlling_tty = -1;
3076 		_signalGroup = session->foreground_group;
3077 	}
3078 
3079 	// remove us from our process group
3080 	remove_team_from_group(team);
3081 
3082 	// move the team's children to the kernel team
3083 	while (Team* child = team->children) {
3084 		// remove the child from the current team and add it to the kernel team
3085 		TeamLocker childLocker(child);
3086 
3087 		remove_team_from_parent(team, child);
3088 		insert_team_into_parent(sKernelTeam, child);
3089 
3090 		// move job control entries too
3091 		sKernelTeam->stopped_children.entries.MoveFrom(
3092 			&team->stopped_children.entries);
3093 		sKernelTeam->continued_children.entries.MoveFrom(
3094 			&team->continued_children.entries);
3095 
3096 		// If the team was a session leader with controlling terminal,
3097 		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
3098 		// groups with stopped processes. Due to locking complications we can't
3099 		// do that here, so we only check whether we were a reason for the
3100 		// child's process group not being an orphan and, if so, schedule a
3101 		// later check (cf. orphaned_process_group_check()).
3102 		if (isSessionLeader) {
3103 			ProcessGroup* childGroup = child->group;
3104 			if (childGroup->Session()->id == team->session_id
3105 				&& childGroup->id != team->group_id) {
3106 				childGroup->ScheduleOrphanedCheck();
3107 			}
3108 		}
3109 
3110 		// Note, we don't move the dead children entries. Those will be deleted
3111 		// when the team structure is deleted.
3112 	}
3113 
3114 	// remove us from our parent
3115 	remove_team_from_parent(parent, team);
3116 }
3117 
3118 
3119 /*!	Kills all threads but the main thread of the team and shuts down user
3120 	debugging for it.
3121 	To be called on exit of the team's main thread. No locks must be held.
3122 
3123 	\param team The team in question.
3124 	\return The port of the debugger for the team, -1 if none. To be passed to
3125 		team_delete_team().
3126 */
3127 port_id
3128 team_shutdown_team(Team* team)
3129 {
3130 	ASSERT(thread_get_current_thread() == team->main_thread);
3131 
3132 	TeamLocker teamLocker(team);
3133 
3134 	// Make sure debugging changes won't happen anymore.
3135 	port_id debuggerPort = -1;
3136 	while (true) {
3137 		// If a debugger change is in progress for the team, we'll have to
3138 		// wait until it is done.
3139 		ConditionVariableEntry waitForDebuggerEntry;
3140 		bool waitForDebugger = false;
3141 
3142 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
3143 
3144 		if (team->debug_info.debugger_changed_condition != NULL) {
3145 			team->debug_info.debugger_changed_condition->Add(
3146 				&waitForDebuggerEntry);
3147 			waitForDebugger = true;
3148 		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
3149 			// The team is being debugged. That will stop with the termination
3150 			// of the nub thread. Since we set the team state to death, no one
3151 			// can install a debugger anymore. We fetch the debugger's port to
3152 			// send it a message at the bitter end.
3153 			debuggerPort = team->debug_info.debugger_port;
3154 		}
3155 
3156 		debugInfoLocker.Unlock();
3157 
3158 		if (!waitForDebugger)
3159 			break;
3160 
3161 		// wait for the debugger change to be finished
3162 		teamLocker.Unlock();
3163 
3164 		waitForDebuggerEntry.Wait();
3165 
3166 		teamLocker.Lock();
3167 	}
3168 
3169 	// Mark the team as shutting down. That will prevent new threads from being
3170 	// created and debugger changes from taking place.
3171 	team->state = TEAM_STATE_SHUTDOWN;
3172 
3173 	// delete all timers
3174 	team->DeleteUserTimers(false);
3175 
3176 	// deactivate CPU time user timers for the team
3177 	InterruptsSpinLocker timeLocker(team->time_lock);
3178 
3179 	if (team->HasActiveCPUTimeUserTimers())
3180 		team->DeactivateCPUTimeUserTimers();
3181 
3182 	timeLocker.Unlock();
3183 
3184 	// kill all threads but the main thread
3185 	team_death_entry deathEntry;
3186 	deathEntry.condition.Init(team, "team death");
3187 
3188 	while (true) {
3189 		team->death_entry = &deathEntry;
3190 		deathEntry.remaining_threads = 0;
3191 
3192 		Thread* thread = team->thread_list;
3193 		while (thread != NULL) {
3194 			if (thread != team->main_thread) {
3195 				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
3196 				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
3197 				deathEntry.remaining_threads++;
3198 			}
3199 
3200 			thread = thread->team_next;
3201 		}
3202 
3203 		if (deathEntry.remaining_threads == 0)
3204 			break;
3205 
3206 		// there are threads to wait for
3207 		ConditionVariableEntry entry;
3208 		deathEntry.condition.Add(&entry);
3209 
3210 		teamLocker.Unlock();
3211 
3212 		entry.Wait();
3213 
3214 		teamLocker.Lock();
3215 	}
3216 
3217 	team->death_entry = NULL;
3218 
3219 	return debuggerPort;
3220 }
3221 
3222 
3223 /*!	Called on team exit to notify threads waiting on the team and free most
3224 	resources associated with it.
3225 	The caller shouldn't hold any locks.
3226 */
3227 void
3228 team_delete_team(Team* team, port_id debuggerPort)
3229 {
3230 	// Not quite in our job description, but work that has been left by
3231 	// team_remove_team() and that can be done now that we're not holding any
3232 	// locks.
3233 	orphaned_process_group_check();
3234 
3235 	team_id teamID = team->id;
3236 
3237 	ASSERT(team->num_threads == 0);
3238 
3239 	// If someone is waiting for this team to be loaded, but it dies
3240 	// unexpectedly before being done, we need to notify the waiting
3241 	// thread now.
3242 
3243 	TeamLocker teamLocker(team);
3244 
3245 	if (team->loading_info) {
3246 		// there's indeed someone waiting
3247 		struct team_loading_info* loadingInfo = team->loading_info;
3248 		team->loading_info = NULL;
3249 
3250 		loadingInfo->result = B_ERROR;
3251 
3252 		// wake up the waiting thread
3253 		loadingInfo->condition.NotifyAll();
3254 	}
3255 
3256 	// notify team watchers
3257 
3258 	{
3259 		// we're not reachable from anyone anymore at this point, so we
3260 		// can safely access the list without any locking
3261 		struct team_watcher* watcher;
3262 		while ((watcher = (struct team_watcher*)list_remove_head_item(
3263 				&team->watcher_list)) != NULL) {
3264 			watcher->hook(teamID, watcher->data);
3265 			free(watcher);
3266 		}
3267 	}
3268 
3269 	teamLocker.Unlock();
3270 
3271 	sNotificationService.Notify(TEAM_REMOVED, team);
3272 
3273 	// free team resources
3274 
3275 	delete_realtime_sem_context(team->realtime_sem_context);
3276 	xsi_sem_undo(team);
3277 	remove_images(team);
3278 	team->address_space->RemoveAndPut();
3279 
3280 	team->ReleaseReference();
3281 
3282 	// notify the debugger that the team is gone
3283 	user_debug_team_deleted(teamID, debuggerPort);
3284 }
3285 
3286 
3287 Team*
3288 team_get_kernel_team(void)
3289 {
3290 	return sKernelTeam;
3291 }
3292 
3293 
3294 team_id
3295 team_get_kernel_team_id(void)
3296 {
3297 	if (!sKernelTeam)
3298 		return 0;
3299 
3300 	return sKernelTeam->id;
3301 }
3302 
3303 
3304 team_id
3305 team_get_current_team_id(void)
3306 {
3307 	return thread_get_current_thread()->team->id;
3308 }
3309 
3310 
3311 status_t
3312 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3313 {
3314 	if (id == sKernelTeam->id) {
3315 		// we're the kernel team, so we don't have to go through all
3316 		// the hassle (locking and hash lookup)
3317 		*_addressSpace = VMAddressSpace::GetKernel();
3318 		return B_OK;
3319 	}
3320 
3321 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3322 
3323 	Team* team = team_get_team_struct_locked(id);
3324 	if (team == NULL)
3325 		return B_BAD_VALUE;
3326 
3327 	team->address_space->Get();
3328 	*_addressSpace = team->address_space;
3329 	return B_OK;
3330 }
3331 
3332 
3333 /*!	Sets the team's job control state.
3334 	The caller must hold the parent team's lock. Interrupts are allowed to be
3335 	enabled or disabled.
3336 	\a team The team whose job control state shall be set.
3337 	\a newState The new state to be set.
3338 	\a signal The signal the new state was caused by. Can be \c NULL, if
3339 		none. Then the caller is responsible for filling in the following
3340 		fields of the entry before releasing the parent team's lock, unless
3341 		the new state is \c JOB_CONTROL_STATE_NONE:
3342 		- \c signal: The number of the signal causing the state change.
3343 		- \c signaling_user: The real UID of the user sending the signal.
3344 */
3345 void
3346 team_set_job_control_state(Team* team, job_control_state newState,
3347 	Signal* signal)
3348 {
3349 	if (team == NULL || team->job_control_entry == NULL)
3350 		return;
3351 
3352 	// don't touch anything, if the state stays the same or the team is already
3353 	// dead
3354 	job_control_entry* entry = team->job_control_entry;
3355 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
3356 		return;
3357 
3358 	T(SetJobControlState(team->id, newState, signal));
3359 
3360 	// remove from the old list
3361 	switch (entry->state) {
3362 		case JOB_CONTROL_STATE_NONE:
3363 			// entry is in no list ATM
3364 			break;
3365 		case JOB_CONTROL_STATE_DEAD:
3366 			// can't get here
3367 			break;
3368 		case JOB_CONTROL_STATE_STOPPED:
3369 			team->parent->stopped_children.entries.Remove(entry);
3370 			break;
3371 		case JOB_CONTROL_STATE_CONTINUED:
3372 			team->parent->continued_children.entries.Remove(entry);
3373 			break;
3374 	}
3375 
3376 	entry->state = newState;
3377 
3378 	if (signal != NULL) {
3379 		entry->signal = signal->Number();
3380 		entry->signaling_user = signal->SendingUser();
3381 	}
3382 
3383 	// add to new list
3384 	team_job_control_children* childList = NULL;
3385 	switch (entry->state) {
3386 		case JOB_CONTROL_STATE_NONE:
3387 			// entry doesn't get into any list
3388 			break;
3389 		case JOB_CONTROL_STATE_DEAD:
3390 			childList = &team->parent->dead_children;
3391 			team->parent->dead_children.count++;
3392 			break;
3393 		case JOB_CONTROL_STATE_STOPPED:
3394 			childList = &team->parent->stopped_children;
3395 			break;
3396 		case JOB_CONTROL_STATE_CONTINUED:
3397 			childList = &team->parent->continued_children;
3398 			break;
3399 	}
3400 
3401 	if (childList != NULL) {
3402 		childList->entries.Add(entry);
3403 		team->parent->dead_children.condition_variable.NotifyAll();
3404 	}
3405 }
3406 
3407 
3408 /*!	Inits the given team's exit information, if not yet initialized, to some
3409 	generic "killed" status.
3410 	The caller must not hold the team's lock. Interrupts must be enabled.
3411 
3412 	\param team The team whose exit info shall be initialized.
3413 */
3414 void
3415 team_init_exit_info_on_error(Team* team)
3416 {
3417 	TeamLocker teamLocker(team);
3418 
3419 	if (!team->exit.initialized) {
3420 		team->exit.reason = CLD_KILLED;
3421 		team->exit.signal = SIGKILL;
3422 		team->exit.signaling_user = geteuid();
3423 		team->exit.status = 0;
3424 		team->exit.initialized = true;
3425 	}
3426 }
3427 
3428 
3429 /*! Adds a hook to the team that is called as soon as this team goes away.
3430 	This call might be made public in the future.
3431 */
3432 status_t
3433 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3434 {
3435 	if (hook == NULL || teamID < B_OK)
3436 		return B_BAD_VALUE;
3437 
3438 	// create the watcher object
3439 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3440 	if (watcher == NULL)
3441 		return B_NO_MEMORY;
3442 
3443 	watcher->hook = hook;
3444 	watcher->data = data;
3445 
3446 	// add watcher if the team isn't already dying
3447 	// get the team
3448 	Team* team = Team::GetAndLock(teamID);
3449 	if (team == NULL) {
3450 		free(watcher);
3451 		return B_BAD_TEAM_ID;
3452 	}
3453 
3454 	list_add_item(&team->watcher_list, watcher);
3455 
3456 	team->UnlockAndReleaseReference();
3457 
3458 	return B_OK;
3459 }
3460 
3461 
3462 status_t
3463 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3464 {
3465 	if (hook == NULL || teamID < 0)
3466 		return B_BAD_VALUE;
3467 
3468 	// get team and remove watcher (if present)
3469 	Team* team = Team::GetAndLock(teamID);
3470 	if (team == NULL)
3471 		return B_BAD_TEAM_ID;
3472 
3473 	// search for watcher
3474 	team_watcher* watcher = NULL;
3475 	while ((watcher = (team_watcher*)list_get_next_item(
3476 			&team->watcher_list, watcher)) != NULL) {
3477 		if (watcher->hook == hook && watcher->data == data) {
3478 			// got it!
3479 			list_remove_item(&team->watcher_list, watcher);
3480 			break;
3481 		}
3482 	}
3483 
3484 	team->UnlockAndReleaseReference();
3485 
3486 	if (watcher == NULL)
3487 		return B_ENTRY_NOT_FOUND;
3488 
3489 	free(watcher);
3490 	return B_OK;
3491 }
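// Illustrative sketch (editorial addition, not original source): a minimal
// team watcher. Note that the hook is invoked from team_delete_team() while
// the dying team is still locked, so it must not try to lock the team again.
//
//   static void
//   my_team_gone_hook(team_id id, void* /*data*/)
//   {
//       dprintf("team %" B_PRId32 " went away\n", id);
//   }
//
//   // register:   start_watching_team(teamID, &my_team_gone_hook, NULL);
//   // unregister: stop_watching_team(teamID, &my_team_gone_hook, NULL);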
3492 
3493 
3494 /*!	Allocates a user_thread structure from the team.
3495 	The team lock must be held, unless the function is called for the team's
3496 	main thread. Interrupts must be enabled.
3497 */
3498 struct user_thread*
3499 team_allocate_user_thread(Team* team)
3500 {
3501 	if (team->user_data == 0)
3502 		return NULL;
3503 
3504 	// take an entry from the free list, if any
3505 	if (struct free_user_thread* entry = team->free_user_threads) {
3506 		user_thread* thread = entry->thread;
3507 		team->free_user_threads = entry->next;
3508 		free(entry);
3509 		return thread;
3510 	}
3511 
3512 	while (true) {
3513 		// enough space left?
3514 		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
3515 		if (team->user_data_size - team->used_user_data < needed) {
3516 			// try to resize the area
3517 			if (resize_area(team->user_data_area,
3518 					team->user_data_size + B_PAGE_SIZE) != B_OK) {
3519 				return NULL;
3520 			}
3521 
3522 			// resized user area successfully -- try to allocate the user_thread
3523 			// again
3524 			team->user_data_size += B_PAGE_SIZE;
3525 			continue;
3526 		}
3527 
3528 		// allocate the user_thread
3529 		user_thread* thread
3530 			= (user_thread*)(team->user_data + team->used_user_data);
3531 		team->used_user_data += needed;
3532 
3533 		return thread;
3534 	}
3535 }
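// Editorial note (not original source): since each slot is rounded up to
// CACHE_LINE_SIZE, a team can hand out roughly
// B_PAGE_SIZE / ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE) user_thread
// structures per page of its user data area before resize_area() has to grow
// the area by another page.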
3536 
3537 
3538 /*!	Frees the given user_thread structure.
3539 	The team's lock must not be held. Interrupts must be enabled.
3540 	\param team The team the user thread was allocated from.
3541 	\param userThread The user thread to free.
3542 */
3543 void
3544 team_free_user_thread(Team* team, struct user_thread* userThread)
3545 {
3546 	if (userThread == NULL)
3547 		return;
3548 
3549 	// create a free list entry
3550 	free_user_thread* entry
3551 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3552 	if (entry == NULL) {
3553 		// we have to leak the user thread :-/
3554 		return;
3555 	}
3556 
3557 	// add to free list
3558 	TeamLocker teamLocker(team);
3559 
3560 	entry->thread = userThread;
3561 	entry->next = team->free_user_threads;
3562 	team->free_user_threads = entry;
3563 }
3564 
3565 
3566 //	#pragma mark - Associated data interface
3567 
3568 
3569 AssociatedData::AssociatedData()
3570 	:
3571 	fOwner(NULL)
3572 {
3573 }
3574 
3575 
3576 AssociatedData::~AssociatedData()
3577 {
3578 }
3579 
3580 
3581 void
3582 AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
3583 {
3584 }
3585 
3586 
3587 AssociatedDataOwner::AssociatedDataOwner()
3588 {
3589 	mutex_init(&fLock, "associated data owner");
3590 }
3591 
3592 
3593 AssociatedDataOwner::~AssociatedDataOwner()
3594 {
3595 	mutex_destroy(&fLock);
3596 }
3597 
3598 
3599 bool
3600 AssociatedDataOwner::AddData(AssociatedData* data)
3601 {
3602 	MutexLocker locker(fLock);
3603 
3604 	if (data->Owner() != NULL)
3605 		return false;
3606 
3607 	data->AcquireReference();
3608 	fList.Add(data);
3609 	data->SetOwner(this);
3610 
3611 	return true;
3612 }
3613 
3614 
3615 bool
3616 AssociatedDataOwner::RemoveData(AssociatedData* data)
3617 {
3618 	MutexLocker locker(fLock);
3619 
3620 	if (data->Owner() != this)
3621 		return false;
3622 
3623 	data->SetOwner(NULL);
3624 	fList.Remove(data);
3625 
3626 	locker.Unlock();
3627 
3628 	data->ReleaseReference();
3629 
3630 	return true;
3631 }
3632 
3633 
3634 void
3635 AssociatedDataOwner::PrepareForDeletion()
3636 {
3637 	MutexLocker locker(fLock);
3638 
3639 	// move all data to a temporary list and unset the owner
3640 	DataList list;
3641 	list.MoveFrom(&fList);
3642 
3643 	for (DataList::Iterator it = list.GetIterator();
3644 		AssociatedData* data = it.Next();) {
3645 		data->SetOwner(NULL);
3646 	}
3647 
3648 	locker.Unlock();
3649 
3650 	// call the notification hooks and release our references
3651 	while (AssociatedData* data = list.RemoveHead()) {
3652 		data->OwnerDeleted(this);
3653 		data->ReleaseReference();
3654 	}
3655 }
3656 
3657 
3658 /*!	Associates data with the current team.
3659 	When the team is deleted, the data object is notified.
3660 	The team acquires a reference to the object.
3661 
3662 	\param data The data object.
3663 	\return \c true on success, \c false otherwise. Fails only when the supplied
3664 		data object is already associated with another owner.
3665 */
3666 bool
3667 team_associate_data(AssociatedData* data)
3668 {
3669 	return thread_get_current_thread()->team->AddData(data);
3670 }
3671 
3672 
3673 /*!	Dissociates data from the current team.
3674 	Balances an earlier call to team_associate_data().
3675 
3676 	\param data The data object.
3677 	\return \c true on success, \c false otherwise. Fails only when the data
3678 		object is not associated with the current team.
3679 */
3680 bool
3681 team_dissociate_data(AssociatedData* data)
3682 {
3683 	return thread_get_current_thread()->team->RemoveData(data);
3684 }
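// Illustrative sketch (editorial addition, not original source): a minimal
// AssociatedData subclass attached to the current team; the type name is
// hypothetical. AddData() acquires its own reference (see above), so the
// creator keeps -- and must eventually release -- the initial one.
//
//   struct MyTeamCookie : AssociatedData {
//       virtual void OwnerDeleted(AssociatedDataOwner* owner)
//       {
//           // the owning team is going away; clean up here
//       }
//   };
//
//   MyTeamCookie* cookie = new(std::nothrow) MyTeamCookie;
//   if (cookie != NULL) {
//       team_associate_data(cookie);
//       cookie->ReleaseReference();
//           // the team now holds the only reference
//   }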
3685 
3686 
3687 //	#pragma mark - Public kernel API
3688 
3689 
3690 thread_id
3691 load_image(int32 argCount, const char** args, const char** env)
3692 {
3693 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
3694 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
3695 }
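// Illustrative sketch (editorial addition, not original source): spawning a
// program through the public API above; path and arguments are made up. With
// B_WAIT_TILL_LOADED the main thread is typically returned still suspended,
// so it has to be resumed explicitly.
//
//   const char* args[] = { "/bin/ls", "-l", NULL };
//   thread_id team = load_image(2, args, NULL);
//   if (team >= 0) {
//       resume_thread(team);
//       status_t returnCode;
//       wait_for_team(team, &returnCode);
//   }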
3696 
3697 
3698 thread_id
3699 load_image_etc(int32 argCount, const char* const* args,
3700 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3701 {
3702 	// we need to flatten the args and environment
3703 
3704 	if (args == NULL)
3705 		return B_BAD_VALUE;
3706 
3707 	// determine total needed size
3708 	int32 argSize = 0;
3709 	for (int32 i = 0; i < argCount; i++)
3710 		argSize += strlen(args[i]) + 1;
3711 
3712 	int32 envCount = 0;
3713 	int32 envSize = 0;
3714 	while (env != NULL && env[envCount] != NULL)
3715 		envSize += strlen(env[envCount++]) + 1;
3716 
3717 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3718 	if (size > MAX_PROCESS_ARGS_SIZE)
3719 		return B_TOO_MANY_ARGS;
3720 
3721 	// allocate space
3722 	char** flatArgs = (char**)malloc(size);
3723 	if (flatArgs == NULL)
3724 		return B_NO_MEMORY;
3725 
3726 	char** slot = flatArgs;
3727 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3728 
3729 	// copy arguments and environment
3730 	for (int32 i = 0; i < argCount; i++) {
3731 		int32 argSize = strlen(args[i]) + 1;
3732 		memcpy(stringSpace, args[i], argSize);
3733 		*slot++ = stringSpace;
3734 		stringSpace += argSize;
3735 	}
3736 
3737 	*slot++ = NULL;
3738 
3739 	for (int32 i = 0; i < envCount; i++) {
3740 		int32 envSize = strlen(env[i]) + 1;
3741 		memcpy(stringSpace, env[i], envSize);
3742 		*slot++ = stringSpace;
3743 		stringSpace += envSize;
3744 	}
3745 
3746 	*slot++ = NULL;
3747 
3748 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3749 		priority, parentID, flags, -1, 0);
3750 
3751 	free(flatArgs);
3752 		// load_image_internal() unset our variable if it took over ownership
3753 
3754 	return thread;
3755 }
3756 
3757 
3758 status_t
3759 wait_for_team(team_id id, status_t* _returnCode)
3760 {
3761 	// check whether the team exists
3762 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3763 
3764 	Team* team = team_get_team_struct_locked(id);
3765 	if (team == NULL)
3766 		return B_BAD_TEAM_ID;
3767 
3768 	id = team->id;
3769 
3770 	teamsLocker.Unlock();
3771 
3772 	// wait for the main thread (it has the same ID as the team)
3773 	return wait_for_thread(id, _returnCode);
3774 }
3775 
3776 
3777 status_t
3778 kill_team(team_id id)
3779 {
3780 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3781 
3782 	Team* team = team_get_team_struct_locked(id);
3783 	if (team == NULL)
3784 		return B_BAD_TEAM_ID;
3785 
3786 	id = team->id;
3787 
3788 	teamsLocker.Unlock();
3789 
3790 	if (team == sKernelTeam)
3791 		return B_NOT_ALLOWED;
3792 
3793 	// Just kill the team's main thread (it has same ID as the team). The
3794 	// cleanup code there will take care of the team.
3795 	return kill_thread(id);
3796 }
3797 
3798 
3799 status_t
3800 _get_team_info(team_id id, team_info* info, size_t size)
3801 {
3802 	// get the team
3803 	Team* team = Team::Get(id);
3804 	if (team == NULL)
3805 		return B_BAD_TEAM_ID;
3806 	BReference<Team> teamReference(team, true);
3807 
3808 	// fill in the info
3809 	return fill_team_info(team, info, size);
3810 }
3811 
3812 
3813 status_t
3814 _get_next_team_info(int32* cookie, team_info* info, size_t size)
3815 {
3816 	int32 slot = *cookie;
3817 	if (slot < 1)
3818 		slot = 1;
3819 
3820 	InterruptsReadSpinLocker locker(sTeamHashLock);
3821 
3822 	team_id lastTeamID = peek_next_thread_id();
3823 		// TODO: This is broken, since the id can wrap around!
3824 
3825 	// get next valid team
3826 	Team* team = NULL;
3827 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3828 		slot++;
3829 
3830 	if (team == NULL)
3831 		return B_BAD_TEAM_ID;
3832 
3833 	// get a reference to the team and unlock
3834 	BReference<Team> teamReference(team);
3835 	locker.Unlock();
3836 
3837 	// fill in the info
3838 	*cookie = ++slot;
3839 	return fill_team_info(team, info, size);
3840 }
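// Illustrative sketch (editorial addition, not original source): the cookie
// protocol implemented above -- start at 0 and keep calling until
// B_BAD_TEAM_ID is returned.
//
//   int32 cookie = 0;
//   team_info info;
//   while (_get_next_team_info(&cookie, &info, sizeof(info)) == B_OK)
//       dprintf("team %" B_PRId32 ": %s\n", info.team, info.args);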
3841 
3842 
3843 status_t
3844 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3845 {
3846 	if (size != sizeof(team_usage_info))
3847 		return B_BAD_VALUE;
3848 
3849 	return common_get_team_usage_info(id, who, info, 0);
3850 }
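// Illustrative sketch (editorial addition, not original source): querying the
// calling team's own CPU usage through the API above. Printing with B_PRId64
// assumes bigtime_t is a 64-bit quantity.
//
//   team_usage_info usage;
//   if (_get_team_usage_info(team_get_current_team_id(), B_TEAM_USAGE_SELF,
//           &usage, sizeof(usage)) == B_OK) {
//       dprintf("user: %" B_PRId64 " us, kernel: %" B_PRId64 " us\n",
//           usage.user_time, usage.kernel_time);
//   }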
3851 
3852 
3853 pid_t
3854 getpid(void)
3855 {
3856 	return thread_get_current_thread()->team->id;
3857 }
3858 
3859 
3860 pid_t
3861 getppid(void)
3862 {
3863 	Team* team = thread_get_current_thread()->team;
3864 
3865 	TeamLocker teamLocker(team);
3866 
3867 	return team->parent->id;
3868 }
3869 
3870 
3871 pid_t
3872 getpgid(pid_t id)
3873 {
3874 	if (id < 0) {
3875 		errno = EINVAL;
3876 		return -1;
3877 	}
3878 
3879 	if (id == 0) {
3880 		// get process group of the calling process
3881 		Team* team = thread_get_current_thread()->team;
3882 		TeamLocker teamLocker(team);
3883 		return team->group_id;
3884 	}
3885 
3886 	// get the team
3887 	Team* team = Team::GetAndLock(id);
3888 	if (team == NULL) {
3889 		errno = ESRCH;
3890 		return -1;
3891 	}
3892 
3893 	// get the team's process group ID
3894 	pid_t groupID = team->group_id;
3895 
3896 	team->UnlockAndReleaseReference();
3897 
3898 	return groupID;
3899 }
3900 
3901 
3902 pid_t
3903 getsid(pid_t id)
3904 {
3905 	if (id < 0) {
3906 		errno = EINVAL;
3907 		return -1;
3908 	}
3909 
3910 	if (id == 0) {
3911 		// get session of the calling process
3912 		Team* team = thread_get_current_thread()->team;
3913 		TeamLocker teamLocker(team);
3914 		return team->session_id;
3915 	}
3916 
3917 	// get the team
3918 	Team* team = Team::GetAndLock(id);
3919 	if (team == NULL) {
3920 		errno = ESRCH;
3921 		return -1;
3922 	}
3923 
3924 	// get the team's session ID
3925 	pid_t sessionID = team->session_id;
3926 
3927 	team->UnlockAndReleaseReference();
3928 
3929 	return sessionID;
3930 }
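
/*	Both getters follow the POSIX convention that an ID of 0 refers to the
	calling process, e.g.:

		pid_t group = getpgid(0);		// the caller's process group
		pid_t session = getsid(0);		// the caller's session

	A process is a group (respectively session) leader exactly when the
	returned ID equals its own team ID.
*/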
3931 
3932 
3933 //	#pragma mark - User syscalls
3934 
3935 
3936 status_t
3937 _user_exec(const char* userPath, const char* const* userFlatArgs,
3938 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
3939 {
3940 	// NOTE: Since this function normally doesn't return, don't use automatic
3941 	// variables that need destruction in the function scope.
3942 	char path[B_PATH_NAME_LENGTH];
3943 
3944 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3945 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3946 		return B_BAD_ADDRESS;
3947 
3948 	// copy and relocate the flat arguments
3949 	char** flatArgs;
3950 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3951 		argCount, envCount, flatArgs);
3952 
3953 	if (error == B_OK) {
3954 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3955 			envCount, umask);
3956 			// this one only returns in case of error
3957 	}
3958 
3959 	free(flatArgs);
3960 	return error;
3961 }
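
/*	Note: userFlatArgs uses the same single-allocation layout sketched
	after load_image() above, except that the pointer slots still hold
	userland addresses; copy_user_process_args() copies the buffer and
	relocates those pointers into its kernel-side copy.
*/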
3962 
3963 
3964 thread_id
3965 _user_fork(void)
3966 {
3967 	return fork_team();
3968 }
3969 
3970 
3971 pid_t
3972 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo,
3973 	team_usage_info* usageInfo)
3974 {
3975 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
3976 		return B_BAD_ADDRESS;
3977 	if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo))
3978 		return B_BAD_ADDRESS;
3979 
3980 	siginfo_t info;
3981 	team_usage_info usage_info;
3982 	pid_t foundChild = wait_for_child(child, flags, info, usage_info);
3983 	if (foundChild < 0)
3984 		return syscall_restart_handle_post(foundChild);
3985 
3986 	// copy info back to userland
3987 	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
3988 		return B_BAD_ADDRESS;
3989 	// copy usage_info back to userland
3990 	if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info,
3991 		sizeof(usage_info)) != B_OK) {
3992 		return B_BAD_ADDRESS;
3993 	}
3994 
3995 	return foundChild;
3996 }
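
/*	Userland reaches this through the wait()/waitpid()/wait4() wrappers; a
	typical non-blocking reap loop would look like:

		pid_t pid;
		int status;
		while ((pid = waitpid(-1, &status, WNOHANG)) > 0)
			;	// reap every already-dead child without blocking

	Negative results are routed through syscall_restart_handle_post(), so
	a wait interrupted by a signal can be restarted transparently.
*/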
3997 
3998 
3999 pid_t
4000 _user_process_info(pid_t process, int32 which)
4001 {
4002 	// we only allow returning the parent of the current process
4003 	if (which == PARENT_ID
4004 		&& process != 0 && process != thread_get_current_thread()->team->id)
4005 		return B_BAD_VALUE;
4006 
4007 	pid_t result;
4008 	switch (which) {
4009 		case SESSION_ID:
4010 			result = getsid(process);
4011 			break;
4012 		case GROUP_ID:
4013 			result = getpgid(process);
4014 			break;
4015 		case PARENT_ID:
4016 			result = getppid();
4017 			break;
4018 		default:
4019 			return B_BAD_VALUE;
4020 	}
4021 
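	// On failure the getters above return -1 with errno set to the error
	// code, which is what gets handed back as the syscall's return value.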
4022 	return result >= 0 ? result : errno;
4023 }
4024 
4025 
4026 pid_t
4027 _user_setpgid(pid_t processID, pid_t groupID)
4028 {
4029 	// setpgid() can be called either by the parent of the target process or
4030 	// by the process itself to do one of two things:
4031 	// * Create a new process group with the target process' ID and the target
4032 	//   process as group leader.
4033 	// * Set the target process' process group to an already existing one in the
4034 	//   same session.
4035 
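	// A sketch of the classic caller pattern (shell job control) this
	// supports -- both sides of a fork() make the same call, so the group
	// exists no matter which process runs first:
	//
	//		pid_t child = fork();
	//		if (child == 0)
	//			setpgid(0, 0);			// child: new group with its own ID
	//		else if (child > 0)
	//			setpgid(child, child);	// parent: same effect
	//
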
4036 	if (groupID < 0)
4037 		return B_BAD_VALUE;
4038 
4039 	Team* currentTeam = thread_get_current_thread()->team;
4040 	if (processID == 0)
4041 		processID = currentTeam->id;
4042 
4043 	// if the group ID is not specified, use the target process' ID
4044 	if (groupID == 0)
4045 		groupID = processID;
4046 
4047 	// We loop to recover from the following race condition: We create a new
4048 	// process group, because there isn't one with that ID yet, but later when
4049 	// trying to publish it, we find that someone else created and published
4050 	// a group with that ID in the meantime. In that case we just restart the
4051 	// whole action.
4052 	while (true) {
4053 		// Look up the process group by ID. If it doesn't exist yet and we are
4054 		// allowed to create a new one, do that.
4055 		ProcessGroup* group = ProcessGroup::Get(groupID);
4056 		bool newGroup = false;
4057 		if (group == NULL) {
4058 			if (groupID != processID)
4059 				return B_NOT_ALLOWED;
4060 
4061 			group = new(std::nothrow) ProcessGroup(groupID);
4062 			if (group == NULL)
4063 				return B_NO_MEMORY;
4064 
4065 			newGroup = true;
4066 		}
4067 		BReference<ProcessGroup> groupReference(group, true);
4068 
4069 		// get the target team
4070 		Team* team = Team::Get(processID);
4071 		if (team == NULL)
4072 			return ESRCH;
4073 		BReference<Team> teamReference(team, true);
4074 
4075 		// lock the new process group and the team's current process group
4076 		while (true) {
4077 			// lock the team's current process group
4078 			team->LockProcessGroup();
4079 
4080 			ProcessGroup* oldGroup = team->group;
4081 			if (oldGroup == group) {
4082 				// it's the same as the target group, so just bail out
4083 				oldGroup->Unlock();
4084 				return group->id;
4085 			}
4086 
4087 			oldGroup->AcquireReference();
4088 
4089 			// lock the target process group, if locking order allows it
4090 			if (newGroup || group->id > oldGroup->id) {
4091 				group->Lock();
4092 				break;
4093 			}
4094 
4095 			// otherwise, try to lock without violating the locking order
4096 			if (group->TryLock())
4097 				break;
4098 
4099 			// no dice -- unlock the team's current process group and relock in
4100 			// the correct order
4101 			oldGroup->Unlock();
4102 
4103 			group->Lock();
4104 			oldGroup->Lock();
4105 
4106 			// check whether things are still the same
4107 			TeamLocker teamLocker(team);
4108 			if (team->group == oldGroup)
4109 				break;
4110 
4111 			// something changed -- unlock everything and retry
4112 			teamLocker.Unlock();
4113 			oldGroup->Unlock();
4114 			group->Unlock();
4115 			oldGroup->ReleaseReference();
4116 		}
4117 
4118 		// we now hold references to and locks on both the new and old groups
4119 		BReference<ProcessGroup> oldGroupReference(team->group, true);
4120 		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
4121 		AutoLocker<ProcessGroup> groupLocker(group, true);
4122 
4123 		// also lock the target team and its parent
4124 		team->LockTeamAndParent(false);
4125 		TeamLocker parentLocker(team->parent, true);
4126 		TeamLocker teamLocker(team, true);
4127 
4128 		// perform the checks
4129 		if (team == currentTeam) {
4130 			// we set our own group
4131 
4132 			// we must not change our process group ID if we're a session leader
4133 			if (is_session_leader(currentTeam))
4134 				return B_NOT_ALLOWED;
4135 		} else {
4136 			// Calling team != target team. The target team must be a child of
4137 			// the calling team and in the same session. (If that's the case it
4138 			// isn't a session leader either.)
4139 			if (team->parent != currentTeam
4140 				|| team->session_id != currentTeam->session_id) {
4141 				return B_NOT_ALLOWED;
4142 			}
4143 
4144 			// The call is also supposed to fail when the child has already
4145 			// executed exec*() [EACCES].
4146 			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
4147 				return EACCES;
4148 		}
4149 
4150 		// If we created a new process group, publish it now.
4151 		if (newGroup) {
4152 			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
4153 			if (sGroupHash.Lookup(groupID)) {
4154 				// A group with that ID has appeared since we first checked.
4155 				// Back to square one.
4156 				continue;
4157 			}
4158 
4159 			group->PublishLocked(team->group->Session());
4160 		} else if (group->Session()->id != team->session_id) {
4161 			// The existing target process group belongs to a different session.
4162 			// That's not allowed.
4163 			return B_NOT_ALLOWED;
4164 		}
4165 
4166 		// Everything is ready -- set the group.
4167 		remove_team_from_group(team);
4168 		insert_team_into_group(group, team);
4169 
4170 		// Changing the process group might have changed the situation for a
4171 		// parent waiting in wait_for_child(). Hence we notify it.
4172 		team->parent->dead_children.condition_variable.NotifyAll();
4173 
4174 		return group->id;
4175 	}
4176 }
4177 
4178 
4179 pid_t
4180 _user_setsid(void)
4181 {
4182 	Team* team = thread_get_current_thread()->team;
4183 
4184 	// create a new process group and session
4185 	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
4186 	if (group == NULL)
4187 		return B_NO_MEMORY;
4188 	BReference<ProcessGroup> groupReference(group, true);
4189 	AutoLocker<ProcessGroup> groupLocker(group);
4190 
4191 	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
4192 	if (session == NULL)
4193 		return B_NO_MEMORY;
4194 	BReference<ProcessSession> sessionReference(session, true);
4195 
4196 	// lock the team's current process group, parent, and the team itself
4197 	team->LockTeamParentAndProcessGroup();
4198 	BReference<ProcessGroup> oldGroupReference(team->group);
4199 	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
4200 	TeamLocker parentLocker(team->parent, true);
4201 	TeamLocker teamLocker(team, true);
4202 
4203 	// the team must not already be a process group leader
4204 	if (is_process_group_leader(team))
4205 		return B_NOT_ALLOWED;
4206 
4207 	// remove the team from the old and add it to the new process group
4208 	remove_team_from_group(team);
4209 	group->Publish(session);
4210 	insert_team_into_group(group, team);
4211 
4212 	// Changing the process group might have changed the situation for a
4213 	// parent waiting in wait_for_child(). Hence we notify it.
4214 	team->parent->dead_children.condition_variable.NotifyAll();
4215 
4216 	return group->id;
4217 }
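
/*	Typical userland use (sketch): a daemon forks first, so that the child
	cannot be a process group leader, and then calls setsid() to move into
	a fresh session without a controlling terminal:

		if (fork() > 0)
			exit(0);		// the parent goes away
		if (setsid() < 0)
			;				// handle error

	The initial fork is what lets the is_process_group_leader() check
	above succeed.
*/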
4218 
4219 
4220 status_t
4221 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4222 {
4223 	status_t returnCode;
4224 	status_t status;
4225 
4226 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4227 		return B_BAD_ADDRESS;
4228 
4229 	status = wait_for_team(id, &returnCode);
4230 	if (status >= B_OK && _userReturnCode != NULL) {
4231 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4232 				!= B_OK)
4233 			return B_BAD_ADDRESS;
4234 		return B_OK;
4235 	}
4236 
4237 	return syscall_restart_handle_post(status);
4238 }
4239 
4240 
4241 thread_id
4242 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
4243 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
4244 	port_id errorPort, uint32 errorToken)
4245 {
4246 	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));
4247 
4248 	if (argCount < 1)
4249 		return B_BAD_VALUE;
4250 
4251 	// copy and relocate the flat arguments
4252 	char** flatArgs;
4253 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
4254 		argCount, envCount, flatArgs);
4255 	if (error != B_OK)
4256 		return error;
4257 
4258 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
4259 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
4260 		errorToken);
4261 
4262 	free(flatArgs);
4263 		// load_image_internal() has unset flatArgs if it took over ownership
4264 
4265 	return thread;
4266 }
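
/*	errorPort/errorToken give the new team's runtime loader a channel for
	reporting load errors (e.g. unresolved libraries) back to the caller
	before the new main thread runs any userland code; callers that don't
	care pass -1/0, as the in-kernel load_image() above does.
*/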
4267 
4268 
4269 void
4270 _user_exit_team(status_t returnValue)
4271 {
4272 	Thread* thread = thread_get_current_thread();
4273 	Team* team = thread->team;
4274 
4275 	// set this thread's exit status
4276 	thread->exit.status = returnValue;
4277 
4278 	// set the team exit status
4279 	TeamLocker teamLocker(team);
4280 
4281 	if (!team->exit.initialized) {
4282 		team->exit.reason = CLD_EXITED;
4283 		team->exit.signal = 0;
4284 		team->exit.signaling_user = 0;
4285 		team->exit.status = returnValue;
4286 		team->exit.initialized = true;
4287 	}
4288 
4289 	teamLocker.Unlock();
4290 
4291 	// Stop the thread if the team is being debugged and that has been
4292 	// requested.
4293 	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
4294 		user_debug_stop_thread();
4295 
4296 	// Send this thread a SIGKILL. This makes sure the thread will not return to
4297 	// userland. The signal handling code forwards the signal to the main
4298 	// thread (if that's not already this one), which will take the team down.
4299 	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
4300 	send_signal_to_thread(thread, signal, 0);
4301 }
4302 
4303 
4304 status_t
4305 _user_kill_team(team_id team)
4306 {
4307 	return kill_team(team);
4308 }
4309 
4310 
4311 status_t
4312 _user_get_team_info(team_id id, team_info* userInfo)
4313 {
4314 	status_t status;
4315 	team_info info;
4316 
4317 	if (!IS_USER_ADDRESS(userInfo))
4318 		return B_BAD_ADDRESS;
4319 
4320 	status = _get_team_info(id, &info, sizeof(team_info));
4321 	if (status == B_OK) {
4322 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4323 			return B_BAD_ADDRESS;
4324 	}
4325 
4326 	return status;
4327 }
4328 
4329 
4330 status_t
4331 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
4332 {
4333 	status_t status;
4334 	team_info info;
4335 	int32 cookie;
4336 
4337 	if (!IS_USER_ADDRESS(userCookie)
4338 		|| !IS_USER_ADDRESS(userInfo)
4339 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4340 		return B_BAD_ADDRESS;
4341 
4342 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
4343 	if (status != B_OK)
4344 		return status;
4345 
4346 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4347 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4348 		return B_BAD_ADDRESS;
4349 
4350 	return status;
4351 }
4352 
4353 
4354 team_id
4355 _user_get_current_team(void)
4356 {
4357 	return team_get_current_team_id();
4358 }
4359 
4360 
4361 status_t
4362 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4363 	size_t size)
4364 {
4365 	if (size != sizeof(team_usage_info))
4366 		return B_BAD_VALUE;
4367 
4368 	team_usage_info info;
4369 	status_t status = common_get_team_usage_info(team, who, &info,
4370 		B_CHECK_PERMISSION);
4371 
4372 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4373 		|| user_memcpy(userInfo, &info, size) != B_OK) {
4374 		return B_BAD_ADDRESS;
4375 	}
4376 
4377 	return status;
4378 }
4379 
4380 
4381 status_t
4382 _user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
4383 	size_t size, size_t* _sizeNeeded)
4384 {
4385 	// check parameters
4386 	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
4387 		|| (buffer == NULL && size > 0)
4388 		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
4389 		return B_BAD_ADDRESS;
4390 	}
4391 
4392 	KMessage info;
4393 
4394 	if ((flags & B_TEAM_INFO_BASIC) != 0) {
4395 		// allocate memory for a copy of the needed team data
4396 		struct ExtendedTeamData {
4397 			team_id	id;
4398 			pid_t	group_id;
4399 			pid_t	session_id;
4400 			uid_t	real_uid;
4401 			gid_t	real_gid;
4402 			uid_t	effective_uid;
4403 			gid_t	effective_gid;
4404 			char	name[B_OS_NAME_LENGTH];
4405 		} teamClone;
4406 
4407 		io_context* ioContext;
4408 		{
4409 			// get the team structure
4410 			Team* team = Team::GetAndLock(teamID);
4411 			if (team == NULL)
4412 				return B_BAD_TEAM_ID;
4413 			BReference<Team> teamReference(team, true);
4414 			TeamLocker teamLocker(team, true);
4415 
4416 			// copy the data
4417 			teamClone.id = team->id;
4418 			strlcpy(teamClone.name, team->Name(), sizeof(teamClone.name));
4419 			teamClone.group_id = team->group_id;
4420 			teamClone.session_id = team->session_id;
4421 			teamClone.real_uid = team->real_uid;
4422 			teamClone.real_gid = team->real_gid;
4423 			teamClone.effective_uid = team->effective_uid;
4424 			teamClone.effective_gid = team->effective_gid;
4425 
4426 			// also fetch a reference to the I/O context
4427 			ioContext = team->io_context;
4428 			vfs_get_io_context(ioContext);
4429 		}
4430 		CObjectDeleter<io_context> ioContextPutter(ioContext,
4431 			&vfs_put_io_context);
4432 
4433 		// add the basic data to the info message
4434 		if (info.AddInt32("id", teamClone.id) != B_OK
4435 			|| info.AddString("name", teamClone.name) != B_OK
4436 			|| info.AddInt32("process group", teamClone.group_id) != B_OK
4437 			|| info.AddInt32("session", teamClone.session_id) != B_OK
4438 			|| info.AddInt32("uid", teamClone.real_uid) != B_OK
4439 			|| info.AddInt32("gid", teamClone.real_gid) != B_OK
4440 			|| info.AddInt32("euid", teamClone.effective_uid) != B_OK
4441 			|| info.AddInt32("egid", teamClone.effective_gid) != B_OK) {
4442 			return B_NO_MEMORY;
4443 		}
4444 
4445 		// get the current working directory from the I/O context
4446 		dev_t cwdDevice;
4447 		ino_t cwdDirectory;
4448 		{
4449 			MutexLocker ioContextLocker(ioContext->io_mutex);
4450 			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
4451 		}
4452 
4453 		if (info.AddInt32("cwd device", cwdDevice) != B_OK
4454 			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
4455 			return B_NO_MEMORY;
4456 		}
4457 	}
4458 
4459 	// TODO: Support the other flags!
4460 
4461 	// copy the needed size and, if it fits, the message back to userland
4462 	size_t sizeNeeded = info.ContentSize();
4463 	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
4464 		return B_BAD_ADDRESS;
4465 
4466 	if (sizeNeeded > size)
4467 		return B_BUFFER_OVERFLOW;
4468 
4469 	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
4470 		return B_BAD_ADDRESS;
4471 
4472 	return B_OK;
4473 }
4474
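
/*	Caller-side sketch of the resulting two-call protocol (assuming the
	matching _kern_get_extended_team_info() syscall stub in userland):

		size_t sizeNeeded = 0;
		_kern_get_extended_team_info(team, B_TEAM_INFO_BASIC, NULL, 0,
			&sizeNeeded);
			// expected: B_BUFFER_OVERFLOW, with sizeNeeded filled in
		void* buffer = malloc(sizeNeeded);
		status_t error = _kern_get_extended_team_info(team,
			B_TEAM_INFO_BASIC, buffer, sizeNeeded, &sizeNeeded);

	On success the buffer holds a flattened KMessage whose fields ("id",
	"name", "process group", ...) match those added above.
*/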