xref: /haiku/src/system/kernel/team.cpp (revision 52f7c9389475e19fc21487b38064b4390eeb6fea)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 /*!	Team functions */
13 
14 
15 #include <team.h>
16 
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22 
23 #include <OS.h>
24 
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27 
28 #include <extended_system_info_defs.h>
29 
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <safemode.h>
47 #include <sem.h>
48 #include <syscall_process_info.h>
49 #include <syscall_load_image.h>
50 #include <syscall_restart.h>
51 #include <syscalls.h>
52 #include <tls.h>
53 #include <tracing.h>
54 #include <user_runtime.h>
55 #include <user_thread.h>
56 #include <usergroup.h>
57 #include <vfs.h>
58 #include <vm/vm.h>
59 #include <vm/VMAddressSpace.h>
60 #include <util/AutoLock.h>
61 #include <util/ThreadAutoLock.h>
62 
63 #include "TeamThreadTables.h"
64 
65 
//#define TRACE_TEAM
#ifdef TRACE_TEAM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


// Key type for looking up teams by ID.
struct team_key {
	team_id id;
};

// Parameters handed to a newly created/exec()ed team: the executable path,
// the flattened argument/environment vectors, and bookkeeping for reporting
// load errors back to the requester.
struct team_arg {
	char	*path;				// executable path
	char	**flat_args;		// argument + environment vectors in one
								// flat allocation of flat_args_size bytes
	size_t	flat_args_size;
	uint32	arg_count;			// number of argument entries in flat_args
	uint32	env_count;			// number of environment entries in flat_args
	mode_t	umask;				// umask for the new team
	uint32	flags;				// TEAM_ARGS_FLAG_* (see below)
	port_id	error_port;			// port to notify on load errors
	uint32	error_token;		// token identifying the request on error_port
};

// disable address space layout randomization for the new team
#define TEAM_ARGS_FLAG_NO_ASLR	0x01
91 
92 
namespace {


// Notification service publishing team events (creation, deletion, exec);
// see Notify() below for the message contents.
class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};


// #pragma mark - TeamTable


// team_id -> Team hash table (with iterator support)
typedef BKernel::TeamThreadTable<Team> TeamTable;


// #pragma mark - ProcessGroupHashDefinition


// BOpenHashTable policy for the pid_t -> ProcessGroup table. Uses the
// group ID itself as hash value and chains groups via ProcessGroup::next.
struct ProcessGroupHashDefinition {
	typedef pid_t			KeyType;
	typedef	ProcessGroup	ValueType;

	size_t HashKey(pid_t key) const
	{
		return key;
	}

	size_t Hash(ProcessGroup* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(pid_t key, ProcessGroup* value) const
	{
		return value->id == key;
	}

	ProcessGroup*& GetLink(ProcessGroup* value) const
	{
		return value->next;
	}
};

typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;


}	// unnamed namespace
142 
143 
144 // #pragma mark -
145 
146 
// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static rw_spinlock sTeamHashLock = B_RW_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

// the kernel team; NULL until set up during initialization
static Team* sKernelTeam = NULL;
// when true, user add-ons shall not be loaded (presumably a safe-mode
// setting -- set outside this chunk)
static bool sDisableUserAddOns = false;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;
	// starts at 1, presumably accounting for the kernel team

static TeamNotificationService sNotificationService;

// reserved/initial size of the area backing a team's user data heap
static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
173 
174 
175 // #pragma mark - TeamListIterator
176 
177 
178 TeamListIterator::TeamListIterator()
179 {
180 	// queue the entry
181 	InterruptsWriteSpinLocker locker(sTeamHashLock);
182 	sTeamHash.InsertIteratorEntry(&fEntry);
183 }
184 
185 
186 TeamListIterator::~TeamListIterator()
187 {
188 	// remove the entry
189 	InterruptsWriteSpinLocker locker(sTeamHashLock);
190 	sTeamHash.RemoveIteratorEntry(&fEntry);
191 }
192 
193 
194 Team*
195 TeamListIterator::Next()
196 {
197 	// get the next team -- if there is one, get reference for it
198 	InterruptsWriteSpinLocker locker(sTeamHashLock);
199 	Team* team = sTeamHash.NextElement(&fEntry);
200 	if (team != NULL)
201 		team->AcquireReference();
202 
203 	return team;
204 }
205 
206 
207 // #pragma mark - Tracing
208 
209 
210 #if TEAM_TRACING
211 namespace TeamTracing {
212 
// Trace entry recording a fork(): remembers the ID of the newly created
// main thread.
class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
	}

private:
	thread_id			fForkedThread;
};
230 
231 
232 class ExecTeam : public AbstractTraceEntry {
233 public:
234 	ExecTeam(const char* path, int32 argCount, const char* const* args,
235 			int32 envCount, const char* const* env)
236 		:
237 		fArgCount(argCount),
238 		fArgs(NULL)
239 	{
240 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
241 			false);
242 
243 		// determine the buffer size we need for the args
244 		size_t argBufferSize = 0;
245 		for (int32 i = 0; i < argCount; i++)
246 			argBufferSize += strlen(args[i]) + 1;
247 
248 		// allocate a buffer
249 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
250 		if (fArgs) {
251 			char* buffer = fArgs;
252 			for (int32 i = 0; i < argCount; i++) {
253 				size_t argSize = strlen(args[i]) + 1;
254 				memcpy(buffer, args[i], argSize);
255 				buffer += argSize;
256 			}
257 		}
258 
259 		// ignore env for the time being
260 		(void)envCount;
261 		(void)env;
262 
263 		Initialized();
264 	}
265 
266 	virtual void AddDump(TraceOutput& out)
267 	{
268 		out.Print("team exec, \"%p\", args:", fPath);
269 
270 		if (fArgs != NULL) {
271 			char* args = fArgs;
272 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
273 				out.Print(" \"%s\"", args);
274 				args += strlen(args) + 1;
275 			}
276 		} else
277 			out.Print(" <too long>");
278 	}
279 
280 private:
281 	char*	fPath;
282 	int32	fArgCount;
283 	char*	fArgs;
284 };
285 
286 
287 static const char*
288 job_control_state_name(job_control_state state)
289 {
290 	switch (state) {
291 		case JOB_CONTROL_STATE_NONE:
292 			return "none";
293 		case JOB_CONTROL_STATE_STOPPED:
294 			return "stopped";
295 		case JOB_CONTROL_STATE_CONTINUED:
296 			return "continued";
297 		case JOB_CONTROL_STATE_DEAD:
298 			return "dead";
299 		default:
300 			return "invalid";
301 	}
302 }
303 
304 
// Trace entry recording a job-control state transition of a team,
// including the signal (if any) that caused it.
class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %" B_PRId32 ", "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;	// 0 if no signal was involved
};
328 
329 
// Trace entry recording the start of a wait_for_child() style call with
// the requested child ID and wait flags.
class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %" B_PRId32 ", "
			"flags: %#" B_PRIx32, fChild, fFlags);
	}

private:
	pid_t	fChild;
	uint32	fFlags;
};
350 
351 
// Trace entry recording the completion of a wait for a child: either the
// job-control data of the reaped child, or -- via the second constructor --
// an error code stored in fTeam (negative, which is how AddDump() tells
// the two cases apart).
class WaitForChildDone : public AbstractTraceEntry {
public:
	WaitForChildDone(const job_control_entry& entry)
		:
		fState(entry.state),
		fTeam(entry.thread),
		fStatus(entry.status),
		fReason(entry.reason),
		fSignal(entry.signal)
	{
		Initialized();
	}

	WaitForChildDone(status_t error)
		:
		fTeam(error)
		// the remaining members stay uninitialized; AddDump() does not
		// touch them when fTeam < 0
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		if (fTeam >= 0) {
			out.Print("team wait for child done, team: %" B_PRId32 ", "
				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
				fTeam, job_control_state_name(fState), fStatus, fReason,
				fSignal);
		} else {
			out.Print("team wait for child failed, error: "
				"%#" B_PRIx32 ", ", fTeam);
		}
	}

private:
	job_control_state	fState;
	team_id				fTeam;		// team ID, or a (negative) error code
	status_t			fStatus;
	uint16				fReason;
	uint16				fSignal;
};
392 
393 }	// namespace TeamTracing
394 
395 #	define T(x) new(std::nothrow) TeamTracing::x;
396 #else
397 #	define T(x) ;
398 #endif
399 
400 
401 //	#pragma mark - TeamNotificationService
402 
403 
// Registers the service under the name "teams".
TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}
408 
409 
410 void
411 TeamNotificationService::Notify(uint32 eventCode, Team* team)
412 {
413 	char eventBuffer[128];
414 	KMessage event;
415 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
416 	event.AddInt32("event", eventCode);
417 	event.AddInt32("team", team->id);
418 	event.AddPointer("teamStruct", team);
419 
420 	DefaultNotificationService::Notify(event, eventCode);
421 }
422 
423 
424 //	#pragma mark - Team
425 
426 
/*!	Initializes the team with the given \a id. \a kernel indicates whether
	this is the kernel team (which gets an unlimited queued-signal budget
	and a statically named lock).

	Note: allocations in here may fail (job_control_entry,
	fQueuedSignalsCounter may end up NULL); Team::Create() checks for that
	after construction.
*/
Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;

	hash_next = siblings_next = parent = children = group_next = NULL;
	serial_number = -1;

	group_id = session_id = -1;
	group = NULL;

	num_threads = 0;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	io_context = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	death_entry = NULL;
	list_init(&dead_threads);

	dead_children.condition_variable.Init(&dead_children, "team children");
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// The wait-for-child entry; its "thread" field is the team ID. May be
	// NULL on allocation failure -- checked in Create().
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	address_space = NULL;
	main_thread = NULL;
	thread_list = NULL;
	loading_info = NULL;

	list_init(&image_list);
	list_init(&watcher_list);
	list_init(&sem_list);
	list_init_etc(&port_list, port_team_link_offset());

	user_data = 0;
	user_data_area = -1;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	commpage_address = NULL;

	clear_team_debug_info(&debug_info, true);

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;
	B_INITIALIZE_SPINLOCK(&time_lock);

	saved_set_uid = real_uid = effective_uid = -1;
	saved_set_gid = real_gid = effective_gid = -1;

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	B_INITIALIZE_SPINLOCK(&signal_lock);

	// init mutex
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	fName[0] = '\0';
	fArgs[0] = '\0';

	// kernel team: unlimited (-1) queued signals; may be NULL on
	// allocation failure -- checked in Create()
	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));
	fUserDefinedTimerCount = 0;

	fCoreDumpCondition = NULL;
}
512 
513 
/*!	Frees all resources still owned by the team: the I/O context, owned
	ports and semaphores, user timers, pending/queued signals, the dead
	thread/child bookkeeping, and the free user-thread list.
*/
Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	if (io_context != NULL)
		vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	// delete all (not only user-defined) timers
	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	// free the death entries of our dead threads
	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	// delete the job-control entries of our dead children
	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	// free the not-yet-handed-out user thread structures
	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}
549 
550 
/*!	Creates and fully initializes a new Team object with the given \a id
	and \a name. Returns NULL on any failure (out of memory, architecture
	specific init failure, or -- for userland teams -- team timer creation
	failure); partially constructed teams are cleaned up automatically.
*/
/*static*/ Team*
Team::Create(team_id id, const char* name, bool kernel)
{
	// create the team object
	Team* team = new(std::nothrow) Team(id, kernel);
	if (team == NULL)
		return NULL;
	ObjectDeleter<Team> teamDeleter(team);

	if (name != NULL)
		team->SetName(name);

	// check initialization
	// (the constructor cannot report allocation failures itself)
	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
		return NULL;

	// finish initialization (arch specifics)
	if (arch_team_init_team_struct(team, kernel) != B_OK)
		return NULL;

	if (!kernel) {
		status_t error = user_timer_create_team_timers(team);
		if (error != B_OK)
			return NULL;
	}

	// everything went fine
	return teamDeleter.Detach();
}
580 
581 
582 /*!	\brief Returns the team with the given ID.
583 	Returns a reference to the team.
584 	Team and thread spinlock must not be held.
585 */
586 /*static*/ Team*
587 Team::Get(team_id id)
588 {
589 	if (id == B_CURRENT_TEAM) {
590 		Team* team = thread_get_current_thread()->team;
591 		team->AcquireReference();
592 		return team;
593 	}
594 
595 	InterruptsReadSpinLocker locker(sTeamHashLock);
596 	Team* team = sTeamHash.Lookup(id);
597 	if (team != NULL)
598 		team->AcquireReference();
599 	return team;
600 }
601 
602 
603 /*!	\brief Returns the team with the given ID in a locked state.
604 	Returns a reference to the team.
605 	Team and thread spinlock must not be held.
606 */
/*static*/ Team*
Team::GetAndLock(team_id id)
{
	// get the team
	Team* team = Get(id);
	if (team == NULL)
		return NULL;

	// lock it
	team->Lock();

	// only return the team, when it isn't already dying
	// (reference and lock are dropped again in that case)
	if (team->state >= TEAM_STATE_SHUTDOWN) {
		team->Unlock();
		team->ReleaseReference();
		return NULL;
	}

	return team;
}
627 
628 
629 /*!	Locks the team and its parent team (if any).
630 	The caller must hold a reference to the team or otherwise make sure that
631 	it won't be deleted.
632 	If the team doesn't have a parent, only the team itself is locked. If the
633 	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
634 	only the team itself is locked.
635 
636 	\param dontLockParentIfKernel If \c true, the team's parent team is only
637 		locked, if it is not the kernel team.
638 */
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent. This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);

		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}
671 
672 
673 /*!	Unlocks the team and its parent team (if any).
674 */
675 void
676 Team::UnlockTeamAndParent()
677 {
678 	if (parent != NULL)
679 		parent->Unlock();
680 
681 	Unlock();
682 }
683 
684 
685 /*!	Locks the team, its parent team (if any), and the team's process group.
686 	The caller must hold a reference to the team or otherwise make sure that
687 	it won't be deleted.
688 	If the team doesn't have a parent, only the team itself is locked.
689 */
void
Team::LockTeamParentAndProcessGroup()
{
	// first acquire group + team locks in the correct order
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's lock.
	// If we have a parent, try to lock it.
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest of
	// the job. (The group lock is kept throughout.)
	Unlock();
	LockTeamAndParent(false);
}
705 
706 
707 /*!	Unlocks the team, its parent team (if any), and the team's process group.
708 */
709 void
710 Team::UnlockTeamParentAndProcessGroup()
711 {
712 	group->Unlock();
713 
714 	if (parent != NULL)
715 		parent->Unlock();
716 
717 	Unlock();
718 }
719 
720 
/*!	Locks the team and its process group (if any), observing the
	group -> team locking order via a trial-and-error loop.
	The caller must hold a reference to the team.
*/
void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group == NULL)
			return;

		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);

		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}
755 
756 
757 void
758 Team::UnlockTeamAndProcessGroup()
759 {
760 	group->Unlock();
761 	Unlock();
762 }
763 
764 
765 void
766 Team::SetName(const char* name)
767 {
768 	if (const char* lastSlash = strrchr(name, '/'))
769 		name = lastSlash + 1;
770 
771 	strlcpy(fName, name, B_OS_NAME_LENGTH);
772 }
773 
774 
/*!	Sets the team's argument string verbatim (truncated to the size of
	fArgs).
*/
void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}
780 
781 
782 void
783 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
784 {
785 	fArgs[0] = '\0';
786 	strlcpy(fArgs, path, sizeof(fArgs));
787 	for (int i = 0; i < otherArgCount; i++) {
788 		strlcat(fArgs, " ", sizeof(fArgs));
789 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
790 	}
791 }
792 
793 
/*!	Resets the team's signal actions for an exec(), per POSIX semantics. */
void
Team::ResetSignalsOnExec()
{
	// We are supposed to keep pending signals. Signal actions shall be reset
	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
	// (for SIGCHLD it's implementation-defined). Others shall be reset to
	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
	// flags, but since there aren't any handlers, they make little sense, so
	// we clear them.

	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
		struct sigaction& action = SignalActionFor(i);
		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
			action.sa_handler = SIG_DFL;

		// clear mask, flags (including SA_ONSTACK), and user data for all
		// signals, even those whose disposition is kept
		action.sa_mask = 0;
		action.sa_flags = 0;
		action.sa_userdata = NULL;
	}
}
814 
815 
/*!	Copies all signal actions from \a parent, e.g. for fork(). */
void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}
821 
822 
823 /*!	Adds the given user timer to the team and, if user-defined, assigns it an
824 	ID.
825 
826 	The caller must hold the team's lock.
827 
828 	\param timer The timer to be added. If it doesn't have an ID yet, it is
829 		considered user-defined and will be assigned an ID.
830 	\return \c B_OK, if the timer was added successfully, another error code
831 		otherwise.
832 */
833 status_t
834 Team::AddUserTimer(UserTimer* timer)
835 {
836 	// don't allow addition of timers when already shutting the team down
837 	if (state >= TEAM_STATE_SHUTDOWN)
838 		return B_BAD_TEAM_ID;
839 
840 	// If the timer is user-defined, check timer limit and increment
841 	// user-defined count.
842 	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
843 		return EAGAIN;
844 
845 	fUserTimers.AddTimer(timer);
846 
847 	return B_OK;
848 }
849 
850 
851 /*!	Removes the given user timer from the team.
852 
853 	The caller must hold the team's lock.
854 
855 	\param timer The timer to be removed.
856 
857 */
void
Team::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	// only user-defined timers count against the per-team limit
	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		UserDefinedTimersRemoved(1);
}
866 
867 
868 /*!	Deletes all (or all user-defined) user timers of the team.
869 
870 	Timer's belonging to the team's threads are not affected.
871 	The caller must hold the team's lock.
872 
873 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
874 		otherwise all timers are deleted.
875 */
876 void
877 Team::DeleteUserTimers(bool userDefinedOnly)
878 {
879 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
880 	UserDefinedTimersRemoved(count);
881 }
882 
883 
884 /*!	If not at the limit yet, increments the team's user-defined timer count.
885 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
886 */
887 bool
888 Team::CheckAddUserDefinedTimer()
889 {
890 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
891 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
892 		atomic_add(&fUserDefinedTimerCount, -1);
893 		return false;
894 	}
895 
896 	return true;
897 }
898 
899 
900 /*!	Subtracts the given count for the team's user-defined timer count.
901 	\param count The count to subtract.
902 */
void
Team::UserDefinedTimersRemoved(int32 count)
{
	// atomically decrement; counterpart to CheckAddUserDefinedTimer()
	atomic_add(&fUserDefinedTimerCount, -count);
}
908 
909 
910 void
911 Team::DeactivateCPUTimeUserTimers()
912 {
913 	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
914 		timer->Deactivate();
915 
916 	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
917 		timer->Deactivate();
918 }
919 
920 
921 /*!	Returns the team's current total CPU time (kernel + user + offset).
922 
923 	The caller must hold \c time_lock.
924 
925 	\param ignoreCurrentRun If \c true and the current thread is one team's
926 		threads, don't add the time since the last time \c last_time was
927 		updated. Should be used in "thread unscheduled" scheduler callbacks,
928 		since although the thread is still running at that time, its time has
929 		already been stopped.
930 	\return The team's current total CPU time.
931 */
bigtime_t
Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
{
	// start with the accumulated times of already-dead threads plus the
	// team's clock offset
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		// \a lockedThread's time_lock is already held by the caller; tell
		// the locker so, and Detach() below keeps it held on exit
		bool alreadyLocked = thread == lockedThread;
		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
		time += thread->kernel_time + thread->user_time;

		// a non-zero last_time means the thread is currently scheduled;
		// add its still-running slice unless explicitly ignored
		if (thread->last_time != 0) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}

		if (alreadyLocked)
			threadTimeLocker.Detach();
	}

	return time;
}
958 
959 
960 /*!	Returns the team's current user CPU time.
961 
962 	The caller must hold \c time_lock.
963 
964 	\return The team's current user CPU time.
965 */
bigtime_t
Team::UserCPUTime() const
{
	// start with the accumulated user time of already-dead threads
	bigtime_t time = dead_threads_user_time;

	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->user_time;

		// a running thread (last_time != 0) that is in userland is
		// currently accumulating user time -- add the open slice
		if (thread->last_time != 0 && !thread->in_kernel)
			time += now - thread->last_time;
	}

	return time;
}
984 
985 
986 //	#pragma mark - ProcessGroup
987 
988 
989 ProcessGroup::ProcessGroup(pid_t id)
990 	:
991 	id(id),
992 	teams(NULL),
993 	fSession(NULL),
994 	fInOrphanedCheckList(false)
995 {
996 	char lockName[32];
997 	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
998 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
999 }
1000 
1001 
/*!	Unregisters the group from the orphaned-check list and -- if it was
	published -- from the group hash table and its session.
*/
ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session
	// (fSession != NULL implies the group was published)
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}
1025 
1026 
1027 /*static*/ ProcessGroup*
1028 ProcessGroup::Get(pid_t id)
1029 {
1030 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1031 	ProcessGroup* group = sGroupHash.Lookup(id);
1032 	if (group != NULL)
1033 		group->AcquireReference();
1034 	return group;
1035 }
1036 
1037 
1038 /*!	Adds the group the given session and makes it publicly accessible.
1039 	The caller must not hold the process group hash lock.
1040 */
1041 void
1042 ProcessGroup::Publish(ProcessSession* session)
1043 {
1044 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1045 	PublishLocked(session);
1046 }
1047 
1048 
1049 /*!	Adds the group to the given session and makes it publicly accessible.
1050 	The caller must hold the process group hash lock.
1051 */
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	// a group with this ID must not be published yet
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	// attach to the session (keeping a reference to it) ...
	fSession = session;
	fSession->AcquireReference();

	// ... and make the group publicly visible
	sGroupHash.InsertUnchecked(this);
}
1062 
1063 
1064 /*!	Checks whether the process group is orphaned.
1065 	The caller must hold the group's lock.
1066 	\return \c true, if the group is orphaned, \c false otherwise.
1067 */
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	// Walk all member teams; a single member whose parent is outside the
	// group but inside the session makes the group non-orphaned.
	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}
1093 
1094 
1095 void
1096 ProcessGroup::ScheduleOrphanedCheck()
1097 {
1098 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1099 
1100 	if (!fInOrphanedCheckList) {
1101 		sOrphanedCheckProcessGroups.Add(this);
1102 		fInOrphanedCheckList = true;
1103 	}
1104 }
1105 
1106 
void
ProcessGroup::UnsetOrphanedCheck()
{
	// only clears the flag -- removing the group from
	// sOrphanedCheckProcessGroups is the caller's job (presumably the
	// code draining that list; not visible in this chunk)
	fInOrphanedCheckList = false;
}
1112 
1113 
1114 //	#pragma mark - ProcessSession
1115 
1116 
1117 ProcessSession::ProcessSession(pid_t id)
1118 	:
1119 	id(id),
1120 	controlling_tty(-1),
1121 	foreground_group(-1)
1122 {
1123 	char lockName[32];
1124 	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
1125 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
1126 }
1127 
1128 
ProcessSession::~ProcessSession()
{
	mutex_destroy(&fLock);
}
1133 
1134 
1135 //	#pragma mark - KDL functions
1136 
1137 
/*!	Prints the most important fields of \a team to the kernel debugger
	console. KDL helper -- performs no locking.
*/
static void
_dump_team_info(Team* team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
		team->id);
	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
	kprintf("name:             '%s'\n", team->Name());
	kprintf("args:             '%s'\n", team->Args());
	kprintf("hash_next:        %p\n", team->hash_next);
	kprintf("parent:           %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:         %p\n", team->children);
	kprintf("num_threads:      %d\n", team->num_threads);
	kprintf("state:            %d\n", team->state);
	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
	kprintf("io_context:       %p\n", team->io_context);
	if (team->address_space)
		kprintf("address_space:    %p\n", team->address_space);
	kprintf("user data:        %p (area %" B_PRId32 ")\n",
		(void*)team->user_data, team->user_data_area);
	kprintf("free user thread: %p\n", team->free_user_threads);
	kprintf("main_thread:      %p\n", team->main_thread);
	kprintf("thread_list:      %p\n", team->thread_list);
	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
}
1169 
1170 
/*!	KDL command "team": dumps info about a single team.
	Without arguments the current thread's team is dumped; otherwise the
	argument is interpreted as a Team pointer (if it is a kernel address),
	or as a team name or ID to search for.
*/
static int
dump_team_info(int argc, char** argv)
{
	ulong arg;
	bool found = false;

	if (argc < 2) {
		// no argument -- dump the current team
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	arg = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(arg)) {
		// semi-hack
		// (interpret a kernel address directly as a Team pointer)
		_dump_team_info((Team*)arg);
		return 0;
	}

	// walk through the thread list, trying to match name or id
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == (team_id)arg) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found)
		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
	return 0;
}
1208 
1209 
/*!	KDL command "teams": lists all teams (pointer, ID, parent, name). */
static int
dump_teams(int argc, char** argv)
{
	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
		B_PRINTF_POINTER_WIDTH, "parent");

	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
	}

	return 0;
}
1223 
1224 
1225 //	#pragma mark - Private functions
1226 
1227 
1228 /*! Get the parent of a given process.
1229 
1230 	Used in the implementation of getppid (where a process can get its own
1231 	parent, only) as well as in user_process_info where the information is
1232 	available to anyone (allowing to display a tree of running processes)
1233 */
1234 static pid_t
1235 _getppid(pid_t id)
1236 {
1237 	if (id < 0) {
1238 		errno = EINVAL;
1239 		return -1;
1240 	}
1241 
1242 	if (id == 0) {
1243 		Team* team = thread_get_current_thread()->team;
1244 		TeamLocker teamLocker(team);
1245 		if (team->parent == NULL) {
1246 			errno = EINVAL;
1247 			return -1;
1248 		}
1249 		return team->parent->id;
1250 	}
1251 
1252 	Team* team = Team::GetAndLock(id);
1253 	if (team == NULL) {
1254 		errno = ESRCH;
1255 		return -1;
1256 	}
1257 
1258 	pid_t parentID;
1259 
1260 	if (team->parent == NULL) {
1261 		errno = EINVAL;
1262 		parentID = -1;
1263 	} else
1264 		parentID = team->parent->id;
1265 
1266 	team->UnlockAndReleaseReference();
1267 
1268 	return parentID;
1269 }
1270 
1271 
1272 /*!	Inserts team \a team into the child list of team \a parent.
1273 
1274 	The caller must hold the lock of both \a parent and \a team.
1275 
1276 	\param parent The parent team.
1277 	\param team The team to be inserted into \a parent's child list.
1278 */
1279 static void
1280 insert_team_into_parent(Team* parent, Team* team)
1281 {
1282 	ASSERT(parent != NULL);
1283 
1284 	team->siblings_next = parent->children;
1285 	parent->children = team;
1286 	team->parent = parent;
1287 }
1288 
1289 
1290 /*!	Removes team \a team from the child list of team \a parent.
1291 
1292 	The caller must hold the lock of both \a parent and \a team.
1293 
1294 	\param parent The parent team.
1295 	\param team The team to be removed from \a parent's child list.
1296 */
1297 static void
1298 remove_team_from_parent(Team* parent, Team* team)
1299 {
1300 	Team* child;
1301 	Team* last = NULL;
1302 
1303 	for (child = parent->children; child != NULL;
1304 			child = child->siblings_next) {
1305 		if (child == team) {
1306 			if (last == NULL)
1307 				parent->children = child->siblings_next;
1308 			else
1309 				last->siblings_next = child->siblings_next;
1310 
1311 			team->parent = NULL;
1312 			break;
1313 		}
1314 		last = child;
1315 	}
1316 }
1317 
1318 
1319 /*!	Returns whether the given team is a session leader.
1320 	The caller must hold the team's lock or its process group's lock.
1321 */
1322 static bool
1323 is_session_leader(Team* team)
1324 {
1325 	return team->session_id == team->id;
1326 }
1327 
1328 
1329 /*!	Returns whether the given team is a process group leader.
1330 	The caller must hold the team's lock or its process group's lock.
1331 */
1332 static bool
1333 is_process_group_leader(Team* team)
1334 {
1335 	return team->group_id == team->id;
1336 }
1337 
1338 
1339 /*!	Inserts the given team into the given process group.
1340 	The caller must hold the process group's lock, the team's lock, and the
1341 	team's parent's lock.
1342 */
1343 static void
1344 insert_team_into_group(ProcessGroup* group, Team* team)
1345 {
1346 	team->group = group;
1347 	team->group_id = group->id;
1348 	team->session_id = group->Session()->id;
1349 
1350 	team->group_next = group->teams;
1351 	group->teams = team;
1352 	group->AcquireReference();
1353 }
1354 
1355 
1356 /*!	Removes the given team from its process group.
1357 
1358 	The caller must hold the process group's lock, the team's lock, and the
1359 	team's parent's lock. Interrupts must be enabled.
1360 
1361 	\param team The team that'll be removed from its process group.
1362 */
1363 static void
1364 remove_team_from_group(Team* team)
1365 {
1366 	ProcessGroup* group = team->group;
1367 	Team* current;
1368 	Team* last = NULL;
1369 
1370 	// the team must be in a process group to let this function have any effect
1371 	if (group == NULL)
1372 		return;
1373 
1374 	for (current = group->teams; current != NULL;
1375 			current = current->group_next) {
1376 		if (current == team) {
1377 			if (last == NULL)
1378 				group->teams = current->group_next;
1379 			else
1380 				last->group_next = current->group_next;
1381 
1382 			break;
1383 		}
1384 		last = current;
1385 	}
1386 
1387 	team->group = NULL;
1388 	team->group_next = NULL;
1389 	team->group_id = -1;
1390 
1391 	group->ReleaseReference();
1392 }
1393 
1394 
/*!	Creates the team's user data area and reserves the surrounding address
	range for later growth.

	If \a exactAddress is given, both the reservation and the area are
	placed at exactly that address (used when the layout must replicate an
	existing team's, e.g. on fork). Otherwise a randomized base starting at
	KERNEL_USER_DATA_BASE is chosen.

	On success initializes \c team->user_data, \c used_user_data,
	\c user_data_size and \c free_user_threads.

	\param team The team to create the user data area for.
	\param exactAddress If not \c NULL, the exact address to use.
	\return \c B_OK on success, or the error returned by create_area_etc().
*/
static status_t
create_team_user_data(Team* team, void* exactAddress = NULL)
{
	void* address;
	uint32 addressSpec;

	if (exactAddress != NULL) {
		address = exactAddress;
		addressSpec = B_EXACT_ADDRESS;
	} else {
		address = (void*)KERNEL_USER_DATA_BASE;
		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
	}

	// reserve the full range; the initial area only covers part of it
	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);

	virtual_address_restrictions virtualRestrictions = {};
	if (result == B_OK || exactAddress != NULL) {
		// Either the reservation succeeded (use the address it yielded), or
		// an exact address was requested — then we create the area there
		// even if the reservation failed.
		if (exactAddress != NULL)
			virtualRestrictions.address = exactAddress;
		else
			virtualRestrictions.address = address;
		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	} else {
		// no reservation — fall back to a randomized base for the area itself
		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
	}

	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area",
		kTeamUserDataInitialSize, B_FULL_LOCK,
		B_READ_AREA | B_WRITE_AREA | B_KERNEL_AREA, 0, 0,
		&virtualRestrictions, &physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = kTeamUserDataInitialSize;
	team->free_user_threads = NULL;

	return B_OK;
}
1439 
1440 
1441 static void
1442 delete_team_user_data(Team* team)
1443 {
1444 	if (team->user_data_area >= 0) {
1445 		vm_delete_area(team->id, team->user_data_area, true);
1446 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1447 			kTeamUserDataReservedSize);
1448 
1449 		team->user_data = 0;
1450 		team->used_user_data = 0;
1451 		team->user_data_size = 0;
1452 		team->user_data_area = -1;
1453 		while (free_user_thread* entry = team->free_user_threads) {
1454 			team->free_user_threads = entry->next;
1455 			free(entry);
1456 		}
1457 	}
1458 }
1459 
1460 
1461 static status_t
1462 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1463 	int32 argCount, int32 envCount, char**& _flatArgs)
1464 {
1465 	if (argCount < 0 || envCount < 0)
1466 		return B_BAD_VALUE;
1467 
1468 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1469 		return B_TOO_MANY_ARGS;
1470 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1471 		return B_BAD_VALUE;
1472 
1473 	if (!IS_USER_ADDRESS(userFlatArgs))
1474 		return B_BAD_ADDRESS;
1475 
1476 	// allocate kernel memory
1477 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1478 	if (flatArgs == NULL)
1479 		return B_NO_MEMORY;
1480 
1481 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1482 		free(flatArgs);
1483 		return B_BAD_ADDRESS;
1484 	}
1485 
1486 	// check and relocate the array
1487 	status_t error = B_OK;
1488 	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
1489 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1490 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1491 		if (i == argCount || i == argCount + envCount + 1) {
1492 			// check array null termination
1493 			if (flatArgs[i] != NULL) {
1494 				error = B_BAD_VALUE;
1495 				break;
1496 			}
1497 		} else {
1498 			// check string
1499 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1500 			size_t maxLen = stringEnd - arg;
1501 			if (arg < stringBase || arg >= stringEnd
1502 					|| strnlen(arg, maxLen) == maxLen) {
1503 				error = B_BAD_VALUE;
1504 				break;
1505 			}
1506 
1507 			flatArgs[i] = arg;
1508 		}
1509 	}
1510 
1511 	if (error == B_OK)
1512 		_flatArgs = flatArgs;
1513 	else
1514 		free(flatArgs);
1515 
1516 	return error;
1517 }
1518 
1519 
1520 static void
1521 free_team_arg(struct team_arg* teamArg)
1522 {
1523 	if (teamArg != NULL) {
1524 		free(teamArg->flat_args);
1525 		free(teamArg->path);
1526 		free(teamArg);
1527 	}
1528 }
1529 
1530 
1531 static status_t
1532 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1533 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1534 	port_id port, uint32 token)
1535 {
1536 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1537 	if (teamArg == NULL)
1538 		return B_NO_MEMORY;
1539 
1540 	teamArg->path = strdup(path);
1541 	if (teamArg->path == NULL) {
1542 		free(teamArg);
1543 		return B_NO_MEMORY;
1544 	}
1545 
1546 	// copy the args over
1547 	teamArg->flat_args = flatArgs;
1548 	teamArg->flat_args_size = flatArgsSize;
1549 	teamArg->arg_count = argCount;
1550 	teamArg->env_count = envCount;
1551 	teamArg->flags = 0;
1552 	teamArg->umask = umask;
1553 	teamArg->error_port = port;
1554 	teamArg->error_token = token;
1555 
1556 	// determine the flags from the environment
1557 	const char* const* env = flatArgs + argCount + 1;
1558 	for (int32 i = 0; i < envCount; i++) {
1559 		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
1560 			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
1561 			break;
1562 		}
1563 	}
1564 
1565 	*_teamArg = teamArg;
1566 	return B_OK;
1567 }
1568 
1569 
/*!	Entry point of the main thread of a newly created userland team.

	Copies the process arguments and the user_space_program_args structure
	onto the thread's user stack, cleans up the team_arg structure, clones
	and registers the commpage, loads the runtime loader image, and finally
	enters userspace. On failure an error is returned; the team deletion
	process cleans up whatever was set up so far.

	\param args The team_arg structure for the team (ownership is taken
		over; it is freed once its contents are on the user stack, or on
		copy failure).
	\return Only returns in case of error.
*/
static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
		thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size								| usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
	// TLS_SIZE							| TLS data
	// sizeof(user_space_program_args)	| argument structure for the runtime
	//									| loader
	// flat arguments size				| flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	// the argument structure lives right above the stack and the TLS block
	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	// fill in the program arguments structure and copy the flat
	// arguments/environment right behind it, all in user memory
	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(&programArgs->disable_user_addons,
			&sDisableUserAddOns, sizeof(bool)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// set team args and update state
	team->Lock();
	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	// Clone commpage area
	area_id commPageArea = clone_commpage_area(team->id,
		&team->commpage_address);
	if (commPageArea  < B_OK) {
		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
			strerror(commPageArea)));
		return commPageArea;
	}

	// Register commpage image
	image_id commPageImage = get_commpage_image();
	extended_image_info imageInfo;
	err = get_image_info(commPageImage, &imageInfo.basic_info);
	if (err != B_OK) {
		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
			strerror(err)));
		return err;
	}
	imageInfo.basic_info.text = team->commpage_address;
	imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
	imageInfo.symbol_table = NULL;
	imageInfo.symbol_hash = NULL;
	imageInfo.string_table = NULL;
	image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
	if (image < 0) {
		TRACE(("team_create_thread_start: register_image() failed: %s\n",
			strerror(image)));
		return image;
	}

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	// enter userspace -- returns only in case of error
	return thread_enter_userspace_new_team(thread, (addr_t)entry,
		programArgs, team->commpage_address);
}
1712 
1713 
1714 static status_t
1715 team_create_thread_start(void* args)
1716 {
1717 	team_create_thread_start_internal(args);
1718 	team_init_exit_info_on_error(thread_get_current_thread()->team);
1719 	thread_exit();
1720 		// does not return
1721 	return B_OK;
1722 }
1723 
1724 
/*!	Creates a new userland team and loads the executable given by the flat
	arguments into it.

	Creates the team and its main thread objects, inherits user/group and
	I/O context from the parent team, sets up a fresh address space and
	user data area, links the team into the parent/group/global structures,
	and finally spawns the main thread running team_create_thread_start().

	\param _flatArgs The flat arguments/environment array; on success it is
		set to \c NULL, since ownership passes to the team_arg structure.
	\param flatArgsSize Size of the flat arguments in bytes.
	\param argCount Number of argument strings.
	\param envCount Number of environment strings.
	\param priority Requested priority for the main thread (currently the
		thread is created with B_NORMAL_PRIORITY regardless).
	\param parentID The team whose user/group and I/O context are inherited.
	\param flags If \c B_WAIT_TILL_LOADED is set, only returns after the
		runtime loader signaled completion (or failure) of loading.
	\param errorPort Port the runtime loader reports load errors to.
	\param errorToken Token identifying this load on \a errorPort.
	\return The ID of the new team's main thread, or an error code.
*/
static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	thread_id thread;
	status_t status;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	ConditionVariableEntry loadingWaitEntry;
	io_context* parentIOContext = NULL;
	team_id teamID;
	bool teamLimitReached = false;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	// the first flat argument is the path of the executable
	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
		"\n", path, flatArgs, argCount));

	// cut the path from the main thread name
	const char* threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the main thread object
	Thread* mainThread;
	status = Thread::Create(threadName, mainThread);
	if (status != B_OK)
		return status;
	BReference<Thread> mainThreadReference(mainThread, true);

	// create team object
	Team* team = Team::Create(mainThread->id, path, false);
	if (team == NULL)
		return B_NO_MEMORY;
	BReference<Team> teamReference(team, true);

	// set up the structure the loader thread will notify through, if the
	// caller wants to wait until loading finished
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		loadingInfo.condition.Init(team, "image load");
		loadingInfo.condition.Add(&loadingWaitEntry);
		loadingInfo.result = B_ERROR;
		team->loading_info = &loadingInfo;
	}

	// get the parent team
	Team* parent = Team::Get(parentID);
	if (parent == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> parentReference(parent, true);

	parent->LockTeamAndProcessGroup();
	team->Lock();

	// inherit the parent's user/group
	inherit_parent_user_and_group(team, parent);

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err2;

	// honor a DISABLE_ASLR=1 request from the environment
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// insert the team into its parent and the teams hash
	parent->LockTeamAndProcessGroup();
	team->Lock();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);

		sTeamHash.Insert(team);
		teamLimitReached = sUsedTeams >= sMaxTeams;
		if (!teamLimitReached)
			sUsedTeams++;
	}

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// The team is fully inserted before the limit check, so the error path
	// can unwind it uniformly.
	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err6;
	}

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err6;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		if (mainThread != NULL) {
			// resume the team's main thread
			thread_continue(mainThread);
		}

		// Now wait until loading is finished. We will be woken either by the
		// thread, when it finished or aborted loading, or when the team is
		// going to die (e.g. is killed). In either case the one notifying is
		// responsible for unsetting `loading_info` in the team structure.
		loadingWaitEntry.Wait();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(teamID);

	return thread;

err6:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
		sTeamHash.Remove(team);
		if (!teamLimitReached)
			sUsedTeams--;
	}

	sNotificationService.Notify(TEAM_REMOVED, team);

	delete_team_user_data(team);
err4:
	team->address_space->Put();
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	return status;
}
1936 
1937 
1938 /*!	Almost shuts down the current team and loads a new image into it.
1939 	If successful, this function does not return and will takeover ownership of
1940 	the arguments provided.
1941 	This function may only be called in a userland team (caused by one of the
1942 	exec*() syscalls).
1943 */
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
		team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	// any thread other than the main thread and the nub thread forbids exec
	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	// POSIX requires exec to drop user timers and reset signal dispositions
	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	// build the team_arg structure before tearing anything down, so we can
	// still fail cleanly here
	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	// Point of no return: tear down the old userland image -- all areas,
	// XSI semaphore undo state, owned ports/semaphores, images, CLOEXEC
	// file descriptors, and the realtime semaphore context.
	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	// update ASLR
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
			// does never return
	} else
		free_team_arg(teamArgs);

	// Sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that).
	exit_thread(status);

	// We return a status here since the signal that is sent by the
	// call above is not immediately handled.
	return B_ERROR;
}
2084 
2085 
2086 static thread_id
2087 fork_team(void)
2088 {
2089 	Thread* parentThread = thread_get_current_thread();
2090 	Team* parentTeam = parentThread->team;
2091 	Team* team;
2092 	arch_fork_arg* forkArgs;
2093 	struct area_info info;
2094 	thread_id threadID;
2095 	status_t status;
2096 	ssize_t areaCookie;
2097 	bool teamLimitReached = false;
2098 
2099 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2100 
2101 	if (parentTeam == team_get_kernel_team())
2102 		return B_NOT_ALLOWED;
2103 
2104 	// create a new team
2105 	// TODO: this is very similar to load_image_internal() - maybe we can do
2106 	// something about it :)
2107 
2108 	// create the main thread object
2109 	Thread* thread;
2110 	status = Thread::Create(parentThread->name, thread);
2111 	if (status != B_OK)
2112 		return status;
2113 	BReference<Thread> threadReference(thread, true);
2114 
2115 	// create the team object
2116 	team = Team::Create(thread->id, NULL, false);
2117 	if (team == NULL)
2118 		return B_NO_MEMORY;
2119 
2120 	parentTeam->LockTeamAndProcessGroup();
2121 	team->Lock();
2122 
2123 	team->SetName(parentTeam->Name());
2124 	team->SetArgs(parentTeam->Args());
2125 
2126 	team->commpage_address = parentTeam->commpage_address;
2127 
2128 	// Inherit the parent's user/group.
2129 	inherit_parent_user_and_group(team, parentTeam);
2130 
2131 	// inherit signal handlers
2132 	team->InheritSignalActions(parentTeam);
2133 
2134 	team->Unlock();
2135 	parentTeam->UnlockTeamAndProcessGroup();
2136 
2137 	// inherit some team debug flags
2138 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2139 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2140 
2141 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2142 	if (forkArgs == NULL) {
2143 		status = B_NO_MEMORY;
2144 		goto err1;
2145 	}
2146 
2147 	// create a new io_context for this team
2148 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2149 	if (!team->io_context) {
2150 		status = B_NO_MEMORY;
2151 		goto err2;
2152 	}
2153 
2154 	// duplicate the realtime sem context
2155 	if (parentTeam->realtime_sem_context) {
2156 		team->realtime_sem_context = clone_realtime_sem_context(
2157 			parentTeam->realtime_sem_context);
2158 		if (team->realtime_sem_context == NULL) {
2159 			status = B_NO_MEMORY;
2160 			goto err2;
2161 		}
2162 	}
2163 
2164 	// create an address space for this team
2165 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2166 		&team->address_space);
2167 	if (status < B_OK)
2168 		goto err3;
2169 
2170 	// copy all areas of the team
2171 	// TODO: should be able to handle stack areas differently (ie. don't have
2172 	// them copy-on-write)
2173 
2174 	areaCookie = 0;
2175 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2176 		if (info.area == parentTeam->user_data_area) {
2177 			// don't clone the user area; just create a new one
2178 			status = create_team_user_data(team, info.address);
2179 			if (status != B_OK)
2180 				break;
2181 
2182 			thread->user_thread = team_allocate_user_thread(team);
2183 		} else {
2184 			void* address;
2185 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2186 				&address, B_CLONE_ADDRESS, info.area);
2187 			if (area < B_OK) {
2188 				status = area;
2189 				break;
2190 			}
2191 
2192 			if (info.area == parentThread->user_stack_area)
2193 				thread->user_stack_area = area;
2194 		}
2195 	}
2196 
2197 	if (status < B_OK)
2198 		goto err4;
2199 
2200 	if (thread->user_thread == NULL) {
2201 #if KDEBUG
2202 		panic("user data area not found, parent area is %" B_PRId32,
2203 			parentTeam->user_data_area);
2204 #endif
2205 		status = B_ERROR;
2206 		goto err4;
2207 	}
2208 
2209 	thread->user_stack_base = parentThread->user_stack_base;
2210 	thread->user_stack_size = parentThread->user_stack_size;
2211 	thread->user_local_storage = parentThread->user_local_storage;
2212 	thread->sig_block_mask = parentThread->sig_block_mask;
2213 	thread->signal_stack_base = parentThread->signal_stack_base;
2214 	thread->signal_stack_size = parentThread->signal_stack_size;
2215 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2216 
2217 	arch_store_fork_frame(forkArgs);
2218 
2219 	// copy image list
2220 	if (copy_images(parentTeam->id, team) != B_OK)
2221 		goto err5;
2222 
2223 	// insert the team into its parent and the teams hash
2224 	parentTeam->LockTeamAndProcessGroup();
2225 	team->Lock();
2226 
2227 	{
2228 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2229 
2230 		sTeamHash.Insert(team);
2231 		teamLimitReached = sUsedTeams >= sMaxTeams;
2232 		if (!teamLimitReached)
2233 			sUsedTeams++;
2234 	}
2235 
2236 	insert_team_into_parent(parentTeam, team);
2237 	insert_team_into_group(parentTeam->group, team);
2238 
2239 	team->Unlock();
2240 	parentTeam->UnlockTeamAndProcessGroup();
2241 
2242 	// notify team listeners
2243 	sNotificationService.Notify(TEAM_ADDED, team);
2244 
2245 	if (teamLimitReached) {
2246 		status = B_NO_MORE_TEAMS;
2247 		goto err6;
2248 	}
2249 
2250 	// create the main thread
2251 	{
2252 		ThreadCreationAttributes threadCreationAttributes(NULL,
2253 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2254 		threadCreationAttributes.forkArgs = forkArgs;
2255 		threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS;
2256 		threadID = thread_create_thread(threadCreationAttributes, false);
2257 		if (threadID < 0) {
2258 			status = threadID;
2259 			goto err6;
2260 		}
2261 	}
2262 
2263 	// notify the debugger
2264 	user_debug_team_created(team->id);
2265 
2266 	T(TeamForked(threadID));
2267 
2268 	resume_thread(threadID);
2269 	return threadID;
2270 
2271 err6:
2272 	// Remove the team structure from the process group, the parent team, and
2273 	// the team hash table and delete the team structure.
2274 	parentTeam->LockTeamAndProcessGroup();
2275 	team->Lock();
2276 
2277 	remove_team_from_group(team);
2278 	remove_team_from_parent(team->parent, team);
2279 
2280 	team->Unlock();
2281 	parentTeam->UnlockTeamAndProcessGroup();
2282 
2283 	{
2284 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2285 		sTeamHash.Remove(team);
2286 		if (!teamLimitReached)
2287 			sUsedTeams--;
2288 	}
2289 
2290 	sNotificationService.Notify(TEAM_REMOVED, team);
2291 err5:
2292 	remove_images(team);
2293 err4:
2294 	team->address_space->RemoveAndPut();
2295 err3:
2296 	delete_realtime_sem_context(team->realtime_sem_context);
2297 err2:
2298 	free(forkArgs);
2299 err1:
2300 	team->ReleaseReference();
2301 
2302 	return status;
2303 }
2304 
2305 
2306 /*!	Returns if the specified team \a parent has any children belonging to the
2307 	process group with the specified ID \a groupID.
2308 	The caller must hold \a parent's lock.
2309 */
2310 static bool
2311 has_children_in_group(Team* parent, pid_t groupID)
2312 {
2313 	for (Team* child = parent->children; child != NULL;
2314 			child = child->siblings_next) {
2315 		TeamLocker childLocker(child);
2316 		if (child->group_id == groupID)
2317 			return true;
2318 	}
2319 
2320 	return false;
2321 }
2322 
2323 
2324 /*!	Returns the first job control entry from \a children, which matches \a id.
2325 	\a id can be:
2326 	- \code > 0 \endcode: Matching an entry with that team ID.
2327 	- \code == -1 \endcode: Matching any entry.
2328 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2329 	\c 0 is an invalid value for \a id.
2330 
2331 	The caller must hold the lock of the team that \a children belongs to.
2332 
2333 	\param children The job control entry list to check.
2334 	\param id The match criterion.
2335 	\return The first matching entry or \c NULL, if none matches.
2336 */
2337 static job_control_entry*
2338 get_job_control_entry(team_job_control_children& children, pid_t id)
2339 {
2340 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2341 		 job_control_entry* entry = it.Next();) {
2342 
2343 		if (id > 0) {
2344 			if (entry->thread == id)
2345 				return entry;
2346 		} else if (id == -1) {
2347 			return entry;
2348 		} else {
2349 			pid_t processGroup
2350 				= (entry->team ? entry->team->group_id : entry->group_id);
2351 			if (processGroup == -id)
2352 				return entry;
2353 		}
2354 	}
2355 
2356 	return NULL;
2357 }
2358 
2359 
2360 /*!	Returns the first job control entry from one of team's dead, continued, or
2361 	stopped children which matches \a id.
2362 	\a id can be:
2363 	- \code > 0 \endcode: Matching an entry with that team ID.
2364 	- \code == -1 \endcode: Matching any entry.
2365 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2366 	\c 0 is an invalid value for \a id.
2367 
2368 	The caller must hold \a team's lock.
2369 
2370 	\param team The team whose dead, stopped, and continued child lists shall be
2371 		checked.
2372 	\param id The match criterion.
2373 	\param flags Specifies which children shall be considered. Dead children
2374 		are considered when \a flags is ORed bitwise with \c WEXITED, stopped
2375 		children are considered when \a flags is ORed bitwise with \c WUNTRACED
2376 		or \c WSTOPPED, continued children when \a flags is ORed bitwise with
2377 		\c WCONTINUED.
2378 	\return The first matching entry or \c NULL, if none matches.
2379 */
2380 static job_control_entry*
2381 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2382 {
2383 	job_control_entry* entry = NULL;
2384 
2385 	if ((flags & WEXITED) != 0)
2386 		entry = get_job_control_entry(team->dead_children, id);
2387 
2388 	if (entry == NULL && (flags & WCONTINUED) != 0)
2389 		entry = get_job_control_entry(team->continued_children, id);
2390 
2391 	if (entry == NULL && (flags & (WUNTRACED | WSTOPPED)) != 0)
2392 		entry = get_job_control_entry(team->stopped_children, id);
2393 
2394 	return entry;
2395 }
2396 
2397 
/*!	Creates a job control entry that does not yet hold a process group
	reference; the other fields are set up by the owner (cf. InitDeadState()
	and operator=()).
*/
job_control_entry::job_control_entry()
	:
	has_group_ref(false)
{
}
2403 
2404 
/*!	Releases the process group reference acquired in InitDeadState(), if any.
	The group is looked up in the global group hash; not finding it indicates
	an inconsistency and causes a panic.
*/
job_control_entry::~job_control_entry()
{
	if (has_group_ref) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);

		ProcessGroup* group = sGroupHash.Lookup(group_id);
		if (group == NULL) {
			panic("job_control_entry::~job_control_entry(): unknown group "
				"ID: %" B_PRId32, group_id);
			return;
		}

		// release the reference without holding the hash lock
		groupHashLocker.Unlock();

		group->ReleaseReference();
	}
}
2422 
2423 
/*!	Invoked when the owning team is dying, initializing the entry according to
	the dead state.

	Copies the team's exit status and accumulated CPU times into the entry,
	acquires a reference to the team's process group (released again in the
	destructor), and finally detaches the entry from the team by setting
	\c team to \c NULL.

	The caller must hold the owning team's lock and the scheduler lock.
*/
void
job_control_entry::InitDeadState()
{
	if (team != NULL) {
		// the team must have initialized its exit info before dying
		ASSERT(team->exit.initialized);

		group_id = team->group_id;
		team->group->AcquireReference();
		has_group_ref = true;

		thread = team->id;
		status = team->exit.status;
		reason = team->exit.reason;
		signal = team->exit.signal;
		signaling_user = team->exit.signaling_user;
		// CPU times: the team's own dead threads plus its dead children
		user_time = team->dead_threads_user_time
			+ team->dead_children.user_time;
		kernel_time = team->dead_threads_kernel_time
			+ team->dead_children.kernel_time;

		// the entry no longer refers to a live team
		team = NULL;
	}
}
2452 
2453 
/*!	Copies all fields from \a other, with the exception of \c has_group_ref,
	which is deliberately set to \c false: the process group reference (if
	any) stays with \a other and is not duplicated by the copy.
*/
job_control_entry&
job_control_entry::operator=(const job_control_entry& other)
{
	state = other.state;
	thread = other.thread;
	signal = other.signal;
	has_group_ref = false;
		// the group reference is not transferred
	signaling_user = other.signaling_user;
	team = other.team;
	group_id = other.group_id;
	status = other.status;
	reason = other.reason;
	user_time = other.user_time;
	kernel_time = other.kernel_time;

	return *this;
}
2471 
2472 
/*!	This is the kernel backend for waitid().

	Waits for a state change (death, stop, continuation) of one of the calling
	team's children, as selected by \a child and \a flags.

	\param child > 0: wait for that team; == 0: wait for any child in the
		caller's process group; == -1: wait for any child; < -1: wait for any
		child in the process group \c -child.
	\param flags \c WEXITED, \c WUNTRACED/\c WSTOPPED, and \c WCONTINUED
		select the state changes to wait for; \c WNOHANG and \c WNOWAIT modify
		the blocking/reaping behavior.
	\param _info Filled in with the SIGCHLD-style \c siginfo_t on success.
	\param _usage_info Filled in with the child's CPU times when a dead child
		was reaped.
	\return The ID of the team whose state change was consumed, or an error
		(\c B_BAD_VALUE, \c ECHILD, \c B_WOULD_BLOCK, \c B_INTERRUPTED).
*/
static thread_id
wait_for_child(pid_t child, uint32 flags, siginfo_t& _info,
	team_usage_info& _usage_info)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	struct job_control_entry foundEntry;
	struct job_control_entry* freeDeathEntry = NULL;
	status_t status = B_OK;

	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
		child, flags));

	T(WaitForChild(child, flags));

	// at least one state change to wait for must be specified
	if ((flags & (WEXITED | WUNTRACED | WSTOPPED | WCONTINUED)) == 0) {
		T(WaitForChildDone(B_BAD_VALUE));
		return B_BAD_VALUE;
	}

	pid_t originalChild = child;

	bool ignoreFoundEntries = false;
	bool ignoreFoundEntriesChecked = false;

	// loop until a matching state change was consumed or an error occurred
	while (true) {
		// lock the team
		TeamLocker teamLocker(team);

		// A 0 child argument means to wait for all children in the process
		// group of the calling team.
		child = originalChild == 0 ? -team->group_id : originalChild;

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		// If we don't have an entry yet, check whether there are any children
		// complying to the process group specification at all.
		if (entry == NULL) {
			// No success yet -- check whether there are any children complying
			// to the process group specification at all.
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else if (child != team->id) {
				if (Team* childTeam = Team::Get(child)) {
					BReference<Team> childTeamReference(childTeam, true);
					TeamLocker childTeamLocker(childTeam);
					childrenExist = childTeam->parent == team;
				}
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;

			// unless WNOWAIT has been specified, "consume" the wait state
			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
				if (entry->state == JOB_CONTROL_STATE_DEAD) {
					// The child is dead. Reap its death entry.
					freeDeathEntry = entry;
					team->dead_children.entries.Remove(entry);
					team->dead_children.count--;
				} else {
					// The child is well. Reset its job control state.
					team_set_job_control_state(entry->team,
						JOB_CONTROL_STATE_NONE, NULL);
				}
			}
		}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children.condition_variable.Add(&deadWaitEntry);

		teamLocker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		// block until a child's state changes (or we get interrupted)
		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			teamLocker.Lock();

			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			teamLocker.Unlock();

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// When we got here, we have a valid death entry, and already got
	// unregistered from the team or group. Fill in the returned info.
	memset(&_info, 0, sizeof(_info));
	_info.si_signo = SIGCHLD;
	_info.si_pid = foundEntry.thread;
	_info.si_uid = foundEntry.signaling_user;
	// TODO: Fill in si_errno?

	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			_info.si_code = foundEntry.reason;
			_info.si_status = foundEntry.reason == CLD_EXITED
				? foundEntry.status : foundEntry.signal;
			_usage_info.user_time = foundEntry.user_time;
			_usage_info.kernel_time = foundEntry.kernel_time;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			_info.si_code = CLD_STOPPED;
			_info.si_status = foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			_info.si_code = CLD_CONTINUED;
			_info.si_status = 0;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
	// status is available.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker signalLocker(team->signal_lock);
	SpinLocker threadCreationLocker(gThreadCreationLock);

	if (is_team_signal_blocked(team, SIGCHLD)) {
		if (get_job_control_entry(team, child, flags) == NULL)
			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
	}

	threadCreationLocker.Unlock();
	signalLocker.Unlock();
	teamLocker.Unlock();

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}
2661 
2662 
/*!	Fills the given \a info structure with information from the specified
	team.
	Interrupts must be enabled. The team must not be locked.

	\param team The team to query.
	\param info The structure to fill in.
	\param size The caller's notion of \c sizeof(team_info); must match
		exactly.
	\return \c B_OK on success, \c B_BAD_VALUE on a size mismatch.
*/
static status_t
fill_team_info(Team* team, team_info* info, size_t size)
{
	if (size != sizeof(team_info))
		return B_BAD_VALUE;

	// TODO: Set more information for team_info
	memset(info, 0, size);

	info->team = team->id;
		// immutable
	info->image_count = count_images(team);
		// protected by sImageMutex

	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	info->thread_count = team->num_threads;
	//info->area_count =
	info->debugger_nub_thread = team->debug_info.nub_thread;
	info->debugger_nub_port = team->debug_info.nub_port;
	info->uid = team->effective_uid;
	info->gid = team->effective_gid;

	strlcpy(info->args, team->Args(), sizeof(info->args));
	info->argc = 1;

	return B_OK;
}
2695 
2696 
2697 /*!	Returns whether the process group contains stopped processes.
2698 	The caller must hold the process group's lock.
2699 */
2700 static bool
2701 process_group_has_stopped_processes(ProcessGroup* group)
2702 {
2703 	Team* team = group->teams;
2704 	while (team != NULL) {
2705 		// the parent team's lock guards the job control entry -- acquire it
2706 		team->LockTeamAndParent(false);
2707 
2708 		if (team->job_control_entry != NULL
2709 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2710 			team->UnlockTeamAndParent();
2711 			return true;
2712 		}
2713 
2714 		team->UnlockTeamAndParent();
2715 
2716 		team = team->group_next;
2717 	}
2718 
2719 	return false;
2720 }
2721 
2722 
/*!	Iterates through all process groups queued in team_remove_team() and signals
	those that are orphaned and have stopped processes.
	The caller must not hold any team or process group locks.
*/
static void
orphaned_process_group_check()
{
	// process as long as there are groups in the list
	while (true) {
		// remove the head from the list
		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
		if (group == NULL) {
			// no more groups queued -- we're done
			return;
		}

		group->UnsetOrphanedCheck();
		BReference<ProcessGroup> groupReference(group);

		// drop the queue lock before taking the group lock
		orphanedCheckLocker.Unlock();

		AutoLocker<ProcessGroup> groupLocker(group);

		// If the group is orphaned and contains stopped processes, we're
		// supposed to send SIGHUP + SIGCONT.
		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
			Thread* currentThread = thread_get_current_thread();

			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
			send_signal_to_process_group_locked(group, signal, 0);

			signal.SetNumber(SIGCONT);
			send_signal_to_process_group_locked(group, signal, 0);
		}
	}
}
2759 
2760 
2761 static status_t
2762 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2763 	uint32 flags)
2764 {
2765 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2766 		return B_BAD_VALUE;
2767 
2768 	// get the team
2769 	Team* team = Team::GetAndLock(id);
2770 	if (team == NULL)
2771 		return B_BAD_TEAM_ID;
2772 	BReference<Team> teamReference(team, true);
2773 	TeamLocker teamLocker(team, true);
2774 
2775 	if ((flags & B_CHECK_PERMISSION) != 0) {
2776 		uid_t uid = geteuid();
2777 		if (uid != 0 && uid != team->effective_uid)
2778 			return B_NOT_ALLOWED;
2779 	}
2780 
2781 	bigtime_t kernelTime = 0;
2782 	bigtime_t userTime = 0;
2783 
2784 	switch (who) {
2785 		case B_TEAM_USAGE_SELF:
2786 		{
2787 			Thread* thread = team->thread_list;
2788 
2789 			for (; thread != NULL; thread = thread->team_next) {
2790 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2791 				kernelTime += thread->kernel_time;
2792 				userTime += thread->user_time;
2793 			}
2794 
2795 			kernelTime += team->dead_threads_kernel_time;
2796 			userTime += team->dead_threads_user_time;
2797 			break;
2798 		}
2799 
2800 		case B_TEAM_USAGE_CHILDREN:
2801 		{
2802 			Team* child = team->children;
2803 			for (; child != NULL; child = child->siblings_next) {
2804 				TeamLocker childLocker(child);
2805 
2806 				Thread* thread = team->thread_list;
2807 
2808 				for (; thread != NULL; thread = thread->team_next) {
2809 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2810 					kernelTime += thread->kernel_time;
2811 					userTime += thread->user_time;
2812 				}
2813 
2814 				kernelTime += child->dead_threads_kernel_time;
2815 				userTime += child->dead_threads_user_time;
2816 			}
2817 
2818 			kernelTime += team->dead_children.kernel_time;
2819 			userTime += team->dead_children.user_time;
2820 			break;
2821 		}
2822 	}
2823 
2824 	info->kernel_time = kernelTime;
2825 	info->user_time = userTime;
2826 
2827 	return B_OK;
2828 }
2829 
2830 
2831 //	#pragma mark - Private kernel API
2832 
2833 
/*!	Initializes the team subsystem: creates the global team and process group
	hash tables, the initial session and process group (both with ID 1), and
	the kernel team (ID 1, running as root), and registers the "team"/"teams"
	KDL commands as well as the team notification service.
	Called once during kernel startup; failures at this stage panic.
*/
status_t
team_init(kernel_args* args)
{
	// create the team hash table
	new(&sTeamHash) TeamTable;
	if (sTeamHash.Init(64) != B_OK)
		panic("Failed to init team hash table!");

	new(&sGroupHash) ProcessGroupHashTable;
	if (sGroupHash.Init() != B_OK)
		panic("Failed to init process group hash table!");

	// create initial session and process groups

	ProcessSession* session = new(std::nothrow) ProcessSession(1);
	if (session == NULL)
		panic("Could not create initial session.\n");
	BReference<ProcessSession> sessionReference(session, true);

	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");
	BReference<ProcessGroup> groupReference(group, true);

	group->Publish(session);

	// create the kernel team
	sKernelTeam = Team::Create(1, "kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");

	sKernelTeam->address_space = VMAddressSpace::Kernel();
	sKernelTeam->SetArgs(sKernelTeam->Name());
	sKernelTeam->state = TEAM_STATE_NORMAL;

	// the kernel team runs as root (all user/group IDs 0)
	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
		dprintf("Failed to resize FD table for kernel team!\n");

	// stick it in the team hash
	sTeamHash.Insert(sKernelTeam);

	// check safe mode settings
	sDisableUserAddOns = get_safemode_boolean(B_SAFEMODE_DISABLE_USER_ADD_ONS,
		false);

	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	sNotificationService.Register();

	return B_OK;
}
2911 
2912 
/*!	Returns the maximum number of teams the system supports. */
int32
team_max_teams(void)
{
	return sMaxTeams;
}
2918 
2919 
/*!	Returns the number of teams currently existing in the system. */
int32
team_used_teams(void)
{
	// sUsedTeams is guarded by the team hash lock
	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
	return sUsedTeams;
}
2926 
2927 
2928 /*! Returns a death entry of a child team specified by ID (if any).
2929 	The caller must hold the team's lock.
2930 
2931 	\param team The team whose dead children list to check.
2932 	\param child The ID of the child for whose death entry to lock. Must be > 0.
2933 	\param _deleteEntry Return variable, indicating whether the caller needs to
2934 		delete the returned entry.
2935 	\return The death entry of the matching team, or \c NULL, if no death entry
2936 		for the team was found.
2937 */
2938 job_control_entry*
2939 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2940 {
2941 	if (child <= 0)
2942 		return NULL;
2943 
2944 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2945 		child);
2946 	if (entry) {
2947 		// remove the entry only, if the caller is the parent of the found team
2948 		if (team_get_current_team_id() == entry->thread) {
2949 			team->dead_children.entries.Remove(entry);
2950 			team->dead_children.count--;
2951 			*_deleteEntry = true;
2952 		} else {
2953 			*_deleteEntry = false;
2954 		}
2955 	}
2956 
2957 	return entry;
2958 }
2959 
2960 
2961 /*! Quick check to see if we have a valid team ID. */
2962 bool
2963 team_is_valid(team_id id)
2964 {
2965 	if (id <= 0)
2966 		return false;
2967 
2968 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
2969 	return team_get_team_struct_locked(id) != NULL;
2970 }
2971 
2972 
/*!	Looks up the team with the given ID in the global team hash.
	The caller must hold \c sTeamHashLock.
	\return The team, or \c NULL if no team with that ID exists.
*/
Team*
team_get_team_struct_locked(team_id id)
{
	return sTeamHash.Lookup(id);
}
2978 
2979 
2980 void
2981 team_set_controlling_tty(int32 ttyIndex)
2982 {
2983 	// lock the team, so its session won't change while we're playing with it
2984 	Team* team = thread_get_current_thread()->team;
2985 	TeamLocker teamLocker(team);
2986 
2987 	// get and lock the session
2988 	ProcessSession* session = team->group->Session();
2989 	AutoLocker<ProcessSession> sessionLocker(session);
2990 
2991 	// set the session's fields
2992 	session->controlling_tty = ttyIndex;
2993 	session->foreground_group = -1;
2994 }
2995 
2996 
2997 int32
2998 team_get_controlling_tty()
2999 {
3000 	// lock the team, so its session won't change while we're playing with it
3001 	Team* team = thread_get_current_thread()->team;
3002 	TeamLocker teamLocker(team);
3003 
3004 	// get and lock the session
3005 	ProcessSession* session = team->group->Session();
3006 	AutoLocker<ProcessSession> sessionLocker(session);
3007 
3008 	// get the session's field
3009 	return session->controlling_tty;
3010 }
3011 
3012 
/*!	Sets the foreground process group of the session the calling team belongs
	to (the kernel backend for tcsetpgrp()-like functionality).

	\param ttyIndex The TTY the caller claims as its controlling terminal;
		must actually be the session's controlling TTY.
	\param processGroupID The group to move to the foreground; must belong to
		the caller's session.
	\return \c B_OK on success, \c ENOTTY or \c B_BAD_VALUE when the TTY or
		group checks fail, or \c B_INTERRUPTED when the caller is a background
		process that got SIGTTOU sent to its group instead.
*/
status_t
team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
{
	// lock the team, so its session won't change while we're playing with it
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// check given TTY -- must be the controlling tty of the calling process
	if (session->controlling_tty != ttyIndex)
		return ENOTTY;

	// check given process group -- must belong to our session
	{
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
		if (group == NULL || group->Session() != session)
			return B_BAD_VALUE;
	}

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
		InterruptsSpinLocker signalLocker(team->signal_lock);

		if (!is_team_signal_blocked(team, SIGTTOU)) {
			pid_t groupID = team->group_id;

			// drop all locks before sending the signal
			signalLocker.Unlock();
			sessionLocker.Unlock();
			teamLocker.Unlock();

			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
			send_signal_to_process_group(groupID, signal, 0);
			return B_INTERRUPTED;
		}
	}

	session->foreground_group = processGroupID;

	return B_OK;
}
3062 
3063 
3064 uid_t
3065 team_geteuid(team_id id)
3066 {
3067 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3068 	Team* team = team_get_team_struct_locked(id);
3069 	if (team == NULL)
3070 		return (uid_t)-1;
3071 	return team->effective_uid;
3072 }
3073 
3074 
/*!	Removes the specified team from the global team hash, from its process
	group, and from its parent.
	It also moves all of its children to the kernel team.

	The caller must hold the following locks:
	- \a team's process group's lock,
	- the kernel team's lock,
	- \a team's parent team's lock (might be the kernel team), and
	- \a team's lock.

	\param team The team to remove.
	\param _signalGroup Set to the session's foreground process group ID, if
		the team was a controlling process (session leader with controlling
		terminal), otherwise to \c -1. No signalling can be done here due to
		the locks held, so the caller must signal that group afterwards.
*/
void
team_remove_team(Team* team, pid_t& _signalGroup)
{
	Team* parent = team->parent;

	// remember how long this team lasted
	parent->dead_children.kernel_time += team->dead_threads_kernel_time
		+ team->dead_children.kernel_time;
	parent->dead_children.user_time += team->dead_threads_user_time
		+ team->dead_children.user_time;

	// remove the team from the hash table
	InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
	sTeamHash.Remove(team);
	sUsedTeams--;
	teamsLocker.Unlock();

	// The team can no longer be accessed by ID. Navigation to it is still
	// possible from its process group and its parent and children, but that
	// will be rectified shortly.
	team->state = TEAM_STATE_DEATH;

	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signalling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine, whom to signal.
	_signalGroup = -1;
	bool isSessionLeader = false;
	if (team->session_id == team->id
		&& team->group->Session()->controlling_tty >= 0) {
		isSessionLeader = true;

		ProcessSession* session = team->group->Session();

		AutoLocker<ProcessSession> sessionLocker(session);

		// the session loses its controlling terminal
		session->controlling_tty = -1;
		_signalGroup = session->foreground_group;
	}

	// remove us from our process group
	remove_team_from_group(team);

	// move the team's children to the kernel team
	while (Team* child = team->children) {
		// remove the child from the current team and add it to the kernel team
		TeamLocker childLocker(child);

		remove_team_from_parent(team, child);
		insert_team_into_parent(sKernelTeam, child);

		// move job control entries too
		sKernelTeam->stopped_children.entries.MoveFrom(
			&team->stopped_children.entries);
		sKernelTeam->continued_children.entries.MoveFrom(
			&team->continued_children.entries);

		// If the team was a session leader with controlling terminal,
		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
		// groups with stopped processes. Due to locking complications we can't
		// do that here, so we only check whether we were a reason for the
		// child's process group not being an orphan and, if so, schedule a
		// later check (cf. orphaned_process_group_check()).
		if (isSessionLeader) {
			ProcessGroup* childGroup = child->group;
			if (childGroup->Session()->id == team->session_id
				&& childGroup->id != team->group_id) {
				childGroup->ScheduleOrphanedCheck();
			}
		}

		// Note, we don't move the dead children entries. Those will be deleted
		// when the team structure is deleted.
	}

	// remove us from our parent
	remove_team_from_parent(parent, team);
}
3163 
3164 
/*!	Kills all threads but the main thread of the team and shuts down user
	debugging for it.
	To be called on exit of the team's main thread. No locks must be held.

	\param team The team in question.
	\return The port of the debugger for the team, -1 if none. To be passed to
		team_delete_team().
*/
port_id
team_shutdown_team(Team* team)
{
	// only the team's own main thread may shut it down
	ASSERT(thread_get_current_thread() == team->main_thread);

	TeamLocker teamLocker(team);

	// Make sure debugging changes won't happen anymore.
	port_id debuggerPort = -1;
	while (true) {
		// If a debugger change is in progress for the team, we'll have to
		// wait until it is done.
		ConditionVariableEntry waitForDebuggerEntry;
		bool waitForDebugger = false;

		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition != NULL) {
			team->debug_info.debugger_changed_condition->Add(
				&waitForDebuggerEntry);
			waitForDebugger = true;
		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
			// The team is being debugged. That will stop with the termination
			// of the nub thread. Since we set the team state to death, no one
			// can install a debugger anymore. We fetch the debugger's port to
			// send it a message at the bitter end.
			debuggerPort = team->debug_info.debugger_port;
		}

		debugInfoLocker.Unlock();

		if (!waitForDebugger)
			break;

		// wait for the debugger change to be finished
		teamLocker.Unlock();

		waitForDebuggerEntry.Wait();

		teamLocker.Lock();
	}

	// Mark the team as shutting down. That will prevent new threads from being
	// created and debugger changes from taking place.
	team->state = TEAM_STATE_SHUTDOWN;

	// delete all timers
	team->DeleteUserTimers(false);

	// deactivate CPU time user timers for the team
	InterruptsSpinLocker timeLocker(team->time_lock);

	if (team->HasActiveCPUTimeUserTimers())
		team->DeactivateCPUTimeUserTimers();

	timeLocker.Unlock();

	// kill all threads but the main thread
	team_death_entry deathEntry;
	deathEntry.condition.Init(team, "team death");

	// Retry until no threads besides the main thread remain: new iterations
	// are needed since the team lock is dropped while waiting below.
	while (true) {
		team->death_entry = &deathEntry;
		deathEntry.remaining_threads = 0;

		// send SIGKILLTHR to every thread but the main one and count them
		Thread* thread = team->thread_list;
		while (thread != NULL) {
			if (thread != team->main_thread) {
				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
				deathEntry.remaining_threads++;
			}

			thread = thread->team_next;
		}

		if (deathEntry.remaining_threads == 0)
			break;

		// there are threads to wait for
		ConditionVariableEntry entry;
		deathEntry.condition.Add(&entry);

		teamLocker.Unlock();

		entry.Wait();

		teamLocker.Lock();
	}

	team->death_entry = NULL;

	return debuggerPort;
}
3267 
3268 
/*!	Called on team exit to notify threads waiting on the team and free most
	resources associated with it.
	The caller shouldn't hold any locks.

	\param team The team being deleted. All of its threads must already be
		gone (asserted below).
	\param debuggerPort The debugger port previously returned by
		team_shutdown_team(), or -1 if the team wasn't being debugged.
*/
void
team_delete_team(Team* team, port_id debuggerPort)
{
	// Not quite in our job description, but work that has been left by
	// team_remove_team() and that can be done now that we're not holding any
	// locks.
	orphaned_process_group_check();

	team_id teamID = team->id;

	ASSERT(team->num_threads == 0);

	// If someone is waiting for this team to be loaded, but it dies
	// unexpectedly before being done, we need to notify the waiting
	// thread now.

	TeamLocker teamLocker(team);

	if (team->loading_info) {
		// there's indeed someone waiting
		struct team_loading_info* loadingInfo = team->loading_info;
		team->loading_info = NULL;

		// the load can never complete now -- report failure
		loadingInfo->result = B_ERROR;

		// wake up the waiting thread
		loadingInfo->condition.NotifyAll();
	}

	// notify team watchers

	{
		// we're not reachable from anyone anymore at this point, so we
		// can safely access the list without any locking
		struct team_watcher* watcher;
		while ((watcher = (struct team_watcher*)list_remove_head_item(
				&team->watcher_list)) != NULL) {
			watcher->hook(teamID, watcher->data);
			free(watcher);
		}
	}

	teamLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	// free team resources

	delete_realtime_sem_context(team->realtime_sem_context);
	xsi_sem_undo(team);
	remove_images(team);
	team->address_space->RemoveAndPut();

	team->ReleaseReference();

	// notify the debugger, that the team is gone
	user_debug_team_deleted(teamID, debuggerPort);
}
3331 
3332 
3333 Team*
3334 team_get_kernel_team(void)
3335 {
3336 	return sKernelTeam;
3337 }
3338 
3339 
3340 team_id
3341 team_get_kernel_team_id(void)
3342 {
3343 	if (!sKernelTeam)
3344 		return 0;
3345 
3346 	return sKernelTeam->id;
3347 }
3348 
3349 
3350 team_id
3351 team_get_current_team_id(void)
3352 {
3353 	return thread_get_current_thread()->team->id;
3354 }
3355 
3356 
3357 status_t
3358 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3359 {
3360 	if (id == sKernelTeam->id) {
3361 		// we're the kernel team, so we don't have to go through all
3362 		// the hassle (locking and hash lookup)
3363 		*_addressSpace = VMAddressSpace::GetKernel();
3364 		return B_OK;
3365 	}
3366 
3367 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3368 
3369 	Team* team = team_get_team_struct_locked(id);
3370 	if (team == NULL)
3371 		return B_BAD_VALUE;
3372 
3373 	team->address_space->Get();
3374 	*_addressSpace = team->address_space;
3375 	return B_OK;
3376 }
3377 
3378 
/*!	Sets the team's job control state.
	The caller must hold the parent team's lock. Interrupts are allowed to be
	enabled or disabled.
	\a team The team whose job control state shall be set.
	\a newState The new state to be set.
	\a signal The signal the new state was caused by. Can be \c NULL, if none.
		In that case the caller is responsible for filling in the following
		fields of the entry before releasing the parent team's lock, unless
		the new state is \c JOB_CONTROL_STATE_NONE:
		- \c signal: The number of the signal causing the state change.
		- \c signaling_user: The real UID of the user sending the signal.
*/
void
team_set_job_control_state(Team* team, job_control_state newState,
	Signal* signal)
{
	if (team == NULL || team->job_control_entry == NULL)
		return;

	// don't touch anything, if the state stays the same or the team is already
	// dead
	job_control_entry* entry = team->job_control_entry;
	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
		return;

	T(SetJobControlState(team->id, newState, signal));

	// remove from the old list
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry is in no list ATM
			break;
		case JOB_CONTROL_STATE_DEAD:
			// can't get here
			break;
		case JOB_CONTROL_STATE_STOPPED:
			team->parent->stopped_children.entries.Remove(entry);
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			team->parent->continued_children.entries.Remove(entry);
			break;
	}

	entry->state = newState;

	if (signal != NULL) {
		entry->signal = signal->Number();
		entry->signaling_user = signal->SendingUser();
	}

	// add to new list
	team_job_control_children* childList = NULL;
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry doesn't get into any list
			break;
		case JOB_CONTROL_STATE_DEAD:
			childList = &team->parent->dead_children;
			team->parent->dead_children.count++;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			childList = &team->parent->stopped_children;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			childList = &team->parent->continued_children;
			break;
	}

	if (childList != NULL) {
		childList->entries.Add(entry);
		// a parent blocked in wait_for_child() may now have something to
		// report, so wake it up
		team->parent->dead_children.condition_variable.NotifyAll();
	}
}
3452 
3453 
3454 /*!	Inits the given team's exit information, if not yet initialized, to some
3455 	generic "killed" status.
3456 	The caller must not hold the team's lock. Interrupts must be enabled.
3457 
3458 	\param team The team whose exit info shall be initialized.
3459 */
3460 void
3461 team_init_exit_info_on_error(Team* team)
3462 {
3463 	TeamLocker teamLocker(team);
3464 
3465 	if (!team->exit.initialized) {
3466 		team->exit.reason = CLD_KILLED;
3467 		team->exit.signal = SIGKILL;
3468 		team->exit.signaling_user = geteuid();
3469 		team->exit.status = 0;
3470 		team->exit.initialized = true;
3471 	}
3472 }
3473 
3474 
3475 /*! Adds a hook to the team that is called as soon as this team goes away.
3476 	This call might get public in the future.
3477 */
3478 status_t
3479 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3480 {
3481 	if (hook == NULL || teamID < B_OK)
3482 		return B_BAD_VALUE;
3483 
3484 	// create the watcher object
3485 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3486 	if (watcher == NULL)
3487 		return B_NO_MEMORY;
3488 
3489 	watcher->hook = hook;
3490 	watcher->data = data;
3491 
3492 	// add watcher, if the team isn't already dying
3493 	// get the team
3494 	Team* team = Team::GetAndLock(teamID);
3495 	if (team == NULL) {
3496 		free(watcher);
3497 		return B_BAD_TEAM_ID;
3498 	}
3499 
3500 	list_add_item(&team->watcher_list, watcher);
3501 
3502 	team->UnlockAndReleaseReference();
3503 
3504 	return B_OK;
3505 }
3506 
3507 
3508 status_t
3509 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3510 {
3511 	if (hook == NULL || teamID < 0)
3512 		return B_BAD_VALUE;
3513 
3514 	// get team and remove watcher (if present)
3515 	Team* team = Team::GetAndLock(teamID);
3516 	if (team == NULL)
3517 		return B_BAD_TEAM_ID;
3518 
3519 	// search for watcher
3520 	team_watcher* watcher = NULL;
3521 	while ((watcher = (team_watcher*)list_get_next_item(
3522 			&team->watcher_list, watcher)) != NULL) {
3523 		if (watcher->hook == hook && watcher->data == data) {
3524 			// got it!
3525 			list_remove_item(&team->watcher_list, watcher);
3526 			break;
3527 		}
3528 	}
3529 
3530 	team->UnlockAndReleaseReference();
3531 
3532 	if (watcher == NULL)
3533 		return B_ENTRY_NOT_FOUND;
3534 
3535 	free(watcher);
3536 	return B_OK;
3537 }
3538 
3539 
/*!	Allocates a user_thread structure from the team.
	The team lock must be held, unless the function is called for the team's
	main thread. Interrupts must be enabled.

	\param team The team to allocate from.
	\return A pointer into the team's user data area, or \c NULL if the team
		has no user data area or it could not be grown.
*/
struct user_thread*
team_allocate_user_thread(Team* team)
{
	// no user data area set up (yet) -- nothing to allocate from
	if (team->user_data == 0)
		return NULL;

	// take an entry from the free list, if any
	if (struct free_user_thread* entry = team->free_user_threads) {
		user_thread* thread = entry->thread;
		team->free_user_threads = entry->next;
		free(entry);
		return thread;
	}

	while (true) {
		// enough space left?
		// allocations are rounded up to cache line size, presumably to avoid
		// false sharing between different threads' structures
		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
		if (team->user_data_size - team->used_user_data < needed) {
			// try to resize the area
			if (resize_area(team->user_data_area,
					team->user_data_size + B_PAGE_SIZE) != B_OK) {
				return NULL;
			}

			// resized user area successfully -- try to allocate the user_thread
			// again
			team->user_data_size += B_PAGE_SIZE;
			continue;
		}

		// allocate the user_thread
		// bump allocation from the end of the used region; freed structures
		// are recycled via the free list above, never returned to this pool
		user_thread* thread
			= (user_thread*)(team->user_data + team->used_user_data);
		team->used_user_data += needed;

		return thread;
	}
}
3582 
3583 
3584 /*!	Frees the given user_thread structure.
3585 	The team's lock must not be held. Interrupts must be enabled.
3586 	\param team The team the user thread was allocated from.
3587 	\param userThread The user thread to free.
3588 */
3589 void
3590 team_free_user_thread(Team* team, struct user_thread* userThread)
3591 {
3592 	if (userThread == NULL)
3593 		return;
3594 
3595 	// create a free list entry
3596 	free_user_thread* entry
3597 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3598 	if (entry == NULL) {
3599 		// we have to leak the user thread :-/
3600 		return;
3601 	}
3602 
3603 	// add to free list
3604 	TeamLocker teamLocker(team);
3605 
3606 	entry->thread = userThread;
3607 	entry->next = team->free_user_threads;
3608 	team->free_user_threads = entry;
3609 }
3610 
3611 
3612 //	#pragma mark - Associated data interface
3613 
3614 
// Creates the data object without an owner; an owner is set via
// AssociatedDataOwner::AddData().
AssociatedData::AssociatedData()
	:
	fOwner(NULL)
{
}
3620 
3621 
// Virtual destructor so derived data objects are destroyed correctly when
// the last reference is released.
AssociatedData::~AssociatedData()
{
}
3625 
3626 
// Hook invoked by AssociatedDataOwner::PrepareForDeletion() when the owner
// goes away. The default implementation does nothing; subclasses override it
// to react to the owner's deletion.
void
AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
{
}
3631 
3632 
// Initializes the lock guarding the owner's data list.
AssociatedDataOwner::AssociatedDataOwner()
{
	mutex_init(&fLock, "associated data owner");
}
3637 
3638 
// Destroys the list lock. PrepareForDeletion() is expected to have emptied
// the list before the owner is destroyed.
AssociatedDataOwner::~AssociatedDataOwner()
{
	mutex_destroy(&fLock);
}
3643 
3644 
3645 bool
3646 AssociatedDataOwner::AddData(AssociatedData* data)
3647 {
3648 	MutexLocker locker(fLock);
3649 
3650 	if (data->Owner() != NULL)
3651 		return false;
3652 
3653 	data->AcquireReference();
3654 	fList.Add(data);
3655 	data->SetOwner(this);
3656 
3657 	return true;
3658 }
3659 
3660 
3661 bool
3662 AssociatedDataOwner::RemoveData(AssociatedData* data)
3663 {
3664 	MutexLocker locker(fLock);
3665 
3666 	if (data->Owner() != this)
3667 		return false;
3668 
3669 	data->SetOwner(NULL);
3670 	fList.Remove(data);
3671 
3672 	locker.Unlock();
3673 
3674 	data->ReleaseReference();
3675 
3676 	return true;
3677 }
3678 
3679 
// Detaches all associated data objects from this owner, invoking their
// OwnerDeleted() hooks and releasing the owner's references. To be called
// before the owner is destroyed. The hooks are deliberately called without
// holding the lock.
void
AssociatedDataOwner::PrepareForDeletion()
{
	MutexLocker locker(fLock);

	// move all data to a temporary list and unset the owner
	DataList list;
	list.MoveFrom(&fList);

	for (DataList::Iterator it = list.GetIterator();
		AssociatedData* data = it.Next();) {
		data->SetOwner(NULL);
	}

	locker.Unlock();

	// call the notification hooks and release our references
	while (AssociatedData* data = list.RemoveHead()) {
		data->OwnerDeleted(this);
		data->ReleaseReference();
	}
}
3702 
3703 
3704 /*!	Associates data with the current team.
3705 	When the team is deleted, the data object is notified.
3706 	The team acquires a reference to the object.
3707 
3708 	\param data The data object.
3709 	\return \c true on success, \c false otherwise. Fails only when the supplied
3710 		data object is already associated with another owner.
3711 */
3712 bool
3713 team_associate_data(AssociatedData* data)
3714 {
3715 	return thread_get_current_thread()->team->AddData(data);
3716 }
3717 
3718 
3719 /*!	Dissociates data from the current team.
3720 	Balances an earlier call to team_associate_data().
3721 
3722 	\param data The data object.
3723 	\return \c true on success, \c false otherwise. Fails only when the data
3724 		object is not associated with the current team.
3725 */
3726 bool
3727 team_dissociate_data(AssociatedData* data)
3728 {
3729 	return thread_get_current_thread()->team->RemoveData(data);
3730 }
3731 
3732 
3733 //	#pragma mark - Public kernel API
3734 
3735 
// Convenience wrapper around load_image_etc() using default settings:
// normal priority, loading into the current team, and waiting until the
// image is fully loaded.
thread_id
load_image(int32 argCount, const char** args, const char** env)
{
	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
}
3742 
3743 
3744 thread_id
3745 load_image_etc(int32 argCount, const char* const* args,
3746 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3747 {
3748 	// we need to flatten the args and environment
3749 
3750 	if (args == NULL)
3751 		return B_BAD_VALUE;
3752 
3753 	// determine total needed size
3754 	int32 argSize = 0;
3755 	for (int32 i = 0; i < argCount; i++)
3756 		argSize += strlen(args[i]) + 1;
3757 
3758 	int32 envCount = 0;
3759 	int32 envSize = 0;
3760 	while (env != NULL && env[envCount] != NULL)
3761 		envSize += strlen(env[envCount++]) + 1;
3762 
3763 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3764 	if (size > MAX_PROCESS_ARGS_SIZE)
3765 		return B_TOO_MANY_ARGS;
3766 
3767 	// allocate space
3768 	char** flatArgs = (char**)malloc(size);
3769 	if (flatArgs == NULL)
3770 		return B_NO_MEMORY;
3771 
3772 	char** slot = flatArgs;
3773 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3774 
3775 	// copy arguments and environment
3776 	for (int32 i = 0; i < argCount; i++) {
3777 		int32 argSize = strlen(args[i]) + 1;
3778 		memcpy(stringSpace, args[i], argSize);
3779 		*slot++ = stringSpace;
3780 		stringSpace += argSize;
3781 	}
3782 
3783 	*slot++ = NULL;
3784 
3785 	for (int32 i = 0; i < envCount; i++) {
3786 		int32 envSize = strlen(env[i]) + 1;
3787 		memcpy(stringSpace, env[i], envSize);
3788 		*slot++ = stringSpace;
3789 		stringSpace += envSize;
3790 	}
3791 
3792 	*slot++ = NULL;
3793 
3794 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3795 		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
3796 
3797 	free(flatArgs);
3798 		// load_image_internal() unset our variable if it took over ownership
3799 
3800 	return thread;
3801 }
3802 
3803 
3804 status_t
3805 wait_for_team(team_id id, status_t* _returnCode)
3806 {
3807 	// check whether the team exists
3808 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3809 
3810 	Team* team = team_get_team_struct_locked(id);
3811 	if (team == NULL)
3812 		return B_BAD_TEAM_ID;
3813 
3814 	id = team->id;
3815 
3816 	teamsLocker.Unlock();
3817 
3818 	// wait for the main thread (it has the same ID as the team)
3819 	return wait_for_thread(id, _returnCode);
3820 }
3821 
3822 
3823 status_t
3824 kill_team(team_id id)
3825 {
3826 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3827 
3828 	Team* team = team_get_team_struct_locked(id);
3829 	if (team == NULL)
3830 		return B_BAD_TEAM_ID;
3831 
3832 	id = team->id;
3833 
3834 	teamsLocker.Unlock();
3835 
3836 	if (team == sKernelTeam)
3837 		return B_NOT_ALLOWED;
3838 
3839 	// Just kill the team's main thread (it has same ID as the team). The
3840 	// cleanup code there will take care of the team.
3841 	return kill_thread(id);
3842 }
3843 
3844 
3845 status_t
3846 _get_team_info(team_id id, team_info* info, size_t size)
3847 {
3848 	// get the team
3849 	Team* team = Team::Get(id);
3850 	if (team == NULL)
3851 		return B_BAD_TEAM_ID;
3852 	BReference<Team> teamReference(team, true);
3853 
3854 	// fill in the info
3855 	return fill_team_info(team, info, size);
3856 }
3857 
3858 
3859 status_t
3860 _get_next_team_info(int32* cookie, team_info* info, size_t size)
3861 {
3862 	int32 slot = *cookie;
3863 	if (slot < 1)
3864 		slot = 1;
3865 
3866 	InterruptsReadSpinLocker locker(sTeamHashLock);
3867 
3868 	team_id lastTeamID = peek_next_thread_id();
3869 		// TODO: This is broken, since the id can wrap around!
3870 
3871 	// get next valid team
3872 	Team* team = NULL;
3873 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3874 		slot++;
3875 
3876 	if (team == NULL)
3877 		return B_BAD_TEAM_ID;
3878 
3879 	// get a reference to the team and unlock
3880 	BReference<Team> teamReference(team);
3881 	locker.Unlock();
3882 
3883 	// fill in the info
3884 	*cookie = ++slot;
3885 	return fill_team_info(team, info, size);
3886 }
3887 
3888 
3889 status_t
3890 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3891 {
3892 	if (size != sizeof(team_usage_info))
3893 		return B_BAD_VALUE;
3894 
3895 	return common_get_team_usage_info(id, who, info, 0);
3896 }
3897 
3898 
3899 pid_t
3900 getpid(void)
3901 {
3902 	return thread_get_current_thread()->team->id;
3903 }
3904 
3905 
// POSIX getppid(): returns the parent's process ID. A process argument of 0
// means the calling process.
pid_t
getppid()
{
	return _getppid(0);
}
3911 
3912 
3913 pid_t
3914 getpgid(pid_t id)
3915 {
3916 	if (id < 0) {
3917 		errno = EINVAL;
3918 		return -1;
3919 	}
3920 
3921 	if (id == 0) {
3922 		// get process group of the calling process
3923 		Team* team = thread_get_current_thread()->team;
3924 		TeamLocker teamLocker(team);
3925 		return team->group_id;
3926 	}
3927 
3928 	// get the team
3929 	Team* team = Team::GetAndLock(id);
3930 	if (team == NULL) {
3931 		errno = ESRCH;
3932 		return -1;
3933 	}
3934 
3935 	// get the team's process group ID
3936 	pid_t groupID = team->group_id;
3937 
3938 	team->UnlockAndReleaseReference();
3939 
3940 	return groupID;
3941 }
3942 
3943 
3944 pid_t
3945 getsid(pid_t id)
3946 {
3947 	if (id < 0) {
3948 		errno = EINVAL;
3949 		return -1;
3950 	}
3951 
3952 	if (id == 0) {
3953 		// get session of the calling process
3954 		Team* team = thread_get_current_thread()->team;
3955 		TeamLocker teamLocker(team);
3956 		return team->session_id;
3957 	}
3958 
3959 	// get the team
3960 	Team* team = Team::GetAndLock(id);
3961 	if (team == NULL) {
3962 		errno = ESRCH;
3963 		return -1;
3964 	}
3965 
3966 	// get the team's session ID
3967 	pid_t sessionID = team->session_id;
3968 
3969 	team->UnlockAndReleaseReference();
3970 
3971 	return sessionID;
3972 }
3973 
3974 
3975 //	#pragma mark - User syscalls
3976 
3977 
/*!	Syscall backend of exec*(): replaces the current team's image.
	\param userPath Userland path of the executable.
	\param userFlatArgs Userland buffer with the flattened argument/environment
		vectors.
	\param flatArgsSize Size of that buffer.
	\param argCount Number of arguments in the buffer.
	\param envCount Number of environment variables in the buffer.
	\param umask The file mode creation mask for the new image.
	\return Only returns on failure; on success exec_team() takes over.
*/
status_t
_user_exec(const char* userPath, const char* const* userFlatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char path[B_PATH_NAME_LENGTH];

	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
		return B_BAD_ADDRESS;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);

	if (error == B_OK) {
		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
			envCount, umask);
			// this one only returns in case of error
	}

	free(flatArgs);
	return error;
}
4004 
4005 
// Syscall backend of fork(): duplicates the calling team.
thread_id
_user_fork(void)
{
	return fork_team();
}
4011 
4012 
4013 pid_t
4014 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo,
4015 	team_usage_info* usageInfo)
4016 {
4017 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
4018 		return B_BAD_ADDRESS;
4019 	if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo))
4020 		return B_BAD_ADDRESS;
4021 
4022 	siginfo_t info;
4023 	team_usage_info usage_info;
4024 	pid_t foundChild = wait_for_child(child, flags, info, usage_info);
4025 	if (foundChild < 0)
4026 		return syscall_restart_handle_post(foundChild);
4027 
4028 	// copy info back to userland
4029 	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
4030 		return B_BAD_ADDRESS;
4031 	// copy usage_info back to userland
4032 	if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info,
4033 		sizeof(usage_info)) != B_OK) {
4034 		return B_BAD_ADDRESS;
4035 	}
4036 
4037 	return foundChild;
4038 }
4039 
4040 
4041 pid_t
4042 _user_process_info(pid_t process, int32 which)
4043 {
4044 	pid_t result;
4045 	switch (which) {
4046 		case SESSION_ID:
4047 			result = getsid(process);
4048 			break;
4049 		case GROUP_ID:
4050 			result = getpgid(process);
4051 			break;
4052 		case PARENT_ID:
4053 			result = _getppid(process);
4054 			break;
4055 		default:
4056 			return B_BAD_VALUE;
4057 	}
4058 
4059 	return result >= 0 ? result : errno;
4060 }
4061 
4062 
/*!	Syscall backend of setpgid().
	\param processID The target process (0 means the calling process).
	\param groupID The group to join (0 means "use the target process' ID",
		i.e. create a new group with the target as leader).
	\return The resulting group ID, or an error code.
*/
pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the target
	//   process as group leader.
	// * Set the target process' process group to an already existing one in the
	//   same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == NULL) {
				// This can only happen if the team is exiting.
				ASSERT(team->state >= TEAM_STATE_SHUTDOWN);
				return ESRCH;
			}

			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			// keep the old group alive while we juggle its lock below
			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock in
			// the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child of
			// the calling team and in the same session. (If that's the case it
			// isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child, when the child has
			// already executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different session.
			// That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll();

		return group->id;
	}
}
4220 
4221 
/*!	Syscall backend of setsid(): makes the calling team the leader of a new
	session (and of a new process group within it).
	\return The new session/group ID (== the team's ID), or an error code
		(\c B_NOT_ALLOWED if the team already leads a process group).
*/
pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll();

	return group->id;
}
4261 
4262 
4263 status_t
4264 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4265 {
4266 	status_t returnCode;
4267 	status_t status;
4268 
4269 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4270 		return B_BAD_ADDRESS;
4271 
4272 	status = wait_for_team(id, &returnCode);
4273 	if (status >= B_OK && _userReturnCode != NULL) {
4274 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4275 				!= B_OK)
4276 			return B_BAD_ADDRESS;
4277 		return B_OK;
4278 	}
4279 
4280 	return syscall_restart_handle_post(status);
4281 }
4282 
4283 
4284 thread_id
4285 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
4286 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
4287 	port_id errorPort, uint32 errorToken)
4288 {
4289 	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));
4290 
4291 	if (argCount < 1)
4292 		return B_BAD_VALUE;
4293 
4294 	// copy and relocate the flat arguments
4295 	char** flatArgs;
4296 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
4297 		argCount, envCount, flatArgs);
4298 	if (error != B_OK)
4299 		return error;
4300 
4301 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
4302 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
4303 		errorToken);
4304 
4305 	free(flatArgs);
4306 		// load_image_internal() unset our variable if it took over ownership
4307 
4308 	return thread;
4309 }
4310 
4311 
/*!	Syscall backend of exit(): terminates the calling team.
	Does not return to userland; the calling thread is killed and the team is
	torn down via the main thread.
	\param returnValue The team's exit status.
*/
void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status
	TeamLocker teamLocker(team);

	// only the first exit reason sticks
	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();

	// Send this thread a SIGKILL. This makes sure the thread will not return to
	// userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}
4345 
4346 
/*!	Syscall entry point for kill_team(): thin wrapper that delegates
	straight to the kernel-internal kill_team().
	\param team ID of the team to kill.
	\return \c B_OK on success, or the error code kill_team() yields
		(e.g. an invalid team ID).
*/
status_t
_user_kill_team(team_id team)
{
	return kill_team(team);
}
4352 
4353 
4354 status_t
4355 _user_get_team_info(team_id id, team_info* userInfo)
4356 {
4357 	status_t status;
4358 	team_info info;
4359 
4360 	if (!IS_USER_ADDRESS(userInfo))
4361 		return B_BAD_ADDRESS;
4362 
4363 	status = _get_team_info(id, &info, sizeof(team_info));
4364 	if (status == B_OK) {
4365 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4366 			return B_BAD_ADDRESS;
4367 	}
4368 
4369 	return status;
4370 }
4371 
4372 
4373 status_t
4374 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
4375 {
4376 	status_t status;
4377 	team_info info;
4378 	int32 cookie;
4379 
4380 	if (!IS_USER_ADDRESS(userCookie)
4381 		|| !IS_USER_ADDRESS(userInfo)
4382 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4383 		return B_BAD_ADDRESS;
4384 
4385 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
4386 	if (status != B_OK)
4387 		return status;
4388 
4389 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4390 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4391 		return B_BAD_ADDRESS;
4392 
4393 	return status;
4394 }
4395 
4396 
/*!	Syscall entry point for find_thread(NULL)'s team counterpart: returns
	the ID of the team the calling thread belongs to. Cannot fail.
*/
team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}
4402 
4403 
4404 status_t
4405 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4406 	size_t size)
4407 {
4408 	if (size != sizeof(team_usage_info))
4409 		return B_BAD_VALUE;
4410 
4411 	team_usage_info info;
4412 	status_t status = common_get_team_usage_info(team, who, &info,
4413 		B_CHECK_PERMISSION);
4414 
4415 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4416 		|| user_memcpy(userInfo, &info, size) != B_OK) {
4417 		return B_BAD_ADDRESS;
4418 	}
4419 
4420 	return status;
4421 }
4422 
4423 
/*!	Syscall entry point for get_extended_team_info().
	Collects extended information about the given team into a flattened
	KMessage and copies it to the userland \a buffer. Always reports the
	required buffer size via \a _sizeNeeded, even when \a size is too small.
	\param teamID The team to query.
	\param flags Bit mask selecting the info to gather; currently only
		\c B_TEAM_INFO_BASIC is supported.
	\param buffer Userland buffer for the flattened message; may be \c NULL
		iff \a size is 0 (size-query mode).
	\param size Size of \a buffer in bytes.
	\param _sizeNeeded Userland pointer receiving the flattened message size.
	\return \c B_OK on success, \c B_BAD_ADDRESS for invalid pointers,
		\c B_BAD_TEAM_ID if the team doesn't exist, \c B_NO_MEMORY if
		building the message fails, or \c B_BUFFER_OVERFLOW if \a size is
		too small (after \a _sizeNeeded has been written).
*/
status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// allocate memory for a copy of the needed team data
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		} teamClone;

		io_context* ioContext;
		{
			// get the team structure
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);

			// Copy the data into the local clone while holding the team
			// lock, so we can release the lock before building the message.
			teamClone.id = team->id;
			strlcpy(teamClone.name, team->Name(), sizeof(teamClone.name));
			teamClone.group_id = team->group_id;
			teamClone.session_id = team->session_id;
			teamClone.real_uid = team->real_uid;
			teamClone.real_gid = team->real_gid;
			teamClone.effective_uid = team->effective_uid;
			teamClone.effective_gid = team->effective_gid;

			// also fetch a reference to the I/O context
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		// put the I/O context reference when we leave the scope
		CObjectDeleter<io_context, void, vfs_put_io_context>
			ioContextPutter(ioContext);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone.id) != B_OK
			|| info.AddString("name", teamClone.name) != B_OK
			|| info.AddInt32("process group", teamClone.group_id) != B_OK
			|| info.AddInt32("session", teamClone.session_id) != B_OK
			|| info.AddInt32("uid", teamClone.real_uid) != B_OK
			|| info.AddInt32("gid", teamClone.real_gid) != B_OK
			|| info.AddInt32("euid", teamClone.effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone.effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			MutexLocker ioContextLocker(ioContext->io_mutex);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
4517