xref: /haiku/src/system/kernel/team.cpp (revision 9e25244c5e9051f6cd333820d6332397361abd6c)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 /*!	Team functions */
13 
14 
15 #include <team.h>
16 
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22 
23 #include <OS.h>
24 
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27 
28 #include <extended_system_info_defs.h>
29 
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <safemode.h>
47 #include <sem.h>
48 #include <syscall_process_info.h>
49 #include <syscall_load_image.h>
50 #include <syscall_restart.h>
51 #include <syscalls.h>
52 #include <tls.h>
53 #include <tracing.h>
54 #include <user_runtime.h>
55 #include <user_thread.h>
56 #include <usergroup.h>
57 #include <vfs.h>
58 #include <vm/vm.h>
59 #include <vm/VMAddressSpace.h>
60 #include <util/AutoLock.h>
61 #include <util/ThreadAutoLock.h>
62 
63 #include "TeamThreadTables.h"
64 
65 
66 //#define TRACE_TEAM
67 #ifdef TRACE_TEAM
68 #	define TRACE(x) dprintf x
69 #else
70 #	define TRACE(x) ;
71 #endif
72 
73 
// Hash key type for looking up teams by their ID.
struct team_key {
	team_id id;
};
77 
/*!	Argument bundle handed from the caller into the team creation/exec
	code paths. The pointers refer to kernel-side copies of the userland
	data.
*/
struct team_arg {
	char	*path;				// path of the executable
	char	**flat_args;		// flattened argument/environment vector
	size_t	flat_args_size;		// size of the flat_args buffer
	uint32	arg_count;			// number of program arguments
	uint32	env_count;			// number of environment variables
	mode_t	umask;				// umask for the new team
	uint32	flags;				// TEAM_ARGS_FLAG_* flags (see below)
	port_id	error_port;			// port to report load errors to
	uint32	error_token;		// token identifying the request on error_port
};
89 
90 #define TEAM_ARGS_FLAG_NO_ASLR	0x01
91 
92 
93 namespace {
94 
95 
// Notification service broadcasting team events to registered listeners
// (service name "teams"; see the definitions further below).
class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};
102 
103 
104 // #pragma mark - TeamTable
105 
106 
107 typedef BKernel::TeamThreadTable<Team> TeamTable;
108 
109 
110 // #pragma mark - ProcessGroupHashDefinition
111 
112 
// BOpenHashTable definition mapping a process group ID to its
// ProcessGroup object. Groups are chained via ProcessGroup::next.
struct ProcessGroupHashDefinition {
	typedef pid_t			KeyType;
	typedef	ProcessGroup	ValueType;

	size_t HashKey(pid_t key) const
	{
		// the ID itself is a perfectly fine hash value
		return key;
	}

	size_t Hash(ProcessGroup* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(pid_t key, ProcessGroup* value) const
	{
		return value->id == key;
	}

	ProcessGroup*& GetLink(ProcessGroup* value) const
	{
		return value->next;
	}
};
137 
138 typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
139 
140 
141 }	// unnamed namespace
142 
143 
144 // #pragma mark -
145 
146 
// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static rw_spinlock sTeamHashLock = B_RW_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

// the kernel team (NULL until initialized elsewhere)
static Team* sKernelTeam = NULL;
static bool sDisableUserAddOns = false;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

// broadcasts team events to listeners (see TeamNotificationService above)
static TeamNotificationService sNotificationService;

// sizes used for a team's user data area
static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
173 
174 
175 // #pragma mark - TeamListIterator
176 
177 
178 TeamListIterator::TeamListIterator()
179 {
180 	// queue the entry
181 	InterruptsWriteSpinLocker locker(sTeamHashLock);
182 	sTeamHash.InsertIteratorEntry(&fEntry);
183 }
184 
185 
186 TeamListIterator::~TeamListIterator()
187 {
188 	// remove the entry
189 	InterruptsWriteSpinLocker locker(sTeamHashLock);
190 	sTeamHash.RemoveIteratorEntry(&fEntry);
191 }
192 
193 
194 Team*
195 TeamListIterator::Next()
196 {
197 	// get the next team -- if there is one, get reference for it
198 	InterruptsWriteSpinLocker locker(sTeamHashLock);
199 	Team* team = sTeamHash.NextElement(&fEntry);
200 	if (team != NULL)
201 		team->AcquireReference();
202 
203 	return team;
204 }
205 
206 
207 // #pragma mark - Tracing
208 
209 
210 #if TEAM_TRACING
211 namespace TeamTracing {
212 
// Trace entry recording a fork(): stores the ID of the main thread of the
// newly forked team.
class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
	}

private:
	thread_id			fForkedThread;
};
230 
231 
232 class ExecTeam : public AbstractTraceEntry {
233 public:
234 	ExecTeam(const char* path, int32 argCount, const char* const* args,
235 			int32 envCount, const char* const* env)
236 		:
237 		fArgCount(argCount),
238 		fArgs(NULL)
239 	{
240 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
241 			false);
242 
243 		// determine the buffer size we need for the args
244 		size_t argBufferSize = 0;
245 		for (int32 i = 0; i < argCount; i++)
246 			argBufferSize += strlen(args[i]) + 1;
247 
248 		// allocate a buffer
249 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
250 		if (fArgs) {
251 			char* buffer = fArgs;
252 			for (int32 i = 0; i < argCount; i++) {
253 				size_t argSize = strlen(args[i]) + 1;
254 				memcpy(buffer, args[i], argSize);
255 				buffer += argSize;
256 			}
257 		}
258 
259 		// ignore env for the time being
260 		(void)envCount;
261 		(void)env;
262 
263 		Initialized();
264 	}
265 
266 	virtual void AddDump(TraceOutput& out)
267 	{
268 		out.Print("team exec, \"%p\", args:", fPath);
269 
270 		if (fArgs != NULL) {
271 			char* args = fArgs;
272 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
273 				out.Print(" \"%s\"", args);
274 				args += strlen(args) + 1;
275 			}
276 		} else
277 			out.Print(" <too long>");
278 	}
279 
280 private:
281 	char*	fPath;
282 	int32	fArgCount;
283 	char*	fArgs;
284 };
285 
286 
287 static const char*
288 job_control_state_name(job_control_state state)
289 {
290 	switch (state) {
291 		case JOB_CONTROL_STATE_NONE:
292 			return "none";
293 		case JOB_CONTROL_STATE_STOPPED:
294 			return "stopped";
295 		case JOB_CONTROL_STATE_CONTINUED:
296 			return "continued";
297 		case JOB_CONTROL_STATE_DEAD:
298 			return "dead";
299 		default:
300 			return "invalid";
301 	}
302 }
303 
304 
// Trace entry recording a job control state change of a team, together
// with the number of the signal that caused it (0, if none).
class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %" B_PRId32 ", "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;	// signal number, 0 if none
};
328 
329 
// Trace entry recording the start of a wait-for-child operation: the child
// ID waited for and the wait flags.
class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %" B_PRId32 ", "
			"flags: %#" B_PRIx32, fChild, fFlags);
	}

private:
	pid_t	fChild;
	uint32	fFlags;
};
350 
351 
// Trace entry recording the result of a wait-for-child operation. The
// first constructor records a successful wait (data taken from the job
// control entry), the second an error; in the latter case fTeam doubles
// as the (negative) error code, which is how AddDump() tells the two
// cases apart.
class WaitForChildDone : public AbstractTraceEntry {
public:
	WaitForChildDone(const job_control_entry& entry)
		:
		fState(entry.state),
		fTeam(entry.thread),
		fStatus(entry.status),
		fReason(entry.reason),
		fSignal(entry.signal)
	{
		Initialized();
	}

	WaitForChildDone(status_t error)
		:
		fTeam(error)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		if (fTeam >= 0) {
			out.Print("team wait for child done, team: %" B_PRId32 ", "
				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
				fTeam, job_control_state_name(fState), fStatus, fReason,
				fSignal);
		} else {
			out.Print("team wait for child failed, error: "
				"%#" B_PRIx32 ", ", fTeam);
		}
	}

private:
	job_control_state	fState;
	team_id				fTeam;		// team ID, or error code if negative
	status_t			fStatus;
	uint16				fReason;
	uint16				fSignal;
};
392 
393 }	// namespace TeamTracing
394 
395 #	define T(x) new(std::nothrow) TeamTracing::x;
396 #else
397 #	define T(x) ;
398 #endif
399 
400 
401 //	#pragma mark - TeamNotificationService
402 
403 
// Registers the notification service under the name "teams".
TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}
408 
409 
410 void
411 TeamNotificationService::Notify(uint32 eventCode, Team* team)
412 {
413 	char eventBuffer[128];
414 	KMessage event;
415 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
416 	event.AddInt32("event", eventCode);
417 	event.AddInt32("team", team->id);
418 	event.AddPointer("teamStruct", team);
419 
420 	DefaultNotificationService::Notify(event, eventCode);
421 }
422 
423 
424 //	#pragma mark - Team
425 
426 
/*!	Initializes a team object with the given ID. All fields are set to
	"empty" defaults; the two allocations made here (job_control_entry and
	fQueuedSignalsCounter) may fail -- that is not reported here, but
	checked by Create().
*/
Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;

	// not yet linked into the hash table or any parent/child/group list
	hash_next = siblings_next = parent = children = group_next = NULL;
	serial_number = -1;

	group_id = session_id = -1;
	group = NULL;

	num_threads = 0;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	io_context = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	death_entry = NULL;
	list_init(&dead_threads);

	dead_children.condition_variable.Init(&dead_children, "team children");
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// may fail -- Create() checks for NULL
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	address_space = NULL;
	main_thread = NULL;
	thread_list = NULL;
	loading_info = NULL;

	list_init(&image_list);
	list_init(&watcher_list);
	list_init(&sem_list);
	list_init_etc(&port_list, port_team_link_offset());

	user_data = 0;
	user_data_area = -1;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	commpage_address = NULL;

	clear_team_debug_info(&debug_info, true);

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;
	B_INITIALIZE_SPINLOCK(&time_lock);

	saved_set_uid = real_uid = effective_uid = -1;
	saved_set_gid = real_gid = effective_gid = -1;

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	B_INITIALIZE_SPINLOCK(&signal_lock);

	// init mutex -- userland teams encode their ID in the lock name
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	fName[0] = '\0';
	fArgs[0] = '\0';

	// may fail, too -- also checked by Create(); the kernel team's signal
	// queuing is unlimited (-1)
	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));
	fUserDefinedTimerCount = 0;

	fCoreDumpCondition = NULL;
}
512 
513 
/*!	Frees all resources still associated with the team.
*/
Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	if (io_context != NULL)
		vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	// delete all of the team's user timers
	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	// free the death entries of threads that died while the team was alive
	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	// delete the job control entries of already dead children
	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	// free the cached user thread structures
	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}
549 
550 
551 /*static*/ Team*
552 Team::Create(team_id id, const char* name, bool kernel)
553 {
554 	// create the team object
555 	Team* team = new(std::nothrow) Team(id, kernel);
556 	if (team == NULL)
557 		return NULL;
558 	ObjectDeleter<Team> teamDeleter(team);
559 
560 	if (name != NULL)
561 		team->SetName(name);
562 
563 	// check initialization
564 	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
565 		return NULL;
566 
567 	// finish initialization (arch specifics)
568 	if (arch_team_init_team_struct(team, kernel) != B_OK)
569 		return NULL;
570 
571 	if (!kernel) {
572 		status_t error = user_timer_create_team_timers(team);
573 		if (error != B_OK)
574 			return NULL;
575 	}
576 
577 	// everything went fine
578 	return teamDeleter.Detach();
579 }
580 
581 
582 /*!	\brief Returns the team with the given ID.
583 	Returns a reference to the team.
584 	Team and thread spinlock must not be held.
585 */
586 /*static*/ Team*
587 Team::Get(team_id id)
588 {
589 	if (id == B_CURRENT_TEAM) {
590 		Team* team = thread_get_current_thread()->team;
591 		team->AcquireReference();
592 		return team;
593 	}
594 
595 	InterruptsReadSpinLocker locker(sTeamHashLock);
596 	Team* team = sTeamHash.Lookup(id);
597 	if (team != NULL)
598 		team->AcquireReference();
599 	return team;
600 }
601 
602 
603 /*!	\brief Returns the team with the given ID in a locked state.
604 	Returns a reference to the team.
605 	Team and thread spinlock must not be held.
606 */
607 /*static*/ Team*
608 Team::GetAndLock(team_id id)
609 {
610 	// get the team
611 	Team* team = Get(id);
612 	if (team == NULL)
613 		return NULL;
614 
615 	// lock it
616 	team->Lock();
617 
618 	// only return the team, when it isn't already dying
619 	if (team->state >= TEAM_STATE_SHUTDOWN) {
620 		team->Unlock();
621 		team->ReleaseReference();
622 		return NULL;
623 	}
624 
625 	return team;
626 }
627 
628 
/*!	Locks the team and its parent team (if any).
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked. If the
	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
	only the team itself is locked.

	\param dontLockParentIfKernel If \c true, the team's parent team is only
		locked, if it is not the kernel team.
*/
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent.This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);

		// Unlocking first and then acquiring the locks in parent -> child
		// order avoids a lock order inversion (and thus deadlock).
		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}
671 
672 
/*!	Unlocks the team and its parent team (if any).
	Counterpart of LockTeamAndParent().
*/
void
Team::UnlockTeamAndParent()
{
	if (parent != NULL)
		parent->Unlock();

	Unlock();
}
683 
684 
/*!	Locks the team, its parent team (if any), and the team's process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked.
*/
void
Team::LockTeamParentAndProcessGroup()
{
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's lock.
	// If we have a parent, try to lock it.
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest of
	// the job.
	Unlock();
	LockTeamAndParent(false);
}
705 
706 
/*!	Unlocks the team, its parent team (if any), and the team's process group.
	Counterpart of LockTeamParentAndProcessGroup().
*/
void
Team::UnlockTeamParentAndProcessGroup()
{
	group->Unlock();

	if (parent != NULL)
		parent->Unlock();

	Unlock();
}
719 
720 
/*!	Locks the team and its process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
*/
void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);

		// Unlocking first and then acquiring the locks in group -> team
		// order avoids a lock order inversion (and thus deadlock).
		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}
752 
753 
/*!	Unlocks the team and its process group.
	Counterpart of LockTeamAndProcessGroup().
*/
void
Team::UnlockTeamAndProcessGroup()
{
	group->Unlock();
	Unlock();
}
760 
761 
762 void
763 Team::SetName(const char* name)
764 {
765 	if (const char* lastSlash = strrchr(name, '/'))
766 		name = lastSlash + 1;
767 
768 	strlcpy(fName, name, B_OS_NAME_LENGTH);
769 }
770 
771 
/*!	Stores the given string as the team's argument string, truncated to
	the size of the fArgs buffer.
*/
void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}
777 
778 
779 void
780 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
781 {
782 	fArgs[0] = '\0';
783 	strlcpy(fArgs, path, sizeof(fArgs));
784 	for (int i = 0; i < otherArgCount; i++) {
785 		strlcat(fArgs, " ", sizeof(fArgs));
786 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
787 	}
788 }
789 
790 
791 void
792 Team::ResetSignalsOnExec()
793 {
794 	// We are supposed to keep pending signals. Signal actions shall be reset
795 	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
796 	// (for SIGCHLD it's implementation-defined). Others shall be reset to
797 	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
798 	// flags, but since there aren't any handlers, they make little sense, so
799 	// we clear them.
800 
801 	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
802 		struct sigaction& action = SignalActionFor(i);
803 		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
804 			action.sa_handler = SIG_DFL;
805 
806 		action.sa_mask = 0;
807 		action.sa_flags = 0;
808 		action.sa_userdata = NULL;
809 	}
810 }
811 
812 
/*!	Copies all signal actions from the given (parent) team, e.g. on fork().
*/
void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}
818 
819 
820 /*!	Adds the given user timer to the team and, if user-defined, assigns it an
821 	ID.
822 
823 	The caller must hold the team's lock.
824 
825 	\param timer The timer to be added. If it doesn't have an ID yet, it is
826 		considered user-defined and will be assigned an ID.
827 	\return \c B_OK, if the timer was added successfully, another error code
828 		otherwise.
829 */
830 status_t
831 Team::AddUserTimer(UserTimer* timer)
832 {
833 	// don't allow addition of timers when already shutting the team down
834 	if (state >= TEAM_STATE_SHUTDOWN)
835 		return B_BAD_TEAM_ID;
836 
837 	// If the timer is user-defined, check timer limit and increment
838 	// user-defined count.
839 	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
840 		return EAGAIN;
841 
842 	fUserTimers.AddTimer(timer);
843 
844 	return B_OK;
845 }
846 
847 
848 /*!	Removes the given user timer from the team.
849 
850 	The caller must hold the team's lock.
851 
852 	\param timer The timer to be removed.
853 
854 */
855 void
856 Team::RemoveUserTimer(UserTimer* timer)
857 {
858 	fUserTimers.RemoveTimer(timer);
859 
860 	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
861 		UserDefinedTimersRemoved(1);
862 }
863 
864 
865 /*!	Deletes all (or all user-defined) user timers of the team.
866 
867 	Timer's belonging to the team's threads are not affected.
868 	The caller must hold the team's lock.
869 
870 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
871 		otherwise all timers are deleted.
872 */
873 void
874 Team::DeleteUserTimers(bool userDefinedOnly)
875 {
876 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
877 	UserDefinedTimersRemoved(count);
878 }
879 
880 
881 /*!	If not at the limit yet, increments the team's user-defined timer count.
882 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
883 */
884 bool
885 Team::CheckAddUserDefinedTimer()
886 {
887 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
888 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
889 		atomic_add(&fUserDefinedTimerCount, -1);
890 		return false;
891 	}
892 
893 	return true;
894 }
895 
896 
/*!	Subtracts the given count for the team's user-defined timer count.
	\param count The count to subtract.
*/
void
Team::UserDefinedTimersRemoved(int32 count)
{
	atomic_add(&fUserDefinedTimerCount, -count);
}
905 
906 
/*!	Deactivates all of the team's CPU time and user CPU time timers.
	NOTE(review): the loops rely on Deactivate() removing the timer from
	the respective list -- otherwise they would not terminate; confirm
	against the UserTimer implementation.
*/
void
Team::DeactivateCPUTimeUserTimers()
{
	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();

	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
		timer->Deactivate();
}
916 
917 
/*!	Returns the team's current total CPU time (kernel + user + offset).

	The caller must hold \c time_lock.

	\param ignoreCurrentRun If \c true and the current thread is one team's
		threads, don't add the time since the last time \c last_time was
		updated. Should be used in "thread unscheduled" scheduler callbacks,
		since although the thread is still running at that time, its time has
		already been stopped.
	\param lockedThread The thread whose \c time_lock the caller already
		holds, or \c NULL.
	\return The team's current total CPU time.
*/
bigtime_t
Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
{
	// start with the time no longer attributed to live threads
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		// don't re-lock the thread's time_lock, if the caller holds it already
		bool alreadyLocked = thread == lockedThread;
		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
		time += thread->kernel_time + thread->user_time;

		// add the time since the thread's times were last updated
		if (thread->last_time != 0) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}

		if (alreadyLocked)
			threadTimeLocker.Detach();
			// the caller's lock must stay locked
	}

	return time;
}
955 
956 
957 /*!	Returns the team's current user CPU time.
958 
959 	The caller must hold \c time_lock.
960 
961 	\return The team's current user CPU time.
962 */
963 bigtime_t
964 Team::UserCPUTime() const
965 {
966 	bigtime_t time = dead_threads_user_time;
967 
968 	bigtime_t now = system_time();
969 
970 	for (Thread* thread = thread_list; thread != NULL;
971 			thread = thread->team_next) {
972 		SpinLocker threadTimeLocker(thread->time_lock);
973 		time += thread->user_time;
974 
975 		if (thread->last_time != 0 && !thread->in_kernel)
976 			time += now - thread->last_time;
977 	}
978 
979 	return time;
980 }
981 
982 
983 //	#pragma mark - ProcessGroup
984 
985 
986 ProcessGroup::ProcessGroup(pid_t id)
987 	:
988 	id(id),
989 	teams(NULL),
990 	fSession(NULL),
991 	fInOrphanedCheckList(false)
992 {
993 	char lockName[32];
994 	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
995 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
996 }
997 
998 
ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session; a group
	// without a session was never published (cf. PublishLocked())
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}
1022 
1023 
1024 /*static*/ ProcessGroup*
1025 ProcessGroup::Get(pid_t id)
1026 {
1027 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1028 	ProcessGroup* group = sGroupHash.Lookup(id);
1029 	if (group != NULL)
1030 		group->AcquireReference();
1031 	return group;
1032 }
1033 
1034 
/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must not hold the process group hash lock.
*/
void
ProcessGroup::Publish(ProcessSession* session)
{
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	PublishLocked(session);
}
1044 
1045 
/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must hold the process group hash lock.
*/
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	// a group with this ID must not be published yet
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	fSession = session;
	fSession->AcquireReference();

	sGroupHash.InsertUnchecked(this);
}
1059 
1060 
/*!	Checks whether the process group is orphaned.
	The caller must hold the group's lock.
	\return \c true, if the group is orphaned, \c false otherwise.
*/
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		// a parent in the same session but a different group disproves
		// orphanhood
		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}
1090 
1091 
1092 void
1093 ProcessGroup::ScheduleOrphanedCheck()
1094 {
1095 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1096 
1097 	if (!fInOrphanedCheckList) {
1098 		sOrphanedCheckProcessGroups.Add(this);
1099 		fInOrphanedCheckList = true;
1100 	}
1101 }
1102 
1103 
/*!	Clears the group's orphaned-check flag. NOTE(review): unlike
	ScheduleOrphanedCheck() this does not touch sOrphanedCheckProcessGroups
	nor take sOrphanedCheckLock -- presumably the caller removes the group
	from the list itself while holding the lock; confirm at the call sites.
*/
void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}
1109 
1110 
1111 //	#pragma mark - ProcessSession
1112 
1113 
1114 ProcessSession::ProcessSession(pid_t id)
1115 	:
1116 	id(id),
1117 	controlling_tty(-1),
1118 	foreground_group(-1)
1119 {
1120 	char lockName[32];
1121 	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
1122 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
1123 }
1124 
1125 
ProcessSession::~ProcessSession()
{
	// only the lock needs explicit cleanup
	mutex_destroy(&fLock);
}
1130 
1131 
1132 //	#pragma mark - KDL functions
1133 
1134 
/*!	KDL helper: prints the fields of the given team. Expects a valid
	pointer; used by the "team"/"teams" debugger commands.
*/
static void
_dump_team_info(Team* team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
		team->id);
	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
	kprintf("name:             '%s'\n", team->Name());
	kprintf("args:             '%s'\n", team->Args());
	kprintf("hash_next:        %p\n", team->hash_next);
	kprintf("parent:           %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:         %p\n", team->children);
	kprintf("num_threads:      %d\n", team->num_threads);
	kprintf("state:            %d\n", team->state);
	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
	kprintf("io_context:       %p\n", team->io_context);
	if (team->address_space)
		kprintf("address_space:    %p\n", team->address_space);
	kprintf("user data:        %p (area %" B_PRId32 ")\n",
		(void*)team->user_data, team->user_data_area);
	kprintf("free user thread: %p\n", team->free_user_threads);
	kprintf("main_thread:      %p\n", team->main_thread);
	kprintf("thread_list:      %p\n", team->thread_list);
	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
}
1166 
1167 
/*!	KDL command: prints info about a single team. The argument may be a
	team ID, a team name, or (as a semi-hack) the kernel address of a Team
	object; without an argument the current thread's team is dumped.
*/
static int
dump_team_info(int argc, char** argv)
{
	ulong arg;
	bool found = false;

	if (argc < 2) {
		// no argument given -- dump the current team
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	arg = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(arg)) {
		// semi-hack
		_dump_team_info((Team*)arg);
		return 0;
	}

	// walk through the thread list, trying to match name or id
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == (team_id)arg) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found)
		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
	return 0;
}
1205 
1206 
/*!	KDL command: lists all teams with their address, ID, parent, and name.
*/
static int
dump_teams(int argc, char** argv)
{
	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
		B_PRINTF_POINTER_WIDTH, "parent");

	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
	}

	return 0;
}
1220 
1221 
1222 //	#pragma mark - Private functions
1223 
1224 
1225 /*! Get the parent of a given process.
1226 
1227 	Used in the implementation of getppid (where a process can get its own
1228 	parent, only) as well as in user_process_info where the information is
1229 	available to anyone (allowing to display a tree of running processes)
1230 */
1231 static pid_t
1232 _getppid(pid_t id)
1233 {
1234 	if (id < 0) {
1235 		errno = EINVAL;
1236 		return -1;
1237 	}
1238 
1239 	if (id == 0) {
1240 		Team* team = thread_get_current_thread()->team;
1241 		TeamLocker teamLocker(team);
1242 		if (team->parent == NULL) {
1243 			errno = EINVAL;
1244 			return -1;
1245 		}
1246 		return team->parent->id;
1247 	}
1248 
1249 	Team* team = Team::GetAndLock(id);
1250 	if (team == NULL) {
1251 		errno = ESRCH;
1252 		return -1;
1253 	}
1254 
1255 	pid_t parentID;
1256 
1257 	if (team->parent == NULL) {
1258 		errno = EINVAL;
1259 		parentID = -1;
1260 	} else
1261 		parentID = team->parent->id;
1262 
1263 	team->UnlockAndReleaseReference();
1264 
1265 	return parentID;
1266 }
1267 
1268 
1269 /*!	Inserts team \a team into the child list of team \a parent.
1270 
1271 	The caller must hold the lock of both \a parent and \a team.
1272 
1273 	\param parent The parent team.
1274 	\param team The team to be inserted into \a parent's child list.
1275 */
static void
insert_team_into_parent(Team* parent, Team* team)
{
	ASSERT(parent != NULL);

	// push the team at the head of the parent's singly linked child list
	// and set its back pointer
	team->siblings_next = parent->children;
	parent->children = team;
	team->parent = parent;
}
1285 
1286 
1287 /*!	Removes team \a team from the child list of team \a parent.
1288 
1289 	The caller must hold the lock of both \a parent and \a team.
1290 
1291 	\param parent The parent team.
1292 	\param team The team to be removed from \a parent's child list.
1293 */
1294 static void
1295 remove_team_from_parent(Team* parent, Team* team)
1296 {
1297 	Team* child;
1298 	Team* last = NULL;
1299 
1300 	for (child = parent->children; child != NULL;
1301 			child = child->siblings_next) {
1302 		if (child == team) {
1303 			if (last == NULL)
1304 				parent->children = child->siblings_next;
1305 			else
1306 				last->siblings_next = child->siblings_next;
1307 
1308 			team->parent = NULL;
1309 			break;
1310 		}
1311 		last = child;
1312 	}
1313 }
1314 
1315 
1316 /*!	Returns whether the given team is a session leader.
1317 	The caller must hold the team's lock or its process group's lock.
1318 */
static bool
is_session_leader(Team* team)
{
	// the session leader is the team whose ID equals the session's ID
	return team->session_id == team->id;
}
1324 
1325 
1326 /*!	Returns whether the given team is a process group leader.
1327 	The caller must hold the team's lock or its process group's lock.
1328 */
static bool
is_process_group_leader(Team* team)
{
	// the group leader is the team whose ID equals the group's ID
	return team->group_id == team->id;
}
1334 
1335 
1336 /*!	Inserts the given team into the given process group.
1337 	The caller must hold the process group's lock, the team's lock, and the
1338 	team's parent's lock.
1339 */
static void
insert_team_into_group(ProcessGroup* group, Team* team)
{
	// adopt the group's and its session's IDs
	team->group = group;
	team->group_id = group->id;
	team->session_id = group->Session()->id;

	// push the team at the head of the group's team list; the team's
	// membership holds a reference to the group
	team->group_next = group->teams;
	group->teams = team;
	group->AcquireReference();
}
1351 
1352 
1353 /*!	Removes the given team from its process group.
1354 
1355 	The caller must hold the process group's lock, the team's lock, and the
1356 	team's parent's lock. Interrupts must be enabled.
1357 
1358 	\param team The team that'll be removed from its process group.
1359 */
1360 static void
1361 remove_team_from_group(Team* team)
1362 {
1363 	ProcessGroup* group = team->group;
1364 	Team* current;
1365 	Team* last = NULL;
1366 
1367 	// the team must be in a process group to let this function have any effect
1368 	if (group == NULL)
1369 		return;
1370 
1371 	for (current = group->teams; current != NULL;
1372 			current = current->group_next) {
1373 		if (current == team) {
1374 			if (last == NULL)
1375 				group->teams = current->group_next;
1376 			else
1377 				last->group_next = current->group_next;
1378 
1379 			break;
1380 		}
1381 		last = current;
1382 	}
1383 
1384 	team->group = NULL;
1385 	team->group_next = NULL;
1386 	team->group_id = -1;
1387 
1388 	group->ReleaseReference();
1389 }
1390 
1391 
/*!	Creates the team's user data area, from which e.g. the per-thread
	user_thread structures are allocated (cf. team_allocate_user_thread()).

	On success the team's \c user_data, \c user_data_area,
	\c used_user_data, \c user_data_size, and \c free_user_threads fields
	are initialized.

	\param team The team to create the area for.
	\param exactAddress If not \c NULL, the area must be created at exactly
		this address (used on fork(), where the child has to mirror the
		parent's layout). Otherwise a randomized address based at
		\c KERNEL_USER_DATA_BASE is chosen.
	\return \c B_OK on success, an error code (the failed area ID) otherwise.
*/
static status_t
create_team_user_data(Team* team, void* exactAddress = NULL)
{
	void* address;
	uint32 addressSpec;

	if (exactAddress != NULL) {
		address = exactAddress;
		addressSpec = B_EXACT_ADDRESS;
	} else {
		address = (void*)KERNEL_USER_DATA_BASE;
		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
	}

	// Try to reserve the whole address range the area may later grow into.
	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);

	virtual_address_restrictions virtualRestrictions = {};
	if (result == B_OK || exactAddress != NULL) {
		// Either the reservation succeeded (use the address it yielded), or
		// an exact address was requested -- in both cases the area must be
		// created at a fixed address.
		if (exactAddress != NULL)
			virtualRestrictions.address = exactAddress;
		else
			virtualRestrictions.address = address;
		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	} else {
		// reservation failed -- fall back to letting the area placement pick
		// a randomized address without a reservation
		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
	}

	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area",
		kTeamUserDataInitialSize, B_FULL_LOCK,
		B_READ_AREA | B_WRITE_AREA | B_KERNEL_AREA, 0, 0,
		&virtualRestrictions, &physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	// initialize the team's user data bookkeeping
	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = kTeamUserDataInitialSize;
	team->free_user_threads = NULL;

	return B_OK;
}
1436 
1437 
1438 static void
1439 delete_team_user_data(Team* team)
1440 {
1441 	if (team->user_data_area >= 0) {
1442 		vm_delete_area(team->id, team->user_data_area, true);
1443 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1444 			kTeamUserDataReservedSize);
1445 
1446 		team->user_data = 0;
1447 		team->used_user_data = 0;
1448 		team->user_data_size = 0;
1449 		team->user_data_area = -1;
1450 		while (free_user_thread* entry = team->free_user_threads) {
1451 			team->free_user_threads = entry->next;
1452 			free(entry);
1453 		}
1454 	}
1455 }
1456 
1457 
1458 static status_t
1459 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1460 	int32 argCount, int32 envCount, char**& _flatArgs)
1461 {
1462 	if (argCount < 0 || envCount < 0)
1463 		return B_BAD_VALUE;
1464 
1465 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1466 		return B_TOO_MANY_ARGS;
1467 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1468 		return B_BAD_VALUE;
1469 
1470 	if (!IS_USER_ADDRESS(userFlatArgs))
1471 		return B_BAD_ADDRESS;
1472 
1473 	// allocate kernel memory
1474 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1475 	if (flatArgs == NULL)
1476 		return B_NO_MEMORY;
1477 
1478 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1479 		free(flatArgs);
1480 		return B_BAD_ADDRESS;
1481 	}
1482 
1483 	// check and relocate the array
1484 	status_t error = B_OK;
1485 	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
1486 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1487 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1488 		if (i == argCount || i == argCount + envCount + 1) {
1489 			// check array null termination
1490 			if (flatArgs[i] != NULL) {
1491 				error = B_BAD_VALUE;
1492 				break;
1493 			}
1494 		} else {
1495 			// check string
1496 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1497 			size_t maxLen = stringEnd - arg;
1498 			if (arg < stringBase || arg >= stringEnd
1499 					|| strnlen(arg, maxLen) == maxLen) {
1500 				error = B_BAD_VALUE;
1501 				break;
1502 			}
1503 
1504 			flatArgs[i] = arg;
1505 		}
1506 	}
1507 
1508 	if (error == B_OK)
1509 		_flatArgs = flatArgs;
1510 	else
1511 		free(flatArgs);
1512 
1513 	return error;
1514 }
1515 
1516 
1517 static void
1518 free_team_arg(struct team_arg* teamArg)
1519 {
1520 	if (teamArg != NULL) {
1521 		free(teamArg->flat_args);
1522 		free(teamArg->path);
1523 		free(teamArg);
1524 	}
1525 }
1526 
1527 
1528 static status_t
1529 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1530 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1531 	port_id port, uint32 token)
1532 {
1533 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1534 	if (teamArg == NULL)
1535 		return B_NO_MEMORY;
1536 
1537 	teamArg->path = strdup(path);
1538 	if (teamArg->path == NULL) {
1539 		free(teamArg);
1540 		return B_NO_MEMORY;
1541 	}
1542 
1543 	// copy the args over
1544 	teamArg->flat_args = flatArgs;
1545 	teamArg->flat_args_size = flatArgsSize;
1546 	teamArg->arg_count = argCount;
1547 	teamArg->env_count = envCount;
1548 	teamArg->flags = 0;
1549 	teamArg->umask = umask;
1550 	teamArg->error_port = port;
1551 	teamArg->error_token = token;
1552 
1553 	// determine the flags from the environment
1554 	const char* const* env = flatArgs + argCount + 1;
1555 	for (int32 i = 0; i < envCount; i++) {
1556 		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
1557 			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
1558 			break;
1559 		}
1560 	}
1561 
1562 	*_teamArg = teamArg;
1563 	return B_OK;
1564 }
1565 
1566 
/*!	Second stage of team creation, executed in the context of the new team's
	main thread, i.e. in the new team's own address space.

	Copies the program arguments and environment onto the main thread's user
	stack, clones and registers the commpage, loads the runtime loader image,
	and finally enters userspace. On success this function does not return.

	\param args The \c team_arg structure; it is freed here once its contents
		have been copied to the user stack.
	\return An error code if any step fails; partially-initialized state is
		cleaned up by the normal team deletion process.
*/
static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
		thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size								| usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
	// TLS_SIZE							| TLS data
	// sizeof(user_space_program_args)	| argument structure for the runtime
	//									| loader
	// flat arguments size				| flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	// the argument structure lives above the stack and the TLS block
	// (cf. the layout table above)
	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	// the flat arguments follow the argument structure; the environment
	// pointers come after the argument pointers and their NULL terminator
	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	// copy path, counts, pointers, error port/token, umask, and the flat
	// arguments to the user stack -- any failure means bad user addresses
	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(&programArgs->disable_user_addons,
			&sDisableUserAddOns, sizeof(bool)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// set team args and update state
	team->Lock();
	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	// Clone commpage area
	area_id commPageArea = clone_commpage_area(team->id,
		&team->commpage_address);
	if (commPageArea  < B_OK) {
		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
			strerror(commPageArea)));
		return commPageArea;
	}

	// Register commpage image
	image_id commPageImage = get_commpage_image();
	extended_image_info imageInfo;
	err = get_image_info(commPageImage, &imageInfo.basic_info);
	if (err != B_OK) {
		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
			strerror(err)));
		return err;
	}
	// adjust the image info to the team-local clone of the commpage
	imageInfo.basic_info.text = team->commpage_address;
	imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
	imageInfo.symbol_table = NULL;
	imageInfo.symbol_hash = NULL;
	imageInfo.string_table = NULL;
	image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
	if (image < 0) {
		TRACE(("team_create_thread_start: register_image() failed: %s\n",
			strerror(image)));
		return image;
	}

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	// enter userspace -- returns only in case of error
	return thread_enter_userspace_new_team(thread, (addr_t)entry,
		programArgs, team->commpage_address);
}
1709 
1710 
/*!	Entry point of a new team's main thread. Runs the actual setup
	(team_create_thread_start_internal(), which only returns on error),
	records the error in the team's exit info, and exits the thread.
	Never returns.
*/
static status_t
team_create_thread_start(void* args)
{
	team_create_thread_start_internal(args);
	team_init_exit_info_on_error(thread_get_current_thread()->team);
	thread_exit();
		// does not return
	return B_OK;
}
1720 
1721 
/*!	Creates a new team (userland process) executing the program at
	\c _flatArgs[0], as a child of the team \a parentID.

	\param _flatArgs The flat process arguments (pointer array plus string
		data). On success ownership passes to the created team and the
		reference is set to \c NULL.
	\param flatArgsSize Size of the flat arguments in bytes.
	\param argCount Number of program arguments; must be >= 1.
	\param envCount Number of environment variables.
	\param priority Currently unused here -- the main thread is created with
		\c B_NORMAL_PRIORITY below.
	\param parentID ID of the team that becomes the new team's parent.
	\param flags If \c B_WAIT_TILL_LOADED is set, waits until the runtime
		loader has finished (or aborted) loading before returning.
	\param errorPort Port passed on to the new team via its
		\c user_space_program_args (error reporting channel).
	\param errorToken Token accompanying \a errorPort.
	\return The ID of the new team's main thread, or an error code.
*/
static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	thread_id thread;
	status_t status;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	ConditionVariableEntry loadingWaitEntry;
	io_context* parentIOContext = NULL;
	team_id teamID;
	bool teamLimitReached = false;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	// the first argument is the path of the program to execute
	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
		"\n", path, flatArgs, argCount));

	// cut the path from the main thread name
	const char* threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the main thread object
	Thread* mainThread;
	status = Thread::Create(threadName, mainThread);
	if (status != B_OK)
		return status;
	BReference<Thread> mainThreadReference(mainThread, true);

	// create team object
	Team* team = Team::Create(mainThread->id, path, false);
	if (team == NULL)
		return B_NO_MEMORY;
	BReference<Team> teamReference(team, true);

	// set up the structure used to wait for the loader, if requested
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		loadingInfo.condition.Init(team, "image load");
		loadingInfo.condition.Add(&loadingWaitEntry);
		loadingInfo.result = B_ERROR;
		team->loading_info = &loadingInfo;
	}

	// get the parent team
	Team* parent = Team::Get(parentID);
	if (parent == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> parentReference(parent, true);

	parent->LockTeamAndProcessGroup();
	team->Lock();

	// inherit the parent's user/group
	inherit_parent_user_and_group(team, parent);

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err2;

	// ASLR can be disabled via the team args (DISABLE_ASLR=1 environment)
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// insert the team into its parent and the teams hash
	parent->LockTeamAndProcessGroup();
	team->Lock();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);

		// The team is inserted unconditionally; if the limit is exceeded,
		// it is torn down again below after TEAM_ADDED was notified.
		sTeamHash.Insert(team);
		teamLimitReached = sUsedTeams >= sMaxTeams;
		if (!teamLimitReached)
			sUsedTeams++;
	}

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err6;
	}

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err6;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		if (mainThread != NULL) {
			// resume the team's main thread
			thread_continue(mainThread);
		}

		// Now wait until loading is finished. We will be woken either by the
		// thread, when it finished or aborted loading, or when the team is
		// going to die (e.g. is killed). In either case the one notifying is
		// responsible for unsetting `loading_info` in the team structure.
		loadingWaitEntry.Wait();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(teamID);

	return thread;

err6:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
		sTeamHash.Remove(team);
		if (!teamLimitReached)
			sUsedTeams--;
	}

	sNotificationService.Notify(TEAM_REMOVED, team);

	delete_team_user_data(team);
err4:
	team->address_space->Put();
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	return status;
}
1933 
1934 
1935 /*!	Almost shuts down the current team and loads a new image into it.
1936 	If successful, this function does not return and will takeover ownership of
1937 	the arguments provided.
1938 	This function may only be called in a userland team (caused by one of the
1939 	exec*() syscalls).
1940 */
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
		team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	// exec resets per-team user timers and signal dispositions
	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	// build the team_arg before tearing anything down, so we can still back
	// out cleanly on allocation failure
	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	// Tear down the old userland image: user data area, all areas, XSI undo
	// state, owned ports and semaphores, loaded images, CLOEXEC fds, and the
	// realtime semaphore context. From here on there is no way back.
	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	// update ASLR
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
			// does never return
	} else
		free_team_arg(teamArgs);

	// Sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that).
	exit_thread(status);

	// We return a status here since the signal that is sent by the
	// call above is not immediately handled.
	return B_ERROR;
}
2081 
2082 
2083 static thread_id
2084 fork_team(void)
2085 {
2086 	Thread* parentThread = thread_get_current_thread();
2087 	Team* parentTeam = parentThread->team;
2088 	Team* team;
2089 	arch_fork_arg* forkArgs;
2090 	struct area_info info;
2091 	thread_id threadID;
2092 	status_t status;
2093 	ssize_t areaCookie;
2094 	bool teamLimitReached = false;
2095 
2096 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2097 
2098 	if (parentTeam == team_get_kernel_team())
2099 		return B_NOT_ALLOWED;
2100 
2101 	// create a new team
2102 	// TODO: this is very similar to load_image_internal() - maybe we can do
2103 	// something about it :)
2104 
2105 	// create the main thread object
2106 	Thread* thread;
2107 	status = Thread::Create(parentThread->name, thread);
2108 	if (status != B_OK)
2109 		return status;
2110 	BReference<Thread> threadReference(thread, true);
2111 
2112 	// create the team object
2113 	team = Team::Create(thread->id, NULL, false);
2114 	if (team == NULL)
2115 		return B_NO_MEMORY;
2116 
2117 	parentTeam->LockTeamAndProcessGroup();
2118 	team->Lock();
2119 
2120 	team->SetName(parentTeam->Name());
2121 	team->SetArgs(parentTeam->Args());
2122 
2123 	team->commpage_address = parentTeam->commpage_address;
2124 
2125 	// Inherit the parent's user/group.
2126 	inherit_parent_user_and_group(team, parentTeam);
2127 
2128 	// inherit signal handlers
2129 	team->InheritSignalActions(parentTeam);
2130 
2131 	team->Unlock();
2132 	parentTeam->UnlockTeamAndProcessGroup();
2133 
2134 	// inherit some team debug flags
2135 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2136 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2137 
2138 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2139 	if (forkArgs == NULL) {
2140 		status = B_NO_MEMORY;
2141 		goto err1;
2142 	}
2143 
2144 	// create a new io_context for this team
2145 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2146 	if (!team->io_context) {
2147 		status = B_NO_MEMORY;
2148 		goto err2;
2149 	}
2150 
2151 	// duplicate the realtime sem context
2152 	if (parentTeam->realtime_sem_context) {
2153 		team->realtime_sem_context = clone_realtime_sem_context(
2154 			parentTeam->realtime_sem_context);
2155 		if (team->realtime_sem_context == NULL) {
2156 			status = B_NO_MEMORY;
2157 			goto err2;
2158 		}
2159 	}
2160 
2161 	// create an address space for this team
2162 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2163 		&team->address_space);
2164 	if (status < B_OK)
2165 		goto err3;
2166 
2167 	// copy all areas of the team
2168 	// TODO: should be able to handle stack areas differently (ie. don't have
2169 	// them copy-on-write)
2170 
2171 	areaCookie = 0;
2172 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2173 		if (info.area == parentTeam->user_data_area) {
2174 			// don't clone the user area; just create a new one
2175 			status = create_team_user_data(team, info.address);
2176 			if (status != B_OK)
2177 				break;
2178 
2179 			thread->user_thread = team_allocate_user_thread(team);
2180 		} else {
2181 			void* address;
2182 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2183 				&address, B_CLONE_ADDRESS, info.area);
2184 			if (area < B_OK) {
2185 				status = area;
2186 				break;
2187 			}
2188 
2189 			if (info.area == parentThread->user_stack_area)
2190 				thread->user_stack_area = area;
2191 		}
2192 	}
2193 
2194 	if (status < B_OK)
2195 		goto err4;
2196 
2197 	if (thread->user_thread == NULL) {
2198 #if KDEBUG
2199 		panic("user data area not found, parent area is %" B_PRId32,
2200 			parentTeam->user_data_area);
2201 #endif
2202 		status = B_ERROR;
2203 		goto err4;
2204 	}
2205 
2206 	thread->user_stack_base = parentThread->user_stack_base;
2207 	thread->user_stack_size = parentThread->user_stack_size;
2208 	thread->user_local_storage = parentThread->user_local_storage;
2209 	thread->sig_block_mask = parentThread->sig_block_mask;
2210 	thread->signal_stack_base = parentThread->signal_stack_base;
2211 	thread->signal_stack_size = parentThread->signal_stack_size;
2212 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2213 
2214 	arch_store_fork_frame(forkArgs);
2215 
2216 	// copy image list
2217 	if (copy_images(parentTeam->id, team) != B_OK)
2218 		goto err5;
2219 
2220 	// insert the team into its parent and the teams hash
2221 	parentTeam->LockTeamAndProcessGroup();
2222 	team->Lock();
2223 
2224 	{
2225 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2226 
2227 		sTeamHash.Insert(team);
2228 		teamLimitReached = sUsedTeams >= sMaxTeams;
2229 		if (!teamLimitReached)
2230 			sUsedTeams++;
2231 	}
2232 
2233 	insert_team_into_parent(parentTeam, team);
2234 	insert_team_into_group(parentTeam->group, team);
2235 
2236 	team->Unlock();
2237 	parentTeam->UnlockTeamAndProcessGroup();
2238 
2239 	// notify team listeners
2240 	sNotificationService.Notify(TEAM_ADDED, team);
2241 
2242 	if (teamLimitReached) {
2243 		status = B_NO_MORE_TEAMS;
2244 		goto err6;
2245 	}
2246 
2247 	// create the main thread
2248 	{
2249 		ThreadCreationAttributes threadCreationAttributes(NULL,
2250 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2251 		threadCreationAttributes.forkArgs = forkArgs;
2252 		threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS;
2253 		threadID = thread_create_thread(threadCreationAttributes, false);
2254 		if (threadID < 0) {
2255 			status = threadID;
2256 			goto err6;
2257 		}
2258 	}
2259 
2260 	// notify the debugger
2261 	user_debug_team_created(team->id);
2262 
2263 	T(TeamForked(threadID));
2264 
2265 	resume_thread(threadID);
2266 	return threadID;
2267 
2268 err6:
2269 	// Remove the team structure from the process group, the parent team, and
2270 	// the team hash table and delete the team structure.
2271 	parentTeam->LockTeamAndProcessGroup();
2272 	team->Lock();
2273 
2274 	remove_team_from_group(team);
2275 	remove_team_from_parent(team->parent, team);
2276 
2277 	team->Unlock();
2278 	parentTeam->UnlockTeamAndProcessGroup();
2279 
2280 	{
2281 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2282 		sTeamHash.Remove(team);
2283 		if (!teamLimitReached)
2284 			sUsedTeams--;
2285 	}
2286 
2287 	sNotificationService.Notify(TEAM_REMOVED, team);
2288 err5:
2289 	remove_images(team);
2290 err4:
2291 	team->address_space->RemoveAndPut();
2292 err3:
2293 	delete_realtime_sem_context(team->realtime_sem_context);
2294 err2:
2295 	free(forkArgs);
2296 err1:
2297 	team->ReleaseReference();
2298 
2299 	return status;
2300 }
2301 
2302 
2303 /*!	Returns if the specified team \a parent has any children belonging to the
2304 	process group with the specified ID \a groupID.
2305 	The caller must hold \a parent's lock.
2306 */
2307 static bool
2308 has_children_in_group(Team* parent, pid_t groupID)
2309 {
2310 	for (Team* child = parent->children; child != NULL;
2311 			child = child->siblings_next) {
2312 		TeamLocker childLocker(child);
2313 		if (child->group_id == groupID)
2314 			return true;
2315 	}
2316 
2317 	return false;
2318 }
2319 
2320 
2321 /*!	Returns the first job control entry from \a children, which matches \a id.
2322 	\a id can be:
2323 	- \code > 0 \endcode: Matching an entry with that team ID.
2324 	- \code == -1 \endcode: Matching any entry.
2325 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2326 	\c 0 is an invalid value for \a id.
2327 
2328 	The caller must hold the lock of the team that \a children belongs to.
2329 
2330 	\param children The job control entry list to check.
2331 	\param id The match criterion.
2332 	\return The first matching entry or \c NULL, if none matches.
2333 */
2334 static job_control_entry*
2335 get_job_control_entry(team_job_control_children& children, pid_t id)
2336 {
2337 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2338 		 job_control_entry* entry = it.Next();) {
2339 
2340 		if (id > 0) {
2341 			if (entry->thread == id)
2342 				return entry;
2343 		} else if (id == -1) {
2344 			return entry;
2345 		} else {
2346 			pid_t processGroup
2347 				= (entry->team ? entry->team->group_id : entry->group_id);
2348 			if (processGroup == -id)
2349 				return entry;
2350 		}
2351 	}
2352 
2353 	return NULL;
2354 }
2355 
2356 
2357 /*!	Returns the first job control entry from one of team's dead, continued, or
2358 	stopped children which matches \a id.
2359 	\a id can be:
2360 	- \code > 0 \endcode: Matching an entry with that team ID.
2361 	- \code == -1 \endcode: Matching any entry.
2362 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2363 	\c 0 is an invalid value for \a id.
2364 
2365 	The caller must hold \a team's lock.
2366 
2367 	\param team The team whose dead, stopped, and continued child lists shall be
2368 		checked.
2369 	\param id The match criterion.
2370 	\param flags Specifies which children shall be considered. Dead children
2371 		are considered when \a flags is ORed bitwise with \c WEXITED, stopped
2372 		children are considered when \a flags is ORed bitwise with \c WUNTRACED
2373 		or \c WSTOPPED, continued children when \a flags is ORed bitwise with
2374 		\c WCONTINUED.
2375 	\return The first matching entry or \c NULL, if none matches.
2376 */
2377 static job_control_entry*
2378 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2379 {
2380 	job_control_entry* entry = NULL;
2381 
2382 	if ((flags & WEXITED) != 0)
2383 		entry = get_job_control_entry(team->dead_children, id);
2384 
2385 	if (entry == NULL && (flags & WCONTINUED) != 0)
2386 		entry = get_job_control_entry(team->continued_children, id);
2387 
2388 	if (entry == NULL && (flags & (WUNTRACED | WSTOPPED)) != 0)
2389 		entry = get_job_control_entry(team->stopped_children, id);
2390 
2391 	return entry;
2392 }
2393 
2394 
/*!	Creates an entry that doesn't hold a process group reference yet.
	The remaining fields are filled in later, e.g. by InitDeadState() or
	operator=().
*/
job_control_entry::job_control_entry()
	:
	has_group_ref(false)
{
}
2400 
2401 
/*!	Releases the process group reference acquired in InitDeadState(), if the
	entry holds one.
*/
job_control_entry::~job_control_entry()
{
	if (has_group_ref) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);

		// the group must still be in the hash, since we hold a reference to it
		ProcessGroup* group = sGroupHash.Lookup(group_id);
		if (group == NULL) {
			panic("job_control_entry::~job_control_entry(): unknown group "
				"ID: %" B_PRId32, group_id);
			return;
		}

		// release the reference without holding the hash lock
		groupHashLocker.Unlock();

		group->ReleaseReference();
	}
}
2419 
2420 
/*!	Invoked when the owning team is dying, initializing the entry according to
	the dead state.

	Copies the team's exit status/reason and accumulated CPU times into the
	entry and detaches the entry from the team, so the information remains
	available after the team structure is gone. A reference to the team's
	process group is acquired (released in the destructor).

	The caller must hold the owning team's lock and the scheduler lock.
*/
void
job_control_entry::InitDeadState()
{
	if (team != NULL) {
		// the exit info must have been set before the team dies
		ASSERT(team->exit.initialized);

		group_id = team->group_id;
		team->group->AcquireReference();
		has_group_ref = true;

		thread = team->id;
		status = team->exit.status;
		reason = team->exit.reason;
		signal = team->exit.signal;
		signaling_user = team->exit.signaling_user;
		// remember the total CPU time of the team's threads, dead and alive
		user_time = team->dead_threads_user_time
			+ team->dead_children.user_time;
		kernel_time = team->dead_threads_kernel_time
			+ team->dead_children.kernel_time;

		// detach from the team -- from now on the entry is self-contained
		team = NULL;
	}
}
2449 
2450 
2451 job_control_entry&
2452 job_control_entry::operator=(const job_control_entry& other)
2453 {
2454 	state = other.state;
2455 	thread = other.thread;
2456 	signal = other.signal;
2457 	has_group_ref = false;
2458 	signaling_user = other.signaling_user;
2459 	team = other.team;
2460 	group_id = other.group_id;
2461 	status = other.status;
2462 	reason = other.reason;
2463 	user_time = other.user_time;
2464 	kernel_time = other.kernel_time;
2465 
2466 	return *this;
2467 }
2468 
2469 
/*!	This is the kernel backend for waitid().

	Waits for a state change (death, stop, continuation) of a child of the
	calling team, as selected by \a child and \a flags, and fills in the
	respective information.

	\param child \code > 0 \endcode to wait for that team, \c -1 for any
		child, \code < -1 \endcode for any child in the process group
		\c -child, \c 0 for any child in the caller's process group.
	\param flags Bitwise combination of \c WEXITED, \c WUNTRACED, \c WSTOPPED,
		\c WCONTINUED, optionally with \c WNOHANG and/or \c WNOWAIT.
	\param _info On success filled in with the SIGCHLD style information.
	\param _usage_info On death of a child filled in with its CPU usage times.
	\return The ID of the child whose state change has been caught, or an
		error code (\c ECHILD, \c B_WOULD_BLOCK, \c B_INTERRUPTED, ...).
*/
static thread_id
wait_for_child(pid_t child, uint32 flags, siginfo_t& _info,
	team_usage_info& _usage_info)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	struct job_control_entry foundEntry;
	struct job_control_entry* freeDeathEntry = NULL;
	status_t status = B_OK;

	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
		child, flags));

	T(WaitForChild(child, flags));

	// at least one state change type must be selected
	if ((flags & (WEXITED | WUNTRACED | WSTOPPED | WCONTINUED)) == 0) {
		T(WaitForChildDone(B_BAD_VALUE));
		return B_BAD_VALUE;
	}

	pid_t originalChild = child;

	bool ignoreFoundEntries = false;
	bool ignoreFoundEntriesChecked = false;

	// retry loop: each iteration checks for a matching job control entry and,
	// if there is none yet, waits on the team's dead children condition
	while (true) {
		// lock the team
		TeamLocker teamLocker(team);

		// A 0 child argument means to wait for all children in the process
		// group of the calling team.
		child = originalChild == 0 ? -team->group_id : originalChild;

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		if (entry == NULL) {
			// No success yet -- check whether there are any children complying
			// to the process group specification at all.
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else if (child != team->id) {
				if (Team* childTeam = Team::Get(child)) {
					BReference<Team> childTeamReference(childTeam, true);
					TeamLocker childTeamLocker(childTeam);
					childrenExist = childTeam->parent == team;
				}
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;

			// unless WNOWAIT has been specified, "consume" the wait state
			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
				if (entry->state == JOB_CONTROL_STATE_DEAD) {
					// The child is dead. Reap its death entry.
					freeDeathEntry = entry;
					team->dead_children.entries.Remove(entry);
					team->dead_children.count--;
				} else {
					// The child is well. Reset its job control state.
					team_set_job_control_state(entry->team,
						JOB_CONTROL_STATE_NONE, NULL);
				}
			}
		}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children.condition_variable.Add(&deadWaitEntry);

		teamLocker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			teamLocker.Lock();

			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			teamLocker.Unlock();

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// When we got here, we have a valid death entry, and already got
	// unregistered from the team or group. Fill in the returned info.
	memset(&_info, 0, sizeof(_info));
	_info.si_signo = SIGCHLD;
	_info.si_pid = foundEntry.thread;
	_info.si_uid = foundEntry.signaling_user;
	// TODO: Fill in si_errno?

	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			_info.si_code = foundEntry.reason;
			_info.si_status = foundEntry.reason == CLD_EXITED
				? foundEntry.status : foundEntry.signal;
			_usage_info.user_time = foundEntry.user_time;
			_usage_info.kernel_time = foundEntry.kernel_time;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			_info.si_code = CLD_STOPPED;
			_info.si_status = foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			_info.si_code = CLD_CONTINUED;
			_info.si_status = 0;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
	// status is available.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker signalLocker(team->signal_lock);
	SpinLocker threadCreationLocker(gThreadCreationLock);

	if (is_team_signal_blocked(team, SIGCHLD)) {
		if (get_job_control_entry(team, child, flags) == NULL)
			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
	}

	threadCreationLocker.Unlock();
	signalLocker.Unlock();
	teamLocker.Unlock();

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}
2658 
2659 
/*! Fills the team_info structure with information from the specified team.
	Interrupts must be enabled. The team must not be locked.

	\param team The team whose information to retrieve.
	\param info Pointer to the structure to fill in.
	\param size Must be \c sizeof(team_info), otherwise \c B_BAD_VALUE is
		returned.
	\return \c B_OK on success, \c B_BAD_VALUE otherwise.
*/
static status_t
fill_team_info(Team* team, team_info* info, size_t size)
{
	if (size != sizeof(team_info))
		return B_BAD_VALUE;

	// TODO: Set more information for team_info
	memset(info, 0, size);

	info->team = team->id;
		// immutable
	info->image_count = count_images(team);
		// protected by sImageMutex

	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	info->thread_count = team->num_threads;
	//info->area_count =
	info->debugger_nub_thread = team->debug_info.nub_thread;
	info->debugger_nub_port = team->debug_info.nub_port;
	info->uid = team->effective_uid;
	info->gid = team->effective_gid;

	strlcpy(info->args, team->Args(), sizeof(info->args));
	info->argc = 1;

	return B_OK;
}
2692 
2693 
2694 /*!	Returns whether the process group contains stopped processes.
2695 	The caller must hold the process group's lock.
2696 */
2697 static bool
2698 process_group_has_stopped_processes(ProcessGroup* group)
2699 {
2700 	Team* team = group->teams;
2701 	while (team != NULL) {
2702 		// the parent team's lock guards the job control entry -- acquire it
2703 		team->LockTeamAndParent(false);
2704 
2705 		if (team->job_control_entry != NULL
2706 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2707 			team->UnlockTeamAndParent();
2708 			return true;
2709 		}
2710 
2711 		team->UnlockTeamAndParent();
2712 
2713 		team = team->group_next;
2714 	}
2715 
2716 	return false;
2717 }
2718 
2719 
/*!	Iterates through all process groups queued in team_remove_team() and signals
	those that are orphaned and have stopped processes.
	The caller must not hold any team or process group locks.
*/
static void
orphaned_process_group_check()
{
	// process as long as there are groups in the list
	while (true) {
		// remove the head from the list
		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
		if (group == NULL)
			return;

		group->UnsetOrphanedCheck();
		// take over the list's reference to the group, so it is released when
		// we're done with it
		BReference<ProcessGroup> groupReference(group);

		// drop the list lock before taking the group lock
		orphanedCheckLocker.Unlock();

		AutoLocker<ProcessGroup> groupLocker(group);

		// If the group is orphaned and contains stopped processes, we're
		// supposed to send SIGHUP + SIGCONT.
		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
			Thread* currentThread = thread_get_current_thread();

			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
			send_signal_to_process_group_locked(group, signal, 0);

			signal.SetNumber(SIGCONT);
			send_signal_to_process_group_locked(group, signal, 0);
		}
	}
}
2756 
2757 
2758 static status_t
2759 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2760 	uint32 flags)
2761 {
2762 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2763 		return B_BAD_VALUE;
2764 
2765 	// get the team
2766 	Team* team = Team::GetAndLock(id);
2767 	if (team == NULL)
2768 		return B_BAD_TEAM_ID;
2769 	BReference<Team> teamReference(team, true);
2770 	TeamLocker teamLocker(team, true);
2771 
2772 	if ((flags & B_CHECK_PERMISSION) != 0) {
2773 		uid_t uid = geteuid();
2774 		if (uid != 0 && uid != team->effective_uid)
2775 			return B_NOT_ALLOWED;
2776 	}
2777 
2778 	bigtime_t kernelTime = 0;
2779 	bigtime_t userTime = 0;
2780 
2781 	switch (who) {
2782 		case B_TEAM_USAGE_SELF:
2783 		{
2784 			Thread* thread = team->thread_list;
2785 
2786 			for (; thread != NULL; thread = thread->team_next) {
2787 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2788 				kernelTime += thread->kernel_time;
2789 				userTime += thread->user_time;
2790 			}
2791 
2792 			kernelTime += team->dead_threads_kernel_time;
2793 			userTime += team->dead_threads_user_time;
2794 			break;
2795 		}
2796 
2797 		case B_TEAM_USAGE_CHILDREN:
2798 		{
2799 			Team* child = team->children;
2800 			for (; child != NULL; child = child->siblings_next) {
2801 				TeamLocker childLocker(child);
2802 
2803 				Thread* thread = team->thread_list;
2804 
2805 				for (; thread != NULL; thread = thread->team_next) {
2806 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2807 					kernelTime += thread->kernel_time;
2808 					userTime += thread->user_time;
2809 				}
2810 
2811 				kernelTime += child->dead_threads_kernel_time;
2812 				userTime += child->dead_threads_user_time;
2813 			}
2814 
2815 			kernelTime += team->dead_children.kernel_time;
2816 			userTime += team->dead_children.user_time;
2817 			break;
2818 		}
2819 	}
2820 
2821 	info->kernel_time = kernelTime;
2822 	info->user_time = userTime;
2823 
2824 	return B_OK;
2825 }
2826 
2827 
2828 //	#pragma mark - Private kernel API
2829 
2830 
/*!	Initializes the team subsystem during kernel startup: creates the team and
	process group hash tables, the initial session and process group, and the
	kernel team (ID 1), and registers the team related debugger commands.
	Calls panic() if any of the essential structures cannot be created.
*/
status_t
team_init(kernel_args* args)
{
	// create the team hash table
	new(&sTeamHash) TeamTable;
	if (sTeamHash.Init(64) != B_OK)
		panic("Failed to init team hash table!");

	new(&sGroupHash) ProcessGroupHashTable;
	if (sGroupHash.Init() != B_OK)
		panic("Failed to init process group hash table!");

	// create initial session and process groups

	ProcessSession* session = new(std::nothrow) ProcessSession(1);
	if (session == NULL)
		panic("Could not create initial session.\n");
	BReference<ProcessSession> sessionReference(session, true);

	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");
	BReference<ProcessGroup> groupReference(group, true);

	// makes the group visible in the group hash and attaches it to the session
	group->Publish(session);

	// create the kernel team
	sKernelTeam = Team::Create(1, "kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");

	sKernelTeam->address_space = VMAddressSpace::Kernel();
	sKernelTeam->SetArgs(sKernelTeam->Name());
	sKernelTeam->state = TEAM_STATE_NORMAL;

	// the kernel team runs as root
	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
		dprintf("Failed to resize FD table for kernel team!\n");

	// stick it in the team hash
	sTeamHash.Insert(sKernelTeam);

	// check safe mode settings
	sDisableUserAddOns = get_safemode_boolean(B_SAFEMODE_DISABLE_USER_ADD_ONS,
		false);

	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	sNotificationService.Register();

	return B_OK;
}
2908 
2909 
/*!	Returns the maximum number of teams the system supports. */
int32
team_max_teams(void)
{
	return sMaxTeams;
}
2915 
2916 
2917 int32
2918 team_used_teams(void)
2919 {
2920 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
2921 	return sUsedTeams;
2922 }
2923 
2924 
2925 /*! Returns a death entry of a child team specified by ID (if any).
2926 	The caller must hold the team's lock.
2927 
2928 	\param team The team whose dead children list to check.
2929 	\param child The ID of the child for whose death entry to lock. Must be > 0.
2930 	\param _deleteEntry Return variable, indicating whether the caller needs to
2931 		delete the returned entry.
2932 	\return The death entry of the matching team, or \c NULL, if no death entry
2933 		for the team was found.
2934 */
2935 job_control_entry*
2936 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2937 {
2938 	if (child <= 0)
2939 		return NULL;
2940 
2941 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2942 		child);
2943 	if (entry) {
2944 		// remove the entry only, if the caller is the parent of the found team
2945 		if (team_get_current_team_id() == entry->thread) {
2946 			team->dead_children.entries.Remove(entry);
2947 			team->dead_children.count--;
2948 			*_deleteEntry = true;
2949 		} else {
2950 			*_deleteEntry = false;
2951 		}
2952 	}
2953 
2954 	return entry;
2955 }
2956 
2957 
2958 /*! Quick check to see if we have a valid team ID. */
2959 bool
2960 team_is_valid(team_id id)
2961 {
2962 	if (id <= 0)
2963 		return false;
2964 
2965 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
2966 	return team_get_team_struct_locked(id) != NULL;
2967 }
2968 
2969 
/*!	Returns the team with the given ID, or \c NULL if there is none.
	The caller must hold \c sTeamHashLock.
*/
Team*
team_get_team_struct_locked(team_id id)
{
	return sTeamHash.Lookup(id);
}
2975 
2976 
2977 void
2978 team_set_controlling_tty(int32 ttyIndex)
2979 {
2980 	// lock the team, so its session won't change while we're playing with it
2981 	Team* team = thread_get_current_thread()->team;
2982 	TeamLocker teamLocker(team);
2983 
2984 	// get and lock the session
2985 	ProcessSession* session = team->group->Session();
2986 	AutoLocker<ProcessSession> sessionLocker(session);
2987 
2988 	// set the session's fields
2989 	session->controlling_tty = ttyIndex;
2990 	session->foreground_group = -1;
2991 }
2992 
2993 
2994 int32
2995 team_get_controlling_tty()
2996 {
2997 	// lock the team, so its session won't change while we're playing with it
2998 	Team* team = thread_get_current_thread()->team;
2999 	TeamLocker teamLocker(team);
3000 
3001 	// get and lock the session
3002 	ProcessSession* session = team->group->Session();
3003 	AutoLocker<ProcessSession> sessionLocker(session);
3004 
3005 	// get the session's field
3006 	return session->controlling_tty;
3007 }
3008 
3009 
/*!	Sets the foreground process group of the calling team's session, provided
	\a ttyIndex is the session's controlling TTY.
	If the calling team is in a background group and neither ignores nor
	blocks \c SIGTTOU, a \c SIGTTOU is sent to its process group instead and
	\c B_INTERRUPTED is returned.

	\param ttyIndex Must be the controlling TTY of the calling process.
	\param processGroupID The ID of the new foreground process group; must
		belong to the calling team's session.
	\return \c B_OK on success, \c ENOTTY, \c B_BAD_VALUE, or
		\c B_INTERRUPTED otherwise.
*/
status_t
team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
{
	// lock the team, so its session won't change while we're playing with it
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// check given TTY -- must be the controlling tty of the calling process
	if (session->controlling_tty != ttyIndex)
		return ENOTTY;

	// check given process group -- must belong to our session
	{
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
		if (group == NULL || group->Session() != session)
			return B_BAD_VALUE;
	}

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
		InterruptsSpinLocker signalLocker(team->signal_lock);

		if (!is_team_signal_blocked(team, SIGTTOU)) {
			pid_t groupID = team->group_id;

			// drop all locks before sending the signal
			signalLocker.Unlock();
			sessionLocker.Unlock();
			teamLocker.Unlock();

			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
			send_signal_to_process_group(groupID, signal, 0);
			return B_INTERRUPTED;
		}
	}

	session->foreground_group = processGroupID;

	return B_OK;
}
3059 
3060 
3061 uid_t
3062 team_geteuid(team_id id)
3063 {
3064 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3065 	Team* team = team_get_team_struct_locked(id);
3066 	if (team == NULL)
3067 		return (uid_t)-1;
3068 	return team->effective_uid;
3069 }
3070 
3071 
/*!	Removes the specified team from the global team hash, from its process
	group, and from its parent.
	It also moves all of its children to the kernel team.

	The given team's accumulated dead-children CPU times are added to the
	parent's, and \a _signalGroup is set to the session's foreground process
	group, if the team was a session leader with a controlling terminal
	(\c -1 otherwise), so the caller can signal that group later without
	holding the locks listed below.

	The caller must hold the following locks:
	- \a team's process group's lock,
	- the kernel team's lock,
	- \a team's parent team's lock (might be the kernel team), and
	- \a team's lock.
*/
void
team_remove_team(Team* team, pid_t& _signalGroup)
{
	Team* parent = team->parent;

	// remember how long this team lasted
	parent->dead_children.kernel_time += team->dead_threads_kernel_time
		+ team->dead_children.kernel_time;
	parent->dead_children.user_time += team->dead_threads_user_time
		+ team->dead_children.user_time;

	// remove the team from the hash table
	InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
	sTeamHash.Remove(team);
	sUsedTeams--;
	teamsLocker.Unlock();

	// The team can no longer be accessed by ID. Navigation to it is still
	// possible from its process group and its parent and children, but that
	// will be rectified shortly.
	team->state = TEAM_STATE_DEATH;

	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signalling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine, whom to signal.
	_signalGroup = -1;
	bool isSessionLeader = false;
	if (team->session_id == team->id
		&& team->group->Session()->controlling_tty >= 0) {
		isSessionLeader = true;

		ProcessSession* session = team->group->Session();

		AutoLocker<ProcessSession> sessionLocker(session);

		// detach the controlling terminal and remember the foreground group
		// for the caller to signal
		session->controlling_tty = -1;
		_signalGroup = session->foreground_group;
	}

	// remove us from our process group
	remove_team_from_group(team);

	// move the team's children to the kernel team
	while (Team* child = team->children) {
		// remove the child from the current team and add it to the kernel team
		TeamLocker childLocker(child);

		remove_team_from_parent(team, child);
		insert_team_into_parent(sKernelTeam, child);

		// move job control entries too
		sKernelTeam->stopped_children.entries.MoveFrom(
			&team->stopped_children.entries);
		sKernelTeam->continued_children.entries.MoveFrom(
			&team->continued_children.entries);

		// If the team was a session leader with controlling terminal,
		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
		// groups with stopped processes. Due to locking complications we can't
		// do that here, so we only check whether we were a reason for the
		// child's process group not being an orphan and, if so, schedule a
		// later check (cf. orphaned_process_group_check()).
		if (isSessionLeader) {
			ProcessGroup* childGroup = child->group;
			if (childGroup->Session()->id == team->session_id
				&& childGroup->id != team->group_id) {
				childGroup->ScheduleOrphanedCheck();
			}
		}

		// Note, we don't move the dead children entries. Those will be deleted
		// when the team structure is deleted.
	}

	// remove us from our parent
	remove_team_from_parent(parent, team);
}
3160 
3161 
/*!	Kills all threads but the main thread of the team and shuts down user
	debugging for it.
	To be called on exit of the team's main thread. No locks must be held.

	\param team The team in question.
	\return The port of the debugger for the team, -1 if none. To be passed to
		team_delete_team().
*/
port_id
team_shutdown_team(Team* team)
{
	ASSERT(thread_get_current_thread() == team->main_thread);

	TeamLocker teamLocker(team);

	// Make sure debugging changes won't happen anymore.
	port_id debuggerPort = -1;
	while (true) {
		// If a debugger change is in progress for the team, we'll have to
		// wait until it is done.
		ConditionVariableEntry waitForDebuggerEntry;
		bool waitForDebugger = false;

		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition != NULL) {
			team->debug_info.debugger_changed_condition->Add(
				&waitForDebuggerEntry);
			waitForDebugger = true;
		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
			// The team is being debugged. That will stop with the termination
			// of the nub thread. Since we set the team state to death, no one
			// can install a debugger anymore. We fetch the debugger's port to
			// send it a message at the bitter end.
			debuggerPort = team->debug_info.debugger_port;
		}

		debugInfoLocker.Unlock();

		if (!waitForDebugger)
			break;

		// wait for the debugger change to be finished
		teamLocker.Unlock();

		waitForDebuggerEntry.Wait();

		teamLocker.Lock();
	}

	// Mark the team as shutting down. That will prevent new threads from being
	// created and debugger changes from taking place.
	team->state = TEAM_STATE_SHUTDOWN;

	// delete all timers
	team->DeleteUserTimers(false);

	// deactivate CPU time user timers for the team
	InterruptsSpinLocker timeLocker(team->time_lock);

	if (team->HasActiveCPUTimeUserTimers())
		team->DeactivateCPUTimeUserTimers();

	timeLocker.Unlock();

	// kill all threads but the main thread
	team_death_entry deathEntry;
	deathEntry.condition.Init(team, "team death");

	// Repeat sending SIGKILLTHR to all non-main threads and waiting for them
	// to die until the thread list contains only the main thread.
	while (true) {
		team->death_entry = &deathEntry;
		deathEntry.remaining_threads = 0;

		Thread* thread = team->thread_list;
		while (thread != NULL) {
			if (thread != team->main_thread) {
				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
				deathEntry.remaining_threads++;
			}

			thread = thread->team_next;
		}

		if (deathEntry.remaining_threads == 0)
			break;

		// there are threads to wait for
		ConditionVariableEntry entry;
		deathEntry.condition.Add(&entry);

		teamLocker.Unlock();

		entry.Wait();

		teamLocker.Lock();
	}

	team->death_entry = NULL;

	return debuggerPort;
}
3264 
3265 
3266 /*!	Called on team exit to notify threads waiting on the team and free most
3267 	resources associated with it.
3268 	The caller shouldn't hold any locks.
3269 */
void
team_delete_team(Team* team, port_id debuggerPort)
{
	// Not quite in our job description, but work that has been left by
	// team_remove_team() and that can be done now that we're not holding any
	// locks.
	orphaned_process_group_check();

	// Remember the ID -- the Team object may be gone by the time we need it
	// for the final debugger notification below.
	team_id teamID = team->id;

	ASSERT(team->num_threads == 0);

	// If someone is waiting for this team to be loaded, but it dies
	// unexpectedly before being done, we need to notify the waiting
	// thread now.

	TeamLocker teamLocker(team);

	if (team->loading_info) {
		// there's indeed someone waiting
		struct team_loading_info* loadingInfo = team->loading_info;
		team->loading_info = NULL;

		loadingInfo->result = B_ERROR;

		// wake up the waiting thread
		loadingInfo->condition.NotifyAll();
	}

	// notify team watchers

	{
		// we're not reachable from anyone anymore at this point, so we
		// can safely access the list without any locking
		struct team_watcher* watcher;
		while ((watcher = (struct team_watcher*)list_remove_head_item(
				&team->watcher_list)) != NULL) {
			watcher->hook(teamID, watcher->data);
			free(watcher);
		}
	}

	teamLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	// free team resources

	delete_realtime_sem_context(team->realtime_sem_context);
	xsi_sem_undo(team);
	remove_images(team);
	team->address_space->RemoveAndPut();

	// drop our reference; this may destroy the Team object
	team->ReleaseReference();

	// notify the debugger, that the team is gone
	user_debug_team_deleted(teamID, debuggerPort);
}
3328 
3329 
3330 Team*
3331 team_get_kernel_team(void)
3332 {
3333 	return sKernelTeam;
3334 }
3335 
3336 
3337 team_id
3338 team_get_kernel_team_id(void)
3339 {
3340 	if (!sKernelTeam)
3341 		return 0;
3342 
3343 	return sKernelTeam->id;
3344 }
3345 
3346 
3347 team_id
3348 team_get_current_team_id(void)
3349 {
3350 	return thread_get_current_thread()->team->id;
3351 }
3352 
3353 
3354 status_t
3355 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3356 {
3357 	if (id == sKernelTeam->id) {
3358 		// we're the kernel team, so we don't have to go through all
3359 		// the hassle (locking and hash lookup)
3360 		*_addressSpace = VMAddressSpace::GetKernel();
3361 		return B_OK;
3362 	}
3363 
3364 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3365 
3366 	Team* team = team_get_team_struct_locked(id);
3367 	if (team == NULL)
3368 		return B_BAD_VALUE;
3369 
3370 	team->address_space->Get();
3371 	*_addressSpace = team->address_space;
3372 	return B_OK;
3373 }
3374 
3375 
3376 /*!	Sets the team's job control state.
3377 	The caller must hold the parent team's lock. Interrupts are allowed to be
3378 	enabled or disabled.
3379 	\a team The team whose job control state shall be set.
3380 	\a newState The new state to be set.
3381 	\a signal The signal the new state was caused by. Can \c NULL, if none. Then
3382 		the caller is responsible for filling in the following fields of the
3383 		entry before releasing the parent team's lock, unless the new state is
3384 		\c JOB_CONTROL_STATE_NONE:
3385 		- \c signal: The number of the signal causing the state change.
3386 		- \c signaling_user: The real UID of the user sending the signal.
3387 */
void
team_set_job_control_state(Team* team, job_control_state newState,
	Signal* signal)
{
	if (team == NULL || team->job_control_entry == NULL)
		return;

	// don't touch anything, if the state stays the same or the team is already
	// dead
	job_control_entry* entry = team->job_control_entry;
	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
		return;

	T(SetJobControlState(team->id, newState, signal));

	// remove from the old list
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry is in no list ATM
			break;
		case JOB_CONTROL_STATE_DEAD:
			// can't get here
			break;
		case JOB_CONTROL_STATE_STOPPED:
			team->parent->stopped_children.entries.Remove(entry);
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			team->parent->continued_children.entries.Remove(entry);
			break;
	}

	entry->state = newState;

	// record which signal caused the transition, if one was given
	if (signal != NULL) {
		entry->signal = signal->Number();
		entry->signaling_user = signal->SendingUser();
	}

	// add to new list
	team_job_control_children* childList = NULL;
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry doesn't get into any list
			break;
		case JOB_CONTROL_STATE_DEAD:
			childList = &team->parent->dead_children;
			team->parent->dead_children.count++;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			childList = &team->parent->stopped_children;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			childList = &team->parent->continued_children;
			break;
	}

	// Wake up anyone waiting in wait_for_child() on the parent; any job
	// control change may be what the waiter is interested in.
	if (childList != NULL) {
		childList->entries.Add(entry);
		team->parent->dead_children.condition_variable.NotifyAll();
	}
}
3449 
3450 
3451 /*!	Inits the given team's exit information, if not yet initialized, to some
3452 	generic "killed" status.
3453 	The caller must not hold the team's lock. Interrupts must be enabled.
3454 
3455 	\param team The team whose exit info shall be initialized.
3456 */
3457 void
3458 team_init_exit_info_on_error(Team* team)
3459 {
3460 	TeamLocker teamLocker(team);
3461 
3462 	if (!team->exit.initialized) {
3463 		team->exit.reason = CLD_KILLED;
3464 		team->exit.signal = SIGKILL;
3465 		team->exit.signaling_user = geteuid();
3466 		team->exit.status = 0;
3467 		team->exit.initialized = true;
3468 	}
3469 }
3470 
3471 
3472 /*! Adds a hook to the team that is called as soon as this team goes away.
3473 	This call might get public in the future.
3474 */
3475 status_t
3476 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3477 {
3478 	if (hook == NULL || teamID < B_OK)
3479 		return B_BAD_VALUE;
3480 
3481 	// create the watcher object
3482 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3483 	if (watcher == NULL)
3484 		return B_NO_MEMORY;
3485 
3486 	watcher->hook = hook;
3487 	watcher->data = data;
3488 
3489 	// add watcher, if the team isn't already dying
3490 	// get the team
3491 	Team* team = Team::GetAndLock(teamID);
3492 	if (team == NULL) {
3493 		free(watcher);
3494 		return B_BAD_TEAM_ID;
3495 	}
3496 
3497 	list_add_item(&team->watcher_list, watcher);
3498 
3499 	team->UnlockAndReleaseReference();
3500 
3501 	return B_OK;
3502 }
3503 
3504 
3505 status_t
3506 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3507 {
3508 	if (hook == NULL || teamID < 0)
3509 		return B_BAD_VALUE;
3510 
3511 	// get team and remove watcher (if present)
3512 	Team* team = Team::GetAndLock(teamID);
3513 	if (team == NULL)
3514 		return B_BAD_TEAM_ID;
3515 
3516 	// search for watcher
3517 	team_watcher* watcher = NULL;
3518 	while ((watcher = (team_watcher*)list_get_next_item(
3519 			&team->watcher_list, watcher)) != NULL) {
3520 		if (watcher->hook == hook && watcher->data == data) {
3521 			// got it!
3522 			list_remove_item(&team->watcher_list, watcher);
3523 			break;
3524 		}
3525 	}
3526 
3527 	team->UnlockAndReleaseReference();
3528 
3529 	if (watcher == NULL)
3530 		return B_ENTRY_NOT_FOUND;
3531 
3532 	free(watcher);
3533 	return B_OK;
3534 }
3535 
3536 
3537 /*!	Allocates a user_thread structure from the team.
3538 	The team lock must be held, unless the function is called for the team's
3539 	main thread. Interrupts must be enabled.
3540 */
struct user_thread*
team_allocate_user_thread(Team* team)
{
	// no user data area set up (yet) -- nothing to allocate from
	if (team->user_data == 0)
		return NULL;

	// take an entry from the free list, if any
	if (struct free_user_thread* entry = team->free_user_threads) {
		user_thread* thread = entry->thread;
		team->free_user_threads = entry->next;
		free(entry);
		return thread;
	}

	// Bump-allocate from the user data area, growing the area by one page
	// whenever it is exhausted.
	while (true) {
		// enough space left?
		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
		if (team->user_data_size - team->used_user_data < needed) {
			// try to resize the area
			if (resize_area(team->user_data_area,
					team->user_data_size + B_PAGE_SIZE) != B_OK) {
				return NULL;
			}

			// resized user area successfully -- try to allocate the user_thread
			// again
			team->user_data_size += B_PAGE_SIZE;
			continue;
		}

		// allocate the user_thread
		user_thread* thread
			= (user_thread*)(team->user_data + team->used_user_data);
		team->used_user_data += needed;

		return thread;
	}
}
3579 
3580 
3581 /*!	Frees the given user_thread structure.
3582 	The team's lock must not be held. Interrupts must be enabled.
3583 	\param team The team the user thread was allocated from.
3584 	\param userThread The user thread to free.
3585 */
3586 void
3587 team_free_user_thread(Team* team, struct user_thread* userThread)
3588 {
3589 	if (userThread == NULL)
3590 		return;
3591 
3592 	// create a free list entry
3593 	free_user_thread* entry
3594 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3595 	if (entry == NULL) {
3596 		// we have to leak the user thread :-/
3597 		return;
3598 	}
3599 
3600 	// add to free list
3601 	TeamLocker teamLocker(team);
3602 
3603 	entry->thread = userThread;
3604 	entry->next = team->free_user_threads;
3605 	team->free_user_threads = entry;
3606 }
3607 
3608 
3609 //	#pragma mark - Associated data interface
3610 
3611 
// Creates an AssociatedData object that is not attached to any owner yet.
AssociatedData::AssociatedData()
	:
	fOwner(NULL)
{
}
3617 
3618 
// Empty destructor -- the base class holds no resources of its own.
AssociatedData::~AssociatedData()
{
}
3622 
3623 
// Hook called (by AssociatedDataOwner::PrepareForDeletion()) when the owner
// goes away. The base implementation intentionally does nothing.
void
AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
{
}
3628 
3629 
// Initializes the mutex guarding the associated data list.
AssociatedDataOwner::AssociatedDataOwner()
{
	mutex_init(&fLock, "associated data owner");
}
3634 
3635 
// Tears down the lock created in the constructor.
AssociatedDataOwner::~AssociatedDataOwner()
{
	mutex_destroy(&fLock);
}
3640 
3641 
3642 bool
3643 AssociatedDataOwner::AddData(AssociatedData* data)
3644 {
3645 	MutexLocker locker(fLock);
3646 
3647 	if (data->Owner() != NULL)
3648 		return false;
3649 
3650 	data->AcquireReference();
3651 	fList.Add(data);
3652 	data->SetOwner(this);
3653 
3654 	return true;
3655 }
3656 
3657 
3658 bool
3659 AssociatedDataOwner::RemoveData(AssociatedData* data)
3660 {
3661 	MutexLocker locker(fLock);
3662 
3663 	if (data->Owner() != this)
3664 		return false;
3665 
3666 	data->SetOwner(NULL);
3667 	fList.Remove(data);
3668 
3669 	locker.Unlock();
3670 
3671 	data->ReleaseReference();
3672 
3673 	return true;
3674 }
3675 
3676 
3677 void
3678 AssociatedDataOwner::PrepareForDeletion()
3679 {
3680 	MutexLocker locker(fLock);
3681 
3682 	// move all data to a temporary list and unset the owner
3683 	DataList list;
3684 	list.MoveFrom(&fList);
3685 
3686 	for (DataList::Iterator it = list.GetIterator();
3687 		AssociatedData* data = it.Next();) {
3688 		data->SetOwner(NULL);
3689 	}
3690 
3691 	locker.Unlock();
3692 
3693 	// call the notification hooks and release our references
3694 	while (AssociatedData* data = list.RemoveHead()) {
3695 		data->OwnerDeleted(this);
3696 		data->ReleaseReference();
3697 	}
3698 }
3699 
3700 
3701 /*!	Associates data with the current team.
3702 	When the team is deleted, the data object is notified.
3703 	The team acquires a reference to the object.
3704 
3705 	\param data The data object.
3706 	\return \c true on success, \c false otherwise. Fails only when the supplied
3707 		data object is already associated with another owner.
3708 */
3709 bool
3710 team_associate_data(AssociatedData* data)
3711 {
3712 	return thread_get_current_thread()->team->AddData(data);
3713 }
3714 
3715 
3716 /*!	Dissociates data from the current team.
3717 	Balances an earlier call to team_associate_data().
3718 
3719 	\param data The data object.
3720 	\return \c true on success, \c false otherwise. Fails only when the data
3721 		object is not associated with the current team.
3722 */
3723 bool
3724 team_dissociate_data(AssociatedData* data)
3725 {
3726 	return thread_get_current_thread()->team->RemoveData(data);
3727 }
3728 
3729 
3730 //	#pragma mark - Public kernel API
3731 
3732 
/*!	Loads a userland image with default settings: normal priority, the current
	team as parent, and waiting until the image is fully loaded
	(B_WAIT_TILL_LOADED).
*/
thread_id
load_image(int32 argCount, const char** args, const char** env)
{
	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
}
3739 
3740 
3741 thread_id
3742 load_image_etc(int32 argCount, const char* const* args,
3743 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3744 {
3745 	// we need to flatten the args and environment
3746 
3747 	if (args == NULL)
3748 		return B_BAD_VALUE;
3749 
3750 	// determine total needed size
3751 	int32 argSize = 0;
3752 	for (int32 i = 0; i < argCount; i++)
3753 		argSize += strlen(args[i]) + 1;
3754 
3755 	int32 envCount = 0;
3756 	int32 envSize = 0;
3757 	while (env != NULL && env[envCount] != NULL)
3758 		envSize += strlen(env[envCount++]) + 1;
3759 
3760 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3761 	if (size > MAX_PROCESS_ARGS_SIZE)
3762 		return B_TOO_MANY_ARGS;
3763 
3764 	// allocate space
3765 	char** flatArgs = (char**)malloc(size);
3766 	if (flatArgs == NULL)
3767 		return B_NO_MEMORY;
3768 
3769 	char** slot = flatArgs;
3770 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3771 
3772 	// copy arguments and environment
3773 	for (int32 i = 0; i < argCount; i++) {
3774 		int32 argSize = strlen(args[i]) + 1;
3775 		memcpy(stringSpace, args[i], argSize);
3776 		*slot++ = stringSpace;
3777 		stringSpace += argSize;
3778 	}
3779 
3780 	*slot++ = NULL;
3781 
3782 	for (int32 i = 0; i < envCount; i++) {
3783 		int32 envSize = strlen(env[i]) + 1;
3784 		memcpy(stringSpace, env[i], envSize);
3785 		*slot++ = stringSpace;
3786 		stringSpace += envSize;
3787 	}
3788 
3789 	*slot++ = NULL;
3790 
3791 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3792 		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
3793 
3794 	free(flatArgs);
3795 		// load_image_internal() unset our variable if it took over ownership
3796 
3797 	return thread;
3798 }
3799 
3800 
3801 status_t
3802 wait_for_team(team_id id, status_t* _returnCode)
3803 {
3804 	// check whether the team exists
3805 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3806 
3807 	Team* team = team_get_team_struct_locked(id);
3808 	if (team == NULL)
3809 		return B_BAD_TEAM_ID;
3810 
3811 	id = team->id;
3812 
3813 	teamsLocker.Unlock();
3814 
3815 	// wait for the main thread (it has the same ID as the team)
3816 	return wait_for_thread(id, _returnCode);
3817 }
3818 
3819 
3820 status_t
3821 kill_team(team_id id)
3822 {
3823 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3824 
3825 	Team* team = team_get_team_struct_locked(id);
3826 	if (team == NULL)
3827 		return B_BAD_TEAM_ID;
3828 
3829 	id = team->id;
3830 
3831 	teamsLocker.Unlock();
3832 
3833 	if (team == sKernelTeam)
3834 		return B_NOT_ALLOWED;
3835 
3836 	// Just kill the team's main thread (it has same ID as the team). The
3837 	// cleanup code there will take care of the team.
3838 	return kill_thread(id);
3839 }
3840 
3841 
3842 status_t
3843 _get_team_info(team_id id, team_info* info, size_t size)
3844 {
3845 	// get the team
3846 	Team* team = Team::Get(id);
3847 	if (team == NULL)
3848 		return B_BAD_TEAM_ID;
3849 	BReference<Team> teamReference(team, true);
3850 
3851 	// fill in the info
3852 	return fill_team_info(team, info, size);
3853 }
3854 
3855 
status_t
_get_next_team_info(int32* cookie, team_info* info, size_t size)
{
	// the cookie is simply the next team ID to probe; IDs start at 1
	int32 slot = *cookie;
	if (slot < 1)
		slot = 1;

	InterruptsReadSpinLocker locker(sTeamHashLock);

	team_id lastTeamID = peek_next_thread_id();
		// TODO: This is broken, since the id can wrap around!

	// get next valid team
	Team* team = NULL;
	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
		slot++;

	if (team == NULL)
		return B_BAD_TEAM_ID;

	// get a reference to the team and unlock
	BReference<Team> teamReference(team);
	locker.Unlock();

	// fill in the info
	*cookie = ++slot;
	return fill_team_info(team, info, size);
}
3884 
3885 
3886 status_t
3887 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3888 {
3889 	if (size != sizeof(team_usage_info))
3890 		return B_BAD_VALUE;
3891 
3892 	return common_get_team_usage_info(id, who, info, 0);
3893 }
3894 
3895 
3896 pid_t
3897 getpid(void)
3898 {
3899 	return thread_get_current_thread()->team->id;
3900 }
3901 
3902 
pid_t
getppid()
{
	// 0 selects the calling process, matching the 0-means-self convention
	// used by the other process-info functions in this file
	return _getppid(0);
}
3908 
3909 
3910 pid_t
3911 getpgid(pid_t id)
3912 {
3913 	if (id < 0) {
3914 		errno = EINVAL;
3915 		return -1;
3916 	}
3917 
3918 	if (id == 0) {
3919 		// get process group of the calling process
3920 		Team* team = thread_get_current_thread()->team;
3921 		TeamLocker teamLocker(team);
3922 		return team->group_id;
3923 	}
3924 
3925 	// get the team
3926 	Team* team = Team::GetAndLock(id);
3927 	if (team == NULL) {
3928 		errno = ESRCH;
3929 		return -1;
3930 	}
3931 
3932 	// get the team's process group ID
3933 	pid_t groupID = team->group_id;
3934 
3935 	team->UnlockAndReleaseReference();
3936 
3937 	return groupID;
3938 }
3939 
3940 
3941 pid_t
3942 getsid(pid_t id)
3943 {
3944 	if (id < 0) {
3945 		errno = EINVAL;
3946 		return -1;
3947 	}
3948 
3949 	if (id == 0) {
3950 		// get session of the calling process
3951 		Team* team = thread_get_current_thread()->team;
3952 		TeamLocker teamLocker(team);
3953 		return team->session_id;
3954 	}
3955 
3956 	// get the team
3957 	Team* team = Team::GetAndLock(id);
3958 	if (team == NULL) {
3959 		errno = ESRCH;
3960 		return -1;
3961 	}
3962 
3963 	// get the team's session ID
3964 	pid_t sessionID = team->session_id;
3965 
3966 	team->UnlockAndReleaseReference();
3967 
3968 	return sessionID;
3969 }
3970 
3971 
3972 //	#pragma mark - User syscalls
3973 
3974 
/*!	Syscall backend for exec*(): replaces the current team's image with the
	program at \a userPath, using the flattened argument/environment block
	copied from userland. Returns only on failure.
*/
status_t
_user_exec(const char* userPath, const char* const* userFlatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char path[B_PATH_NAME_LENGTH];

	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
		return B_BAD_ADDRESS;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);

	if (error == B_OK) {
		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
			envCount, umask);
			// this one only returns in case of error
	}

	free(flatArgs);
	return error;
}
4001 
4002 
/*!	Syscall entry point for fork(); delegates to fork_team().
*/
thread_id
_user_fork(void)
{
	return fork_team();
}
4008 
4009 
4010 pid_t
4011 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo,
4012 	team_usage_info* usageInfo)
4013 {
4014 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
4015 		return B_BAD_ADDRESS;
4016 	if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo))
4017 		return B_BAD_ADDRESS;
4018 
4019 	siginfo_t info;
4020 	team_usage_info usage_info;
4021 	pid_t foundChild = wait_for_child(child, flags, info, usage_info);
4022 	if (foundChild < 0)
4023 		return syscall_restart_handle_post(foundChild);
4024 
4025 	// copy info back to userland
4026 	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
4027 		return B_BAD_ADDRESS;
4028 	// copy usage_info back to userland
4029 	if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info,
4030 		sizeof(usage_info)) != B_OK) {
4031 		return B_BAD_ADDRESS;
4032 	}
4033 
4034 	return foundChild;
4035 }
4036 
4037 
4038 pid_t
4039 _user_process_info(pid_t process, int32 which)
4040 {
4041 	pid_t result;
4042 	switch (which) {
4043 		case SESSION_ID:
4044 			result = getsid(process);
4045 			break;
4046 		case GROUP_ID:
4047 			result = getpgid(process);
4048 			break;
4049 		case PARENT_ID:
4050 			result = _getppid(process);
4051 			break;
4052 		default:
4053 			return B_BAD_VALUE;
4054 	}
4055 
4056 	return result >= 0 ? result : errno;
4057 }
4058 
4059 
/*!	Syscall backend for setpgid().
	Moves the target process into an existing process group of the same
	session, or creates a new group led by the target process.
	\return The resulting group ID, or an error code.
*/
pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the target
	//   process as group leader.
	// * Set the target process' process group to an already existing one in the
	//   same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock in
			// the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child of
			// the calling team and in the same session. (If that's the case it
			// isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child, when the child has
			// already executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different session.
			// That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll();

		return group->id;
	}
}
4211 
4212 
/*!	Syscall backend for setsid(): makes the calling team the leader of a new
	session and of a new process group within it.
	\return The new group ID (== the team's ID), or an error code.
*/
pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll();

	return group->id;
}
4252 
4253 
4254 status_t
4255 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4256 {
4257 	status_t returnCode;
4258 	status_t status;
4259 
4260 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4261 		return B_BAD_ADDRESS;
4262 
4263 	status = wait_for_team(id, &returnCode);
4264 	if (status >= B_OK && _userReturnCode != NULL) {
4265 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4266 				!= B_OK)
4267 			return B_BAD_ADDRESS;
4268 		return B_OK;
4269 	}
4270 
4271 	return syscall_restart_handle_post(status);
4272 }
4273 
4274 
4275 thread_id
4276 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
4277 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
4278 	port_id errorPort, uint32 errorToken)
4279 {
4280 	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));
4281 
4282 	if (argCount < 1)
4283 		return B_BAD_VALUE;
4284 
4285 	// copy and relocate the flat arguments
4286 	char** flatArgs;
4287 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
4288 		argCount, envCount, flatArgs);
4289 	if (error != B_OK)
4290 		return error;
4291 
4292 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
4293 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
4294 		errorToken);
4295 
4296 	free(flatArgs);
4297 		// load_image_internal() unset our variable if it took over ownership
4298 
4299 	return thread;
4300 }
4301 
4302 
/*!	Syscall backend for exit(): records the exit status and takes the calling
	thread (and via the main thread, the whole team) down with SIGKILL.
	Does not return to userland.
*/
void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status
	TeamLocker teamLocker(team);

	// only the first exit status sticks; later callers don't overwrite it
	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();

	// Send this thread a SIGKILL. This makes sure the thread will not return to
	// userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}
4336 
4337 
/*!	Syscall entry point for killing a team; delegates to kill_team().
*/
status_t
_user_kill_team(team_id team)
{
	return kill_team(team);
}
4343 
4344 
4345 status_t
4346 _user_get_team_info(team_id id, team_info* userInfo)
4347 {
4348 	status_t status;
4349 	team_info info;
4350 
4351 	if (!IS_USER_ADDRESS(userInfo))
4352 		return B_BAD_ADDRESS;
4353 
4354 	status = _get_team_info(id, &info, sizeof(team_info));
4355 	if (status == B_OK) {
4356 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4357 			return B_BAD_ADDRESS;
4358 	}
4359 
4360 	return status;
4361 }
4362 
4363 
4364 status_t
4365 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
4366 {
4367 	status_t status;
4368 	team_info info;
4369 	int32 cookie;
4370 
4371 	if (!IS_USER_ADDRESS(userCookie)
4372 		|| !IS_USER_ADDRESS(userInfo)
4373 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4374 		return B_BAD_ADDRESS;
4375 
4376 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
4377 	if (status != B_OK)
4378 		return status;
4379 
4380 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4381 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4382 		return B_BAD_ADDRESS;
4383 
4384 	return status;
4385 }
4386 
4387 
/*!	Syscall entry point for get_current_team(): returns the ID of the
	calling thread's team. Cannot fail.
*/
team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}
4393 
4394 
4395 status_t
4396 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4397 	size_t size)
4398 {
4399 	if (size != sizeof(team_usage_info))
4400 		return B_BAD_VALUE;
4401 
4402 	team_usage_info info;
4403 	status_t status = common_get_team_usage_info(team, who, &info,
4404 		B_CHECK_PERMISSION);
4405 
4406 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4407 		|| user_memcpy(userInfo, &info, size) != B_OK) {
4408 		return B_BAD_ADDRESS;
4409 	}
4410 
4411 	return status;
4412 }
4413 
4414 
/*!	Syscall entry point for get_extended_team_info(): collects information
	about the team \a teamID into a flattened KMessage and copies it to the
	userland \a buffer.
	Currently only \c B_TEAM_INFO_BASIC is implemented (team ID, name,
	process group, session, real/effective user and group IDs, and the
	current working directory's device/node pair).
	The total size needed is always written to \a _sizeNeeded; if it
	exceeds \a size, \c B_BUFFER_OVERFLOW is returned and \a buffer is
	left untouched.
*/
status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters: a NULL buffer is allowed only for a pure size query
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// allocate memory for a copy of the needed team data
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		} teamClone;

		io_context* ioContext;
		{
			// get the team structure
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);

			// Copy the data into the local clone while the team is locked,
			// so the KMessage can be built afterwards without holding the
			// team lock.
			teamClone.id = team->id;
			strlcpy(teamClone.name, team->Name(), sizeof(teamClone.name));
			teamClone.group_id = team->group_id;
			teamClone.session_id = team->session_id;
			teamClone.real_uid = team->real_uid;
			teamClone.real_gid = team->real_gid;
			teamClone.effective_uid = team->effective_uid;
			teamClone.effective_gid = team->effective_gid;

			// Also fetch a reference to the I/O context, so it stays valid
			// after the team lock (and our team reference) are released.
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		// put the I/O context reference when leaving the function
		CObjectDeleter<io_context, void, vfs_put_io_context>
			ioContextPutter(ioContext);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone.id) != B_OK
			|| info.AddString("name", teamClone.name) != B_OK
			|| info.AddInt32("process group", teamClone.group_id) != B_OK
			|| info.AddInt32("session", teamClone.session_id) != B_OK
			|| info.AddInt32("uid", teamClone.real_uid) != B_OK
			|| info.AddInt32("gid", teamClone.real_gid) != B_OK
			|| info.AddInt32("euid", teamClone.effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone.effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			MutexLocker ioContextLocker(ioContext->io_mutex);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
4508