xref: /haiku/src/system/kernel/team.cpp (revision 3d4afef9cba2f328e238089d4609d00d4b1524f3)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 /*!	Team functions */
13 
14 
15 #include <team.h>
16 
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22 
23 #include <OS.h>
24 
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27 
28 #include <extended_system_info_defs.h>
29 
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <safemode.h>
47 #include <sem.h>
48 #include <syscall_process_info.h>
49 #include <syscall_load_image.h>
50 #include <syscall_restart.h>
51 #include <syscalls.h>
52 #include <tls.h>
53 #include <tracing.h>
54 #include <user_runtime.h>
55 #include <user_thread.h>
56 #include <usergroup.h>
57 #include <vfs.h>
58 #include <vm/vm.h>
59 #include <vm/VMAddressSpace.h>
60 #include <util/AutoLock.h>
61 
62 #include "TeamThreadTables.h"
63 
64 
65 //#define TRACE_TEAM
66 #ifdef TRACE_TEAM
67 #	define TRACE(x) dprintf x
68 #else
69 #	define TRACE(x) ;
70 #endif
71 
72 
// Hash-table key type for looking up a Team by its team_id.
struct team_key {
	team_id id;
};
76 
// Bundle of arguments handed to the code that creates/executes a new
// userland team: the executable path, the flattened argument/environment
// block, and loader error reporting info.
struct team_arg {
	char	*path;				// path of the executable
	char	**flat_args;		// args + env in one flat allocation
	size_t	flat_args_size;		// size of the flat_args allocation
	uint32	arg_count;			// number of program arguments
	uint32	env_count;			// number of environment variables
	mode_t	umask;				// umask for the new team -- semantics defined by callers, not visible here
	uint32	flags;				// TEAM_ARGS_FLAG_* (see below)
	port_id	error_port;			// port for loader error reporting
	uint32	error_token;		// token sent along with loader errors -- TODO confirm against callers
};

// team_arg::flags: disable address space layout randomization for the team
#define TEAM_ARGS_FLAG_NO_ASLR	0x01
90 
91 
namespace {


// Broadcasts team events to registered listeners via the kernel
// notification mechanism (see Notify() implementation further below).
class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};


// #pragma mark - TeamTable


// team_id -> Team hash table; TeamThreadTable also supports iterator
// entries (see TeamListIterator below).
typedef BKernel::TeamThreadTable<Team> TeamTable;


// #pragma mark - ProcessGroupHashDefinition


// BOpenHashTable policy: maps pid_t group IDs to ProcessGroup objects,
// chaining collisions through ProcessGroup::next.
struct ProcessGroupHashDefinition {
	typedef pid_t			KeyType;
	typedef	ProcessGroup	ValueType;

	size_t HashKey(pid_t key) const
	{
		// the ID itself serves as the hash value
		return key;
	}

	size_t Hash(ProcessGroup* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(pid_t key, ProcessGroup* value) const
	{
		return value->id == key;
	}

	ProcessGroup*& GetLink(ProcessGroup* value) const
	{
		return value->next;
	}
};

typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;


}	// unnamed namespace
141 
142 
143 // #pragma mark -
144 
145 
// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static rw_spinlock sTeamHashLock = B_RW_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

// the kernel's own team; set up elsewhere (initialization not in this view)
static Team* sKernelTeam = NULL;
static bool sDisableUserAddOns = false;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

static TeamNotificationService sNotificationService;

// reserved address range / initial size of the per-team user data area
static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
172 
173 
174 // #pragma mark - TeamListIterator
175 
176 
/*!	Registers this iterator's entry with the global team table, presumably so
	the table can keep the iterator valid while teams are added/removed --
	see TeamThreadTable for the exact guarantees.
*/
TeamListIterator::TeamListIterator()
{
	// queue the entry
	InterruptsWriteSpinLocker locker(sTeamHashLock);
	sTeamHash.InsertIteratorEntry(&fEntry);
}
183 
184 
/*!	Deregisters the iterator's entry from the global team table.
*/
TeamListIterator::~TeamListIterator()
{
	// remove the entry
	InterruptsWriteSpinLocker locker(sTeamHashLock);
	sTeamHash.RemoveIteratorEntry(&fEntry);
}
191 
192 
193 Team*
194 TeamListIterator::Next()
195 {
196 	// get the next team -- if there is one, get reference for it
197 	InterruptsWriteSpinLocker locker(sTeamHashLock);
198 	Team* team = sTeamHash.NextElement(&fEntry);
199 	if (team != NULL)
200 		team->AcquireReference();
201 
202 	return team;
203 }
204 
205 
206 // #pragma mark - Tracing
207 
208 
209 #if TEAM_TRACING
210 namespace TeamTracing {
211 
// Trace entry: a fork() produced a new team whose main thread is
// \a forkedThread.
class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
	}

private:
	thread_id			fForkedThread;
};
229 
230 
231 class ExecTeam : public AbstractTraceEntry {
232 public:
233 	ExecTeam(const char* path, int32 argCount, const char* const* args,
234 			int32 envCount, const char* const* env)
235 		:
236 		fArgCount(argCount),
237 		fArgs(NULL)
238 	{
239 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
240 			false);
241 
242 		// determine the buffer size we need for the args
243 		size_t argBufferSize = 0;
244 		for (int32 i = 0; i < argCount; i++)
245 			argBufferSize += strlen(args[i]) + 1;
246 
247 		// allocate a buffer
248 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
249 		if (fArgs) {
250 			char* buffer = fArgs;
251 			for (int32 i = 0; i < argCount; i++) {
252 				size_t argSize = strlen(args[i]) + 1;
253 				memcpy(buffer, args[i], argSize);
254 				buffer += argSize;
255 			}
256 		}
257 
258 		// ignore env for the time being
259 		(void)envCount;
260 		(void)env;
261 
262 		Initialized();
263 	}
264 
265 	virtual void AddDump(TraceOutput& out)
266 	{
267 		out.Print("team exec, \"%p\", args:", fPath);
268 
269 		if (fArgs != NULL) {
270 			char* args = fArgs;
271 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
272 				out.Print(" \"%s\"", args);
273 				args += strlen(args) + 1;
274 			}
275 		} else
276 			out.Print(" <too long>");
277 	}
278 
279 private:
280 	char*	fPath;
281 	int32	fArgCount;
282 	char*	fArgs;
283 };
284 
285 
/*!	Returns a human-readable name for the given job control state, for use
	in trace output. Unknown values yield "invalid".
*/
static const char*
job_control_state_name(job_control_state state)
{
	switch (state) {
		case JOB_CONTROL_STATE_NONE:
			return "none";
		case JOB_CONTROL_STATE_STOPPED:
			return "stopped";
		case JOB_CONTROL_STATE_CONTINUED:
			return "continued";
		case JOB_CONTROL_STATE_DEAD:
			return "dead";
		default:
			return "invalid";
	}
}
302 
303 
// Trace entry: a team's job control state changed, optionally triggered by
// a signal (0 when no signal was involved).
class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %" B_PRId32 ", "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;
};
327 
328 
// Trace entry: a thread entered a wait-for-child operation (waitpid() and
// friends) for \a child with the given wait flags.
class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %" B_PRId32 ", "
			"flags: %#" B_PRIx32, fChild, fFlags);
	}

private:
	pid_t	fChild;
	uint32	fFlags;
};
349 
350 
351 class WaitForChildDone : public AbstractTraceEntry {
352 public:
353 	WaitForChildDone(const job_control_entry& entry)
354 		:
355 		fState(entry.state),
356 		fTeam(entry.thread),
357 		fStatus(entry.status),
358 		fReason(entry.reason),
359 		fSignal(entry.signal)
360 	{
361 		Initialized();
362 	}
363 
364 	WaitForChildDone(status_t error)
365 		:
366 		fTeam(error)
367 	{
368 		Initialized();
369 	}
370 
371 	virtual void AddDump(TraceOutput& out)
372 	{
373 		if (fTeam >= 0) {
374 			out.Print("team wait for child done, team: %" B_PRId32 ", "
375 				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
376 				fTeam, job_control_state_name(fState), fStatus, fReason,
377 				fSignal);
378 		} else {
379 			out.Print("team wait for child failed, error: "
380 				"%#" B_PRIx32 ", ", fTeam);
381 		}
382 	}
383 
384 private:
385 	job_control_state	fState;
386 	team_id				fTeam;
387 	status_t			fStatus;
388 	uint16				fReason;
389 	uint16				fSignal;
390 };
391 
392 }	// namespace TeamTracing
393 
394 #	define T(x) new(std::nothrow) TeamTracing::x;
395 #else
396 #	define T(x) ;
397 #endif
398 
399 
400 //	#pragma mark - TeamNotificationService
401 
402 
/*!	Registers the service under the name "teams", the channel listeners
	subscribe to for team events.
*/
TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}
407 
408 
409 void
410 TeamNotificationService::Notify(uint32 eventCode, Team* team)
411 {
412 	char eventBuffer[128];
413 	KMessage event;
414 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
415 	event.AddInt32("event", eventCode);
416 	event.AddInt32("team", team->id);
417 	event.AddPointer("teamStruct", team);
418 
419 	DefaultNotificationService::Notify(event, eventCode);
420 }
421 
422 
423 //	#pragma mark - Team
424 
425 
/*!	Initializes the team object. All scalar members are set to their default
	values; the two allocations that can fail (job_control_entry and
	fQueuedSignalsCounter) may leave NULL members, which Team::Create()
	checks afterwards.

	\param id The team's ID (chosen by the caller).
	\param kernel \c true for the kernel team (unlimited queued signals,
		fixed lock name).
*/
Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;
	serial_number = -1;

	// init mutex
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	hash_next = siblings_next = children = parent = NULL;
	fName[0] = '\0';
	fArgs[0] = '\0';
	num_threads = 0;
	io_context = NULL;
	address_space = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	thread_list = NULL;
	main_thread = NULL;
	loading_info = NULL;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	death_entry = NULL;
	user_data_area = -1;
	user_data = 0;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	commpage_address = NULL;

	supplementary_groups = NULL;
	supplementary_group_count = 0;

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;

	// dead threads
	list_init(&dead_threads);

	// dead children
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// job control entry -- may fail; checked by Team::Create()
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	list_init(&sem_list);
	list_init_etc(&port_list, port_team_link_offset());
	list_init(&image_list);
	list_init(&watcher_list);

	clear_team_debug_info(&debug_info, true);

	// init dead/stopped/continued children condition vars
	dead_children.condition_variable.Init(&dead_children, "team children");

	B_INITIALIZE_SPINLOCK(&time_lock);
	B_INITIALIZE_SPINLOCK(&signal_lock);

	// the kernel team may queue arbitrarily many signals (-1 == no limit);
	// may fail -- checked by Team::Create()
	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));

	fUserDefinedTimerCount = 0;

	fCoreDumpCondition = NULL;
}
511 
512 
/*!	Tears down the team and releases everything it still owns: I/O context,
	ports, semaphores, timers, queued signals, and the various dead-entry
	lists. Invoked when the last reference to the team is released.
*/
Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	if (io_context != NULL)
		vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	// false: delete all timers, not just the user-defined ones
	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	// free the remaining bookkeeping entries for already-dead threads
	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	malloc_referenced_release(supplementary_groups);

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}
550 
551 
552 /*static*/ Team*
553 Team::Create(team_id id, const char* name, bool kernel)
554 {
555 	// create the team object
556 	Team* team = new(std::nothrow) Team(id, kernel);
557 	if (team == NULL)
558 		return NULL;
559 	ObjectDeleter<Team> teamDeleter(team);
560 
561 	if (name != NULL)
562 		team->SetName(name);
563 
564 	// check initialization
565 	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
566 		return NULL;
567 
568 	// finish initialization (arch specifics)
569 	if (arch_team_init_team_struct(team, kernel) != B_OK)
570 		return NULL;
571 
572 	if (!kernel) {
573 		status_t error = user_timer_create_team_timers(team);
574 		if (error != B_OK)
575 			return NULL;
576 	}
577 
578 	// everything went fine
579 	return teamDeleter.Detach();
580 }
581 
582 
583 /*!	\brief Returns the team with the given ID.
584 	Returns a reference to the team.
585 	Team and thread spinlock must not be held.
586 */
587 /*static*/ Team*
588 Team::Get(team_id id)
589 {
590 	if (id == B_CURRENT_TEAM) {
591 		Team* team = thread_get_current_thread()->team;
592 		team->AcquireReference();
593 		return team;
594 	}
595 
596 	InterruptsReadSpinLocker locker(sTeamHashLock);
597 	Team* team = sTeamHash.Lookup(id);
598 	if (team != NULL)
599 		team->AcquireReference();
600 	return team;
601 }
602 
603 
604 /*!	\brief Returns the team with the given ID in a locked state.
605 	Returns a reference to the team.
606 	Team and thread spinlock must not be held.
607 */
608 /*static*/ Team*
609 Team::GetAndLock(team_id id)
610 {
611 	// get the team
612 	Team* team = Get(id);
613 	if (team == NULL)
614 		return NULL;
615 
616 	// lock it
617 	team->Lock();
618 
619 	// only return the team, when it isn't already dying
620 	if (team->state >= TEAM_STATE_SHUTDOWN) {
621 		team->Unlock();
622 		team->ReleaseReference();
623 		return NULL;
624 	}
625 
626 	return team;
627 }
628 
629 
/*!	Locks the team and its parent team (if any).
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked. If the
	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
	only the team itself is locked.

	\param dontLockParentIfKernel If \c true, the team's parent team is only
		locked, if it is not the kernel team.
*/
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent.This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);
			// the reference keeps the parent alive while this team is unlocked

		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}
672 
673 
674 /*!	Unlocks the team and its parent team (if any).
675 */
676 void
677 Team::UnlockTeamAndParent()
678 {
679 	if (parent != NULL)
680 		parent->Unlock();
681 
682 	Unlock();
683 }
684 
685 
/*!	Locks the team, its parent team (if any), and the team's process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked.
*/
void
Team::LockTeamParentAndProcessGroup()
{
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's lock.
	// If we have a parent, try to lock it.
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest of
	// the job.
	Unlock();
	LockTeamAndParent(false);
}
706 
707 
708 /*!	Unlocks the team, its parent team (if any), and the team's process group.
709 */
710 void
711 Team::UnlockTeamParentAndProcessGroup()
712 {
713 	group->Unlock();
714 
715 	if (parent != NULL)
716 		parent->Unlock();
717 
718 	Unlock();
719 }
720 
721 
/*!	Locks the team and its process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
*/
void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);
			// the reference keeps the group alive while this team is unlocked

		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}
753 
754 
/*!	Unlocks the team and its process group.
	Counterpart to LockTeamAndProcessGroup().
*/
void
Team::UnlockTeamAndProcessGroup()
{
	group->Unlock();
	Unlock();
}
761 
762 
763 void
764 Team::SetName(const char* name)
765 {
766 	if (const char* lastSlash = strrchr(name, '/'))
767 		name = lastSlash + 1;
768 
769 	strlcpy(fName, name, B_OS_NAME_LENGTH);
770 }
771 
772 
/*!	Sets the team's argument string, truncated to sizeof(fArgs).
*/
void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}
778 
779 
780 void
781 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
782 {
783 	fArgs[0] = '\0';
784 	strlcpy(fArgs, path, sizeof(fArgs));
785 	for (int i = 0; i < otherArgCount; i++) {
786 		strlcat(fArgs, " ", sizeof(fArgs));
787 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
788 	}
789 }
790 
791 
792 void
793 Team::ResetSignalsOnExec()
794 {
795 	// We are supposed to keep pending signals. Signal actions shall be reset
796 	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
797 	// (for SIGCHLD it's implementation-defined). Others shall be reset to
798 	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
799 	// flags, but since there aren't any handlers, they make little sense, so
800 	// we clear them.
801 
802 	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
803 		struct sigaction& action = SignalActionFor(i);
804 		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
805 			action.sa_handler = SIG_DFL;
806 
807 		action.sa_mask = 0;
808 		action.sa_flags = 0;
809 		action.sa_userdata = NULL;
810 	}
811 }
812 
813 
/*!	Copies all signal actions from \a parent, as done on fork().
*/
void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}
819 
820 
821 /*!	Adds the given user timer to the team and, if user-defined, assigns it an
822 	ID.
823 
824 	The caller must hold the team's lock.
825 
826 	\param timer The timer to be added. If it doesn't have an ID yet, it is
827 		considered user-defined and will be assigned an ID.
828 	\return \c B_OK, if the timer was added successfully, another error code
829 		otherwise.
830 */
831 status_t
832 Team::AddUserTimer(UserTimer* timer)
833 {
834 	// don't allow addition of timers when already shutting the team down
835 	if (state >= TEAM_STATE_SHUTDOWN)
836 		return B_BAD_TEAM_ID;
837 
838 	// If the timer is user-defined, check timer limit and increment
839 	// user-defined count.
840 	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
841 		return EAGAIN;
842 
843 	fUserTimers.AddTimer(timer);
844 
845 	return B_OK;
846 }
847 
848 
849 /*!	Removes the given user timer from the team.
850 
851 	The caller must hold the team's lock.
852 
853 	\param timer The timer to be removed.
854 
855 */
856 void
857 Team::RemoveUserTimer(UserTimer* timer)
858 {
859 	fUserTimers.RemoveTimer(timer);
860 
861 	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
862 		UserDefinedTimersRemoved(1);
863 }
864 
865 
866 /*!	Deletes all (or all user-defined) user timers of the team.
867 
868 	Timer's belonging to the team's threads are not affected.
869 	The caller must hold the team's lock.
870 
871 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
872 		otherwise all timers are deleted.
873 */
874 void
875 Team::DeleteUserTimers(bool userDefinedOnly)
876 {
877 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
878 	UserDefinedTimersRemoved(count);
879 }
880 
881 
882 /*!	If not at the limit yet, increments the team's user-defined timer count.
883 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
884 */
885 bool
886 Team::CheckAddUserDefinedTimer()
887 {
888 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
889 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
890 		atomic_add(&fUserDefinedTimerCount, -1);
891 		return false;
892 	}
893 
894 	return true;
895 }
896 
897 
/*!	Subtracts the given count for the team's user-defined timer count.
	\param count The count to subtract.
*/
void
Team::UserDefinedTimersRemoved(int32 count)
{
	atomic_add(&fUserDefinedTimerCount, -count);
}
906 
907 
908 void
909 Team::DeactivateCPUTimeUserTimers()
910 {
911 	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
912 		timer->Deactivate();
913 
914 	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
915 		timer->Deactivate();
916 }
917 
918 
/*!	Returns the team's current total CPU time (kernel + user + offset).

	The caller must hold \c time_lock.

	\param ignoreCurrentRun If \c true and the current thread is one team's
		threads, don't add the time since the last time \c last_time was
		updated. Should be used in "thread unscheduled" scheduler callbacks,
		since although the thread is still running at that time, its time has
		already been stopped.
	\param lockedThread A thread of this team whose \c time_lock the caller
		already holds; it won't be (un)locked again here. May be \c NULL.
	\return The team's current total CPU time.
*/
bigtime_t
Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
{
	// start with the time of already-dead threads plus the clock offset
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		bool alreadyLocked = thread == lockedThread;
		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
		time += thread->kernel_time + thread->user_time;

		// a non-zero last_time means the thread is currently scheduled;
		// account for its still-running slice unless told otherwise
		if (thread->last_time != 0) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}

		if (alreadyLocked)
			threadTimeLocker.Detach();
				// the caller owns the lock -- leave it locked
	}

	return time;
}
956 
957 
/*!	Returns the team's current user CPU time.

	The caller must hold \c time_lock.

	\return The team's current user CPU time.
*/
bigtime_t
Team::UserCPUTime() const
{
	bigtime_t time = dead_threads_user_time;

	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->user_time;

		// only count the currently running slice as user time when the
		// thread is scheduled (last_time != 0) and executing in userland
		if (thread->last_time != 0 && !thread->in_kernel)
			time += now - thread->last_time;
	}

	return time;
}
982 
983 
984 //	#pragma mark - ProcessGroup
985 
986 
987 ProcessGroup::ProcessGroup(pid_t id)
988 	:
989 	id(id),
990 	teams(NULL),
991 	fSession(NULL),
992 	fInOrphanedCheckList(false)
993 {
994 	char lockName[32];
995 	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
996 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
997 }
998 
999 
/*!	Destroys the process group: removes it from the orphaned-check list (if
	queued) and -- when it was published -- from the group hash table and its
	session.
*/
ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session
	// (fSession == NULL means the group was never published)
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}
1023 
1024 
1025 /*static*/ ProcessGroup*
1026 ProcessGroup::Get(pid_t id)
1027 {
1028 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1029 	ProcessGroup* group = sGroupHash.Lookup(id);
1030 	if (group != NULL)
1031 		group->AcquireReference();
1032 	return group;
1033 }
1034 
1035 
/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must not hold the process group hash lock.
*/
void
ProcessGroup::Publish(ProcessSession* session)
{
	// PublishLocked() does the actual work; we only supply the locking
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	PublishLocked(session);
}
1045 
1046 
/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must hold the process group hash lock.
*/
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	// a group must not be published twice
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	fSession = session;
	fSession->AcquireReference();

	sGroupHash.InsertUnchecked(this);
}
1060 
1061 
/*!	Checks whether the process group is orphaned.
	The caller must hold the group's lock.
	\return \c true, if the group is orphaned, \c false otherwise.
*/
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		// a parent outside this group but inside our session disproves
		// orphanhood
		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}
1091 
1092 
1093 void
1094 ProcessGroup::ScheduleOrphanedCheck()
1095 {
1096 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1097 
1098 	if (!fInOrphanedCheckList) {
1099 		sOrphanedCheckProcessGroups.Add(this);
1100 		fInOrphanedCheckList = true;
1101 	}
1102 }
1103 
1104 
/*!	Clears the in-orphaned-check-list flag without touching the list itself.
	Presumably called by the code draining sOrphanedCheckProcessGroups while
	it holds sOrphanedCheckLock -- TODO confirm against the caller.
*/
void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}
1110 
1111 
1112 //	#pragma mark - ProcessSession
1113 
1114 
1115 ProcessSession::ProcessSession(pid_t id)
1116 	:
1117 	id(id),
1118 	controlling_tty(-1),
1119 	foreground_group(-1)
1120 {
1121 	char lockName[32];
1122 	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
1123 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
1124 }
1125 
1126 
/*!	Destroys the session's lock; the session holds no other resources here.
*/
ProcessSession::~ProcessSession()
{
	mutex_destroy(&fLock);
}
1131 
1132 
1133 //	#pragma mark - KDL functions
1134 
1135 
/*!	Prints the interesting fields of the given team to the kernel debugger
	output. Helper for the "team" KDL command below.
*/
static void
_dump_team_info(Team* team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
		team->id);
	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
	kprintf("name:             '%s'\n", team->Name());
	kprintf("args:             '%s'\n", team->Args());
	kprintf("hash_next:        %p\n", team->hash_next);
	kprintf("parent:           %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:         %p\n", team->children);
	kprintf("num_threads:      %d\n", team->num_threads);
	kprintf("state:            %d\n", team->state);
	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
	kprintf("io_context:       %p\n", team->io_context);
	if (team->address_space)
		kprintf("address_space:    %p\n", team->address_space);
	kprintf("user data:        %p (area %" B_PRId32 ")\n",
		(void*)team->user_data, team->user_data_area);
	kprintf("free user thread: %p\n", team->free_user_threads);
	kprintf("main_thread:      %p\n", team->main_thread);
	kprintf("thread_list:      %p\n", team->thread_list);
	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
}
1167 
1168 
/*!	KDL command: dumps info about a team, identified by ID, name, or a raw
	kernel address; with no argument, dumps the current thread's team.
*/
static int
dump_team_info(int argc, char** argv)
{
	ulong arg;
	bool found = false;

	if (argc < 2) {
		// no argument -- dump the current team
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	arg = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(arg)) {
		// semi-hack
		_dump_team_info((Team*)arg);
		return 0;
	}

	// walk through the team list, trying to match name or id
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == (team_id)arg) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found)
		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
	return 0;
}
1206 
1207 
1208 static int
1209 dump_teams(int argc, char** argv)
1210 {
1211 	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
1212 		B_PRINTF_POINTER_WIDTH, "parent");
1213 
1214 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1215 		Team* team = it.Next();) {
1216 		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
1217 	}
1218 
1219 	return 0;
1220 }
1221 
1222 
1223 //	#pragma mark - Private functions
1224 
1225 
1226 /*! Get the parent of a given process.
1227 
1228 	Used in the implementation of getppid (where a process can get its own
1229 	parent, only) as well as in user_process_info where the information is
1230 	available to anyone (allowing to display a tree of running processes)
1231 */
1232 static pid_t
1233 _getppid(pid_t id)
1234 {
1235 	if (id < 0) {
1236 		errno = EINVAL;
1237 		return -1;
1238 	}
1239 
1240 	if (id == 0) {
1241 		Team* team = thread_get_current_thread()->team;
1242 		TeamLocker teamLocker(team);
1243 		if (team->parent == NULL) {
1244 			errno = EINVAL;
1245 			return -1;
1246 		}
1247 		return team->parent->id;
1248 	}
1249 
1250 	Team* team = Team::GetAndLock(id);
1251 	if (team == NULL) {
1252 		errno = ESRCH;
1253 		return -1;
1254 	}
1255 
1256 	pid_t parentID;
1257 
1258 	if (team->parent == NULL) {
1259 		errno = EINVAL;
1260 		parentID = -1;
1261 	} else
1262 		parentID = team->parent->id;
1263 
1264 	team->UnlockAndReleaseReference();
1265 
1266 	return parentID;
1267 }
1268 
1269 
1270 /*!	Inserts team \a team into the child list of team \a parent.
1271 
1272 	The caller must hold the lock of both \a parent and \a team.
1273 
1274 	\param parent The parent team.
1275 	\param team The team to be inserted into \a parent's child list.
1276 */
1277 static void
1278 insert_team_into_parent(Team* parent, Team* team)
1279 {
1280 	ASSERT(parent != NULL);
1281 
1282 	team->siblings_next = parent->children;
1283 	parent->children = team;
1284 	team->parent = parent;
1285 }
1286 
1287 
1288 /*!	Removes team \a team from the child list of team \a parent.
1289 
1290 	The caller must hold the lock of both \a parent and \a team.
1291 
1292 	\param parent The parent team.
1293 	\param team The team to be removed from \a parent's child list.
1294 */
1295 static void
1296 remove_team_from_parent(Team* parent, Team* team)
1297 {
1298 	Team* child;
1299 	Team* last = NULL;
1300 
1301 	for (child = parent->children; child != NULL;
1302 			child = child->siblings_next) {
1303 		if (child == team) {
1304 			if (last == NULL)
1305 				parent->children = child->siblings_next;
1306 			else
1307 				last->siblings_next = child->siblings_next;
1308 
1309 			team->parent = NULL;
1310 			break;
1311 		}
1312 		last = child;
1313 	}
1314 }
1315 
1316 
1317 /*!	Returns whether the given team is a session leader.
1318 	The caller must hold the team's lock or its process group's lock.
1319 */
1320 static bool
1321 is_session_leader(Team* team)
1322 {
1323 	return team->session_id == team->id;
1324 }
1325 
1326 
1327 /*!	Returns whether the given team is a process group leader.
1328 	The caller must hold the team's lock or its process group's lock.
1329 */
1330 static bool
1331 is_process_group_leader(Team* team)
1332 {
1333 	return team->group_id == team->id;
1334 }
1335 
1336 
1337 /*!	Inserts the given team into the given process group.
1338 	The caller must hold the process group's lock, the team's lock, and the
1339 	team's parent's lock.
1340 */
static void
insert_team_into_group(ProcessGroup* group, Team* team)
{
	// cache group and session IDs in the team structure
	team->group = group;
	team->group_id = group->id;
	team->session_id = group->Session()->id;

	// push the team onto the head of the group's team list; the list's
	// ownership of the team's group pointer is backed by a group reference
	team->group_next = group->teams;
	group->teams = team;
	group->AcquireReference();
}
1352 
1353 
1354 /*!	Removes the given team from its process group.
1355 
1356 	The caller must hold the process group's lock, the team's lock, and the
1357 	team's parent's lock. Interrupts must be enabled.
1358 
1359 	\param team The team that'll be removed from its process group.
1360 */
1361 static void
1362 remove_team_from_group(Team* team)
1363 {
1364 	ProcessGroup* group = team->group;
1365 	Team* current;
1366 	Team* last = NULL;
1367 
1368 	// the team must be in a process group to let this function have any effect
1369 	if  (group == NULL)
1370 		return;
1371 
1372 	for (current = group->teams; current != NULL;
1373 			current = current->group_next) {
1374 		if (current == team) {
1375 			if (last == NULL)
1376 				group->teams = current->group_next;
1377 			else
1378 				last->group_next = current->group_next;
1379 
1380 			team->group = NULL;
1381 			break;
1382 		}
1383 		last = current;
1384 	}
1385 
1386 	team->group = NULL;
1387 	team->group_next = NULL;
1388 
1389 	group->ReleaseReference();
1390 }
1391 
1392 
/*!	Creates the team's "user area", from which per-thread user_thread
	structures are later handed out (cf. team_allocate_user_thread() usage
	elsewhere in this file).

	\param team The team to create the area for.
	\param exactAddress If not \c NULL, the exact address to place the
		reservation and area at (fork passes the parent's area address so
		the child's layout matches); otherwise a randomized base is used.
	\return \c B_OK on success, the area creation error otherwise.
*/
static status_t
create_team_user_data(Team* team, void* exactAddress = NULL)
{
	void* address;
	uint32 addressSpec;

	// decide placement: caller-requested exact address, or randomized base
	if (exactAddress != NULL) {
		address = exactAddress;
		addressSpec = B_EXACT_ADDRESS;
	} else {
		address = (void*)KERNEL_USER_DATA_BASE;
		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
	}

	// Reserve the full (larger) range up front; the area itself initially
	// only covers kTeamUserDataInitialSize -- presumably so it can grow
	// within the reservation later.
	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);

	virtual_address_restrictions virtualRestrictions = {};
	if (result == B_OK || exactAddress != NULL) {
		// Either the reservation succeeded (use the address it yielded), or
		// an exact address was requested -- then a reservation failure is
		// deliberately ignored and the exact address used anyway.
		if (exactAddress != NULL)
			virtualRestrictions.address = exactAddress;
		else
			virtualRestrictions.address = address;
		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	} else {
		// reservation failed -- fall back to randomized area placement
		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
	}

	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area",
		kTeamUserDataInitialSize, B_FULL_LOCK,
		B_READ_AREA | B_WRITE_AREA | B_KERNEL_AREA, 0, 0,
		&virtualRestrictions, &physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	// initialize the team's user data bookkeeping
	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = kTeamUserDataInitialSize;
	team->free_user_threads = NULL;

	return B_OK;
}
1437 
1438 
/*!	Deletes the team's user data area and the address range reservation
	created by create_team_user_data(), and frees the kernel-side free list
	of user_thread entries. A no-op if the team has no user data area.
*/
static void
delete_team_user_data(Team* team)
{
	if (team->user_data_area >= 0) {
		// delete the area first, then drop the surrounding reservation
		vm_delete_area(team->id, team->user_data_area, true);
		vm_unreserve_address_range(team->id, (void*)team->user_data,
			kTeamUserDataReservedSize);

		// reset bookkeeping to the "no user data" state
		team->user_data = 0;
		team->used_user_data = 0;
		team->user_data_size = 0;
		team->user_data_area = -1;
		// the free-list entries are kernel heap allocations
		while (free_user_thread* entry = team->free_user_threads) {
			team->free_user_threads = entry->next;
			free(entry);
		}
	}
}
1457 
1458 
1459 static status_t
1460 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1461 	int32 argCount, int32 envCount, char**& _flatArgs)
1462 {
1463 	if (argCount < 0 || envCount < 0)
1464 		return B_BAD_VALUE;
1465 
1466 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1467 		return B_TOO_MANY_ARGS;
1468 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1469 		return B_BAD_VALUE;
1470 
1471 	if (!IS_USER_ADDRESS(userFlatArgs))
1472 		return B_BAD_ADDRESS;
1473 
1474 	// allocate kernel memory
1475 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1476 	if (flatArgs == NULL)
1477 		return B_NO_MEMORY;
1478 
1479 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1480 		free(flatArgs);
1481 		return B_BAD_ADDRESS;
1482 	}
1483 
1484 	// check and relocate the array
1485 	status_t error = B_OK;
1486 	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
1487 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1488 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1489 		if (i == argCount || i == argCount + envCount + 1) {
1490 			// check array null termination
1491 			if (flatArgs[i] != NULL) {
1492 				error = B_BAD_VALUE;
1493 				break;
1494 			}
1495 		} else {
1496 			// check string
1497 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1498 			size_t maxLen = stringEnd - arg;
1499 			if (arg < stringBase || arg >= stringEnd
1500 					|| strnlen(arg, maxLen) == maxLen) {
1501 				error = B_BAD_VALUE;
1502 				break;
1503 			}
1504 
1505 			flatArgs[i] = arg;
1506 		}
1507 	}
1508 
1509 	if (error == B_OK)
1510 		_flatArgs = flatArgs;
1511 	else
1512 		free(flatArgs);
1513 
1514 	return error;
1515 }
1516 
1517 
1518 static void
1519 free_team_arg(struct team_arg* teamArg)
1520 {
1521 	if (teamArg != NULL) {
1522 		free(teamArg->flat_args);
1523 		free(teamArg->path);
1524 		free(teamArg);
1525 	}
1526 }
1527 
1528 
/*!	Allocates and initializes a team_arg structure, bundling everything a
	newly created (or exec()ed) team needs to set up userland: the program
	path (copied), the flat arguments block (ownership taken over), the
	umask, and the runtime loader error port/token.

	On success the caller owns the structure and must dispose of it via
	free_team_arg().

	\param _teamArg Set to the new structure on success.
	\return \c B_OK on success, \c B_NO_MEMORY if an allocation failed (in
		which case \a flatArgs remains owned by the caller).
*/
static status_t
create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
	port_id port, uint32 token)
{
	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
	if (teamArg == NULL)
		return B_NO_MEMORY;

	teamArg->path = strdup(path);
	if (teamArg->path == NULL) {
		free(teamArg);
		return B_NO_MEMORY;
	}

	// copy the args over
	teamArg->flat_args = flatArgs;
	teamArg->flat_args_size = flatArgsSize;
	teamArg->arg_count = argCount;
	teamArg->env_count = envCount;
	teamArg->flags = 0;
	teamArg->umask = umask;
	teamArg->error_port = port;
	teamArg->error_token = token;

	// determine the flags from the environment
	// (the environment pointers start after the args and their NULL
	// terminator in the flat block)
	const char* const* env = flatArgs + argCount + 1;
	for (int32 i = 0; i < envCount; i++) {
		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
			// the program requests address space layout randomization off
			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
			break;
		}
	}

	*_teamArg = teamArg;
	return B_OK;
}
1566 
1567 
/*!	Does the real work of starting a newly created team's main thread:
	copies the program arguments onto the user stack, updates the team
	state, clones and registers the commpage, loads the runtime loader
	image, and finally enters userspace. Runs in the context of the new
	team's main thread; takes over ownership of \a args.

	\param args The team_arg structure for the new team.
	\return Only returns in case of error; the caller
		(team_create_thread_start()) then exits the thread.
*/
static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
		thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size								| usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
	// TLS_SIZE							| TLS data
	// sizeof(user_space_program_args)	| argument structure for the runtime
	//									| loader
	// flat arguments size				| flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	// the argument structure lives right above the TLS block (see the
	// layout table above)
	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	// Fill in the program args structure and copy the flat arguments block
	// onto the user stack; all destinations are userland memory, hence the
	// user_strlcpy()/user_memcpy() calls.
	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(&programArgs->disable_user_addons,
			&sDisableUserAddOns, sizeof(bool)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// set team args and update state
	team->Lock();
	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	// Clone commpage area
	area_id commPageArea = clone_commpage_area(team->id,
		&team->commpage_address);
	if (commPageArea  < B_OK) {
		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
			strerror(commPageArea)));
		return commPageArea;
	}

	// Register commpage image
	image_id commPageImage = get_commpage_image();
	extended_image_info imageInfo;
	err = get_image_info(commPageImage, &imageInfo.basic_info);
	if (err != B_OK) {
		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
			strerror(err)));
		return err;
	}
	// rebase the image info to the team's clone of the commpage
	imageInfo.basic_info.text = team->commpage_address;
	imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
	imageInfo.symbol_table = NULL;
	imageInfo.symbol_hash = NULL;
	imageInfo.string_table = NULL;
	image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
	if (image < 0) {
		TRACE(("team_create_thread_start: register_image() failed: %s\n",
			strerror(image)));
		return image;
	}

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	// enter userspace -- returns only in case of error
	return thread_enter_userspace_new_team(thread, (addr_t)entry,
		programArgs, team->commpage_address);
}
1710 
1711 
/*!	Entry point of a newly created team's main kernel thread. Delegates to
	team_create_thread_start_internal(); if that returns (i.e. failed), the
	team's exit info is initialized for the error case and the thread
	exits. Never actually returns.
*/
static status_t
team_create_thread_start(void* args)
{
	team_create_thread_start_internal(args);
	team_init_exit_info_on_error(thread_get_current_thread()->team);
	thread_exit();
		// does not return
	return B_OK;
}
1721 
1722 
/*!	Creates a new team executing the program given in the flat arguments.

	Creates the main Thread and Team objects, sets up the team's I/O
	context, address space, and user data area, links the team into its
	parent and process group, and finally spawns the main thread, which
	loads the runtime loader and enters userspace.

	\param _flatArgs Flat arguments block (kernel copy; flatArgs[0] is the
		program path). On success ownership passes to the team_arg
		structure and the reference is set to \c NULL.
	\param parentID The team to become the new team's parent.
	\param flags If \c B_WAIT_TILL_LOADED is set, blocks until the new
		team's loader finished (or failed).
	\param errorPort, errorToken Passed through to the runtime loader for
		error reporting.
	\return The ID of the new team's main thread, or an error code.
*/
static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	thread_id thread;
	status_t status;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	ConditionVariableEntry loadingWaitEntry;
	io_context* parentIOContext = NULL;
	team_id teamID;
	bool teamLimitReached = false;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
		"\n", path, flatArgs, argCount));

	// cut the path from the main thread name
	const char* threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the main thread object
	Thread* mainThread;
	status = Thread::Create(threadName, mainThread);
	if (status != B_OK)
		return status;
	BReference<Thread> mainThreadReference(mainThread, true);

	// create team object
	Team* team = Team::Create(mainThread->id, path, false);
	if (team == NULL)
		return B_NO_MEMORY;
	BReference<Team> teamReference(team, true);

	// if requested, set up the synchronization structure the loader thread
	// will notify when loading is done
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		loadingInfo.condition.Init(team, "image load");
		loadingInfo.condition.Add(&loadingWaitEntry);
		loadingInfo.result = B_ERROR;
		team->loading_info = &loadingInfo;
	}

	// get the parent team
	Team* parent = Team::Get(parentID);
	if (parent == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> parentReference(parent, true);

	parent->LockTeamAndProcessGroup();
	team->Lock();

	// inherit the parent's user/group
	inherit_parent_user_and_group(team, parent);

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err2;

	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// insert the team into its parent and the teams hash
	parent->LockTeamAndProcessGroup();
	team->Lock();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);

		sTeamHash.Insert(team);
		teamLimitReached = sUsedTeams >= sMaxTeams;
		if (!teamLimitReached)
			sUsedTeams++;
	}

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err6;
	}

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	// NOTE(review): the priority parameter is not used here -- the main
	// thread is always created with B_NORMAL_PRIORITY.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err6;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		if (mainThread != NULL) {
			// resume the team's main thread
			thread_continue(mainThread);
		}

		// Now wait until loading is finished. We will be woken either by the
		// thread, when it finished or aborted loading, or when the team is
		// going to die (e.g. is killed). In either case the one notifying is
		// responsible for unsetting `loading_info` in the team structure.
		loadingWaitEntry.Wait();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(teamID);

	return thread;

err6:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
		sTeamHash.Remove(team);
		if (!teamLimitReached)
			sUsedTeams--;
	}

	sNotificationService.Notify(TEAM_REMOVED, team);

	delete_team_user_data(team);
err4:
	team->address_space->Put();
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	return status;
}
1934 
1935 
1936 /*!	Almost shuts down the current team and loads a new image into it.
1937 	If successful, this function does not return and will takeover ownership of
1938 	the arguments provided.
1939 	This function may only be called in a userland team (caused by one of the
1940 	exec*() syscalls).
1941 */
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
		team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	// reject the exec if any thread besides the main and nub threads exists
	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	// bundle the new program's arguments; -1/0: no loader error port/token
	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	// Tear down the old userland image: user data area, all areas, XSI
	// semaphore undo info, owned ports and semaphores, registered images,
	// CLOEXEC file descriptors, and the realtime semaphore context. From
	// here on there is no way back -- on failure we can only exit.
	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	// update ASLR
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
			// does never return
	} else
		free_team_arg(teamArgs);

	// Sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that).
	exit_thread(status);

	// We return a status here since the signal that is sent by the
	// call above is not immediately handled.
	return B_ERROR;
}
2082 
2083 
2084 static thread_id
2085 fork_team(void)
2086 {
2087 	Thread* parentThread = thread_get_current_thread();
2088 	Team* parentTeam = parentThread->team;
2089 	Team* team;
2090 	arch_fork_arg* forkArgs;
2091 	struct area_info info;
2092 	thread_id threadID;
2093 	status_t status;
2094 	ssize_t areaCookie;
2095 	bool teamLimitReached = false;
2096 
2097 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2098 
2099 	if (parentTeam == team_get_kernel_team())
2100 		return B_NOT_ALLOWED;
2101 
2102 	// create a new team
2103 	// TODO: this is very similar to load_image_internal() - maybe we can do
2104 	// something about it :)
2105 
2106 	// create the main thread object
2107 	Thread* thread;
2108 	status = Thread::Create(parentThread->name, thread);
2109 	if (status != B_OK)
2110 		return status;
2111 	BReference<Thread> threadReference(thread, true);
2112 
2113 	// create the team object
2114 	team = Team::Create(thread->id, NULL, false);
2115 	if (team == NULL)
2116 		return B_NO_MEMORY;
2117 
2118 	parentTeam->LockTeamAndProcessGroup();
2119 	team->Lock();
2120 
2121 	team->SetName(parentTeam->Name());
2122 	team->SetArgs(parentTeam->Args());
2123 
2124 	team->commpage_address = parentTeam->commpage_address;
2125 
2126 	// Inherit the parent's user/group.
2127 	inherit_parent_user_and_group(team, parentTeam);
2128 
2129 	// inherit signal handlers
2130 	team->InheritSignalActions(parentTeam);
2131 
2132 	team->Unlock();
2133 	parentTeam->UnlockTeamAndProcessGroup();
2134 
2135 	// inherit some team debug flags
2136 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2137 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2138 
2139 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2140 	if (forkArgs == NULL) {
2141 		status = B_NO_MEMORY;
2142 		goto err1;
2143 	}
2144 
2145 	// create a new io_context for this team
2146 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2147 	if (!team->io_context) {
2148 		status = B_NO_MEMORY;
2149 		goto err2;
2150 	}
2151 
2152 	// duplicate the realtime sem context
2153 	if (parentTeam->realtime_sem_context) {
2154 		team->realtime_sem_context = clone_realtime_sem_context(
2155 			parentTeam->realtime_sem_context);
2156 		if (team->realtime_sem_context == NULL) {
2157 			status = B_NO_MEMORY;
2158 			goto err2;
2159 		}
2160 	}
2161 
2162 	// create an address space for this team
2163 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2164 		&team->address_space);
2165 	if (status < B_OK)
2166 		goto err3;
2167 
2168 	// copy all areas of the team
2169 	// TODO: should be able to handle stack areas differently (ie. don't have
2170 	// them copy-on-write)
2171 
2172 	areaCookie = 0;
2173 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2174 		if (info.area == parentTeam->user_data_area) {
2175 			// don't clone the user area; just create a new one
2176 			status = create_team_user_data(team, info.address);
2177 			if (status != B_OK)
2178 				break;
2179 
2180 			thread->user_thread = team_allocate_user_thread(team);
2181 		} else {
2182 			void* address;
2183 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2184 				&address, B_CLONE_ADDRESS, info.area);
2185 			if (area < B_OK) {
2186 				status = area;
2187 				break;
2188 			}
2189 
2190 			if (info.area == parentThread->user_stack_area)
2191 				thread->user_stack_area = area;
2192 		}
2193 	}
2194 
2195 	if (status < B_OK)
2196 		goto err4;
2197 
2198 	if (thread->user_thread == NULL) {
2199 #if KDEBUG
2200 		panic("user data area not found, parent area is %" B_PRId32,
2201 			parentTeam->user_data_area);
2202 #endif
2203 		status = B_ERROR;
2204 		goto err4;
2205 	}
2206 
2207 	thread->user_stack_base = parentThread->user_stack_base;
2208 	thread->user_stack_size = parentThread->user_stack_size;
2209 	thread->user_local_storage = parentThread->user_local_storage;
2210 	thread->sig_block_mask = parentThread->sig_block_mask;
2211 	thread->signal_stack_base = parentThread->signal_stack_base;
2212 	thread->signal_stack_size = parentThread->signal_stack_size;
2213 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2214 
2215 	arch_store_fork_frame(forkArgs);
2216 
2217 	// copy image list
2218 	if (copy_images(parentTeam->id, team) != B_OK)
2219 		goto err5;
2220 
2221 	// insert the team into its parent and the teams hash
2222 	parentTeam->LockTeamAndProcessGroup();
2223 	team->Lock();
2224 
2225 	{
2226 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2227 
2228 		sTeamHash.Insert(team);
2229 		teamLimitReached = sUsedTeams >= sMaxTeams;
2230 		if (!teamLimitReached)
2231 			sUsedTeams++;
2232 	}
2233 
2234 	insert_team_into_parent(parentTeam, team);
2235 	insert_team_into_group(parentTeam->group, team);
2236 
2237 	team->Unlock();
2238 	parentTeam->UnlockTeamAndProcessGroup();
2239 
2240 	// notify team listeners
2241 	sNotificationService.Notify(TEAM_ADDED, team);
2242 
2243 	if (teamLimitReached) {
2244 		status = B_NO_MORE_TEAMS;
2245 		goto err6;
2246 	}
2247 
2248 	// create the main thread
2249 	{
2250 		ThreadCreationAttributes threadCreationAttributes(NULL,
2251 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2252 		threadCreationAttributes.forkArgs = forkArgs;
2253 		threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS;
2254 		threadID = thread_create_thread(threadCreationAttributes, false);
2255 		if (threadID < 0) {
2256 			status = threadID;
2257 			goto err6;
2258 		}
2259 	}
2260 
2261 	// notify the debugger
2262 	user_debug_team_created(team->id);
2263 
2264 	T(TeamForked(threadID));
2265 
2266 	resume_thread(threadID);
2267 	return threadID;
2268 
2269 err6:
2270 	// Remove the team structure from the process group, the parent team, and
2271 	// the team hash table and delete the team structure.
2272 	parentTeam->LockTeamAndProcessGroup();
2273 	team->Lock();
2274 
2275 	remove_team_from_group(team);
2276 	remove_team_from_parent(team->parent, team);
2277 
2278 	team->Unlock();
2279 	parentTeam->UnlockTeamAndProcessGroup();
2280 
2281 	{
2282 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2283 		sTeamHash.Remove(team);
2284 		if (!teamLimitReached)
2285 			sUsedTeams--;
2286 	}
2287 
2288 	sNotificationService.Notify(TEAM_REMOVED, team);
2289 err5:
2290 	remove_images(team);
2291 err4:
2292 	team->address_space->RemoveAndPut();
2293 err3:
2294 	delete_realtime_sem_context(team->realtime_sem_context);
2295 err2:
2296 	free(forkArgs);
2297 err1:
2298 	team->ReleaseReference();
2299 
2300 	return status;
2301 }
2302 
2303 
2304 /*!	Returns if the specified team \a parent has any children belonging to the
2305 	process group with the specified ID \a groupID.
2306 	The caller must hold \a parent's lock.
2307 */
2308 static bool
2309 has_children_in_group(Team* parent, pid_t groupID)
2310 {
2311 	for (Team* child = parent->children; child != NULL;
2312 			child = child->siblings_next) {
2313 		TeamLocker childLocker(child);
2314 		if (child->group_id == groupID)
2315 			return true;
2316 	}
2317 
2318 	return false;
2319 }
2320 
2321 
2322 /*!	Returns the first job control entry from \a children, which matches \a id.
2323 	\a id can be:
2324 	- \code > 0 \endcode: Matching an entry with that team ID.
2325 	- \code == -1 \endcode: Matching any entry.
2326 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2327 	\c 0 is an invalid value for \a id.
2328 
2329 	The caller must hold the lock of the team that \a children belongs to.
2330 
2331 	\param children The job control entry list to check.
2332 	\param id The match criterion.
2333 	\return The first matching entry or \c NULL, if none matches.
2334 */
2335 static job_control_entry*
2336 get_job_control_entry(team_job_control_children& children, pid_t id)
2337 {
2338 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2339 		 job_control_entry* entry = it.Next();) {
2340 
2341 		if (id > 0) {
2342 			if (entry->thread == id)
2343 				return entry;
2344 		} else if (id == -1) {
2345 			return entry;
2346 		} else {
2347 			pid_t processGroup
2348 				= (entry->team ? entry->team->group_id : entry->group_id);
2349 			if (processGroup == -id)
2350 				return entry;
2351 		}
2352 	}
2353 
2354 	return NULL;
2355 }
2356 
2357 
2358 /*!	Returns the first job control entry from one of team's dead, continued, or
2359 	stopped children which matches \a id.
2360 	\a id can be:
2361 	- \code > 0 \endcode: Matching an entry with that team ID.
2362 	- \code == -1 \endcode: Matching any entry.
2363 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2364 	\c 0 is an invalid value for \a id.
2365 
2366 	The caller must hold \a team's lock.
2367 
2368 	\param team The team whose dead, stopped, and continued child lists shall be
2369 		checked.
2370 	\param id The match criterion.
2371 	\param flags Specifies which children shall be considered. Dead children
2372 		are considered when \a flags is ORed bitwise with \c WEXITED, stopped
2373 		children are considered when \a flags is ORed bitwise with \c WUNTRACED
2374 		or \c WSTOPPED, continued children when \a flags is ORed bitwise with
2375 		\c WCONTINUED.
2376 	\return The first matching entry or \c NULL, if none matches.
2377 */
2378 static job_control_entry*
2379 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2380 {
2381 	job_control_entry* entry = NULL;
2382 
2383 	if ((flags & WEXITED) != 0)
2384 		entry = get_job_control_entry(team->dead_children, id);
2385 
2386 	if (entry == NULL && (flags & WCONTINUED) != 0)
2387 		entry = get_job_control_entry(team->continued_children, id);
2388 
2389 	if (entry == NULL && (flags & (WUNTRACED | WSTOPPED)) != 0)
2390 		entry = get_job_control_entry(team->stopped_children, id);
2391 
2392 	return entry;
2393 }
2394 
2395 
2396 job_control_entry::job_control_entry()
2397 	:
2398 	has_group_ref(false)
2399 {
2400 }
2401 
2402 
2403 job_control_entry::~job_control_entry()
2404 {
2405 	if (has_group_ref) {
2406 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2407 
2408 		ProcessGroup* group = sGroupHash.Lookup(group_id);
2409 		if (group == NULL) {
2410 			panic("job_control_entry::~job_control_entry(): unknown group "
2411 				"ID: %" B_PRId32, group_id);
2412 			return;
2413 		}
2414 
2415 		groupHashLocker.Unlock();
2416 
2417 		group->ReleaseReference();
2418 	}
2419 }
2420 
2421 
2422 /*!	Invoked when the owning team is dying, initializing the entry according to
2423 	the dead state.
2424 
2425 	The caller must hold the owning team's lock and the scheduler lock.
2426 */
2427 void
2428 job_control_entry::InitDeadState()
2429 {
2430 	if (team != NULL) {
2431 		ASSERT(team->exit.initialized);
2432 
2433 		group_id = team->group_id;
2434 		team->group->AcquireReference();
2435 		has_group_ref = true;
2436 
2437 		thread = team->id;
2438 		status = team->exit.status;
2439 		reason = team->exit.reason;
2440 		signal = team->exit.signal;
2441 		signaling_user = team->exit.signaling_user;
2442 		user_time = team->dead_threads_user_time
2443 			+ team->dead_children.user_time;
2444 		kernel_time = team->dead_threads_kernel_time
2445 			+ team->dead_children.kernel_time;
2446 
2447 		team = NULL;
2448 	}
2449 }
2450 
2451 
2452 job_control_entry&
2453 job_control_entry::operator=(const job_control_entry& other)
2454 {
2455 	state = other.state;
2456 	thread = other.thread;
2457 	signal = other.signal;
2458 	has_group_ref = false;
2459 	signaling_user = other.signaling_user;
2460 	team = other.team;
2461 	group_id = other.group_id;
2462 	status = other.status;
2463 	reason = other.reason;
2464 	user_time = other.user_time;
2465 	kernel_time = other.kernel_time;
2466 
2467 	return *this;
2468 }
2469 
2470 
/*! This is the kernel backend for waitid().

	Waits for a state change (death, stop, continuation -- selected via
	\a flags) of a child of the calling team and reports it.

	\param child The wait target: > 0 a specific team ID, -1 any child, 0 any
		child in the caller's process group, < -1 any child in the process
		group \c -child.
	\param flags ORed combination of \c WEXITED, \c WUNTRACED / \c WSTOPPED,
		\c WCONTINUED (at least one required), plus optionally \c WNOHANG and
		\c WNOWAIT.
	\param _info Output: filled in siginfo_t style (si_signo, si_pid, si_uid,
		si_code, si_status).
	\param _usage_info Output: the dead child's user/kernel CPU times (only
		set for \c JOB_CONTROL_STATE_DEAD).
	\return The ID of the child whose state change was reported, or a
		negative error code (\c B_BAD_VALUE, \c ECHILD, \c B_WOULD_BLOCK,
		\c B_INTERRUPTED).
*/
static thread_id
wait_for_child(pid_t child, uint32 flags, siginfo_t& _info,
	team_usage_info& _usage_info)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	struct job_control_entry foundEntry;
	struct job_control_entry* freeDeathEntry = NULL;
	status_t status = B_OK;

	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
		child, flags));

	T(WaitForChild(child, flags));

	// at least one of the state-selection flags must be given
	if ((flags & (WEXITED | WUNTRACED | WSTOPPED | WCONTINUED)) == 0) {
		T(WaitForChildDone(B_BAD_VALUE));
		return B_BAD_VALUE;
	}

	pid_t originalChild = child;

	bool ignoreFoundEntries = false;
	bool ignoreFoundEntriesChecked = false;

	// Retry loop: check for a matching entry, and if there is none (yet),
	// wait on the team's dead-children condition variable and try again.
	while (true) {
		// lock the team
		TeamLocker teamLocker(team);

		// A 0 child argument means to wait for all children in the process
		// group of the calling team.
		child = originalChild == 0 ? -team->group_id : originalChild;

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		// If we don't have an entry yet, check whether there are any children
		// complying to the process group specification at all.
		if (entry == NULL) {
			// No success yet -- check whether there are any children complying
			// to the process group specification at all.
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else if (child != team->id) {
				if (Team* childTeam = Team::Get(child)) {
					BReference<Team> childTeamReference(childTeam, true);
					TeamLocker childTeamLocker(childTeam);
					childrenExist = childTeam->parent == team;
				}
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;

			// unless WNOWAIT has been specified, "consume" the wait state
			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
				if (entry->state == JOB_CONTROL_STATE_DEAD) {
					// The child is dead. Reap its death entry.
					freeDeathEntry = entry;
					team->dead_children.entries.Remove(entry);
					team->dead_children.count--;
				} else {
					// The child is well. Reset its job control state.
					team_set_job_control_state(entry->team,
						JOB_CONTROL_STATE_NONE, NULL);
				}
			}
		}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		// Note: the entry must be added while still holding the team lock,
		// so we cannot miss a notification between the check and the wait.
		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children.condition_variable.Add(&deadWaitEntry);

		teamLocker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			teamLocker.Lock();

			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			teamLocker.Unlock();

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// When we got here, we have a valid death entry, and already got
	// unregistered from the team or group. Fill in the returned info.
	memset(&_info, 0, sizeof(_info));
	_info.si_signo = SIGCHLD;
	_info.si_pid = foundEntry.thread;
	_info.si_uid = foundEntry.signaling_user;
	// TODO: Fill in si_errno?

	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			_info.si_code = foundEntry.reason;
			_info.si_status = foundEntry.reason == CLD_EXITED
				? foundEntry.status : foundEntry.signal;
			_usage_info.user_time = foundEntry.user_time;
			_usage_info.kernel_time = foundEntry.kernel_time;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			_info.si_code = CLD_STOPPED;
			_info.si_status = foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			_info.si_code = CLD_CONTINUED;
			_info.si_status = 0;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
	// status is available.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker signalLocker(team->signal_lock);
	SpinLocker threadCreationLocker(gThreadCreationLock);

	if (is_team_signal_blocked(team, SIGCHLD)) {
		if (get_job_control_entry(team, child, flags) == NULL)
			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
	}

	threadCreationLocker.Unlock();
	signalLocker.Unlock();
	teamLocker.Unlock();

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}
2659 
2660 
2661 /*! Fills the team_info structure with information from the specified team.
2662 	Interrupts must be enabled. The team must not be locked.
2663 */
2664 static status_t
2665 fill_team_info(Team* team, team_info* info, size_t size)
2666 {
2667 	if (size != sizeof(team_info))
2668 		return B_BAD_VALUE;
2669 
2670 	// TODO: Set more informations for team_info
2671 	memset(info, 0, size);
2672 
2673 	info->team = team->id;
2674 		// immutable
2675 	info->image_count = count_images(team);
2676 		// protected by sImageMutex
2677 
2678 	TeamLocker teamLocker(team);
2679 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2680 
2681 	info->thread_count = team->num_threads;
2682 	//info->area_count =
2683 	info->debugger_nub_thread = team->debug_info.nub_thread;
2684 	info->debugger_nub_port = team->debug_info.nub_port;
2685 	info->uid = team->effective_uid;
2686 	info->gid = team->effective_gid;
2687 
2688 	strlcpy(info->args, team->Args(), sizeof(info->args));
2689 	info->argc = 1;
2690 
2691 	return B_OK;
2692 }
2693 
2694 
2695 /*!	Returns whether the process group contains stopped processes.
2696 	The caller must hold the process group's lock.
2697 */
2698 static bool
2699 process_group_has_stopped_processes(ProcessGroup* group)
2700 {
2701 	Team* team = group->teams;
2702 	while (team != NULL) {
2703 		// the parent team's lock guards the job control entry -- acquire it
2704 		team->LockTeamAndParent(false);
2705 
2706 		if (team->job_control_entry != NULL
2707 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2708 			team->UnlockTeamAndParent();
2709 			return true;
2710 		}
2711 
2712 		team->UnlockTeamAndParent();
2713 
2714 		team = team->group_next;
2715 	}
2716 
2717 	return false;
2718 }
2719 
2720 
/*!	Iterates through all process groups queued in team_remove_team() and signals
	those that are orphaned and have stopped processes.
	The caller must not hold any team or process group locks.
*/
static void
orphaned_process_group_check()
{
	// process as long as there are groups in the list
	while (true) {
		// remove the head from the list
		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
		if (group == NULL)
			return;

		group->UnsetOrphanedCheck();
		BReference<ProcessGroup> groupReference(group);

		// Drop the list lock before locking the group itself -- the group
		// lock must not be acquired with the check-list lock held.
		orphanedCheckLocker.Unlock();

		AutoLocker<ProcessGroup> groupLocker(group);

		// If the group is orphaned and contains stopped processes, we're
		// supposed to send SIGHUP + SIGCONT.
		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
			Thread* currentThread = thread_get_current_thread();

			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
			send_signal_to_process_group_locked(group, signal, 0);

			// reuse the signal object for the follow-up SIGCONT
			signal.SetNumber(SIGCONT);
			send_signal_to_process_group_locked(group, signal, 0);
		}
	}
}
2757 
2758 
2759 static status_t
2760 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2761 	uint32 flags)
2762 {
2763 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2764 		return B_BAD_VALUE;
2765 
2766 	// get the team
2767 	Team* team = Team::GetAndLock(id);
2768 	if (team == NULL)
2769 		return B_BAD_TEAM_ID;
2770 	BReference<Team> teamReference(team, true);
2771 	TeamLocker teamLocker(team, true);
2772 
2773 	if ((flags & B_CHECK_PERMISSION) != 0) {
2774 		uid_t uid = geteuid();
2775 		if (uid != 0 && uid != team->effective_uid)
2776 			return B_NOT_ALLOWED;
2777 	}
2778 
2779 	bigtime_t kernelTime = 0;
2780 	bigtime_t userTime = 0;
2781 
2782 	switch (who) {
2783 		case B_TEAM_USAGE_SELF:
2784 		{
2785 			Thread* thread = team->thread_list;
2786 
2787 			for (; thread != NULL; thread = thread->team_next) {
2788 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2789 				kernelTime += thread->kernel_time;
2790 				userTime += thread->user_time;
2791 			}
2792 
2793 			kernelTime += team->dead_threads_kernel_time;
2794 			userTime += team->dead_threads_user_time;
2795 			break;
2796 		}
2797 
2798 		case B_TEAM_USAGE_CHILDREN:
2799 		{
2800 			Team* child = team->children;
2801 			for (; child != NULL; child = child->siblings_next) {
2802 				TeamLocker childLocker(child);
2803 
2804 				Thread* thread = team->thread_list;
2805 
2806 				for (; thread != NULL; thread = thread->team_next) {
2807 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2808 					kernelTime += thread->kernel_time;
2809 					userTime += thread->user_time;
2810 				}
2811 
2812 				kernelTime += child->dead_threads_kernel_time;
2813 				userTime += child->dead_threads_user_time;
2814 			}
2815 
2816 			kernelTime += team->dead_children.kernel_time;
2817 			userTime += team->dead_children.user_time;
2818 			break;
2819 		}
2820 	}
2821 
2822 	info->kernel_time = kernelTime;
2823 	info->user_time = userTime;
2824 
2825 	return B_OK;
2826 }
2827 
2828 
2829 //	#pragma mark - Private kernel API
2830 
2831 
/*!	Initializes the team subsystem at boot: sets up the global team and
	process group hash tables, creates the initial session/process group and
	the kernel team, and registers the kernel debugger commands.
	Panics if any essential step fails.
*/
status_t
team_init(kernel_args* args)
{
	// create the team hash table
	new(&sTeamHash) TeamTable;
	if (sTeamHash.Init(64) != B_OK)
		panic("Failed to init team hash table!");

	new(&sGroupHash) ProcessGroupHashTable;
	if (sGroupHash.Init() != B_OK)
		panic("Failed to init process group hash table!");

	// create initial session and process groups

	ProcessSession* session = new(std::nothrow) ProcessSession(1);
	if (session == NULL)
		panic("Could not create initial session.\n");
	BReference<ProcessSession> sessionReference(session, true);

	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");
	BReference<ProcessGroup> groupReference(group, true);

	group->Publish(session);

	// create the kernel team
	sKernelTeam = Team::Create(1, "kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");

	sKernelTeam->address_space = VMAddressSpace::Kernel();
	sKernelTeam->SetArgs(sKernelTeam->Name());
	sKernelTeam->state = TEAM_STATE_NORMAL;

	// the kernel team runs with root credentials and no supplementary groups
	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;
	sKernelTeam->supplementary_group_count = 0;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	// a larger FD table than the default, since the kernel team hosts all
	// kernel-side file descriptors; failure here is not fatal
	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
		dprintf("Failed to resize FD table for kernel team!\n");

	// stick it in the team hash
	sTeamHash.Insert(sKernelTeam);

	// check safe mode settings
	sDisableUserAddOns = get_safemode_boolean(B_SAFEMODE_DISABLE_USER_ADD_ONS,
		false);

	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	sNotificationService.Register();

	return B_OK;
}
2910 
2911 
/*!	Returns the maximum number of teams the system allows. */
int32
team_max_teams(void)
{
	return sMaxTeams;
}
2917 
2918 
2919 int32
2920 team_used_teams(void)
2921 {
2922 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
2923 	return sUsedTeams;
2924 }
2925 
2926 
2927 /*! Returns a death entry of a child team specified by ID (if any).
2928 	The caller must hold the team's lock.
2929 
2930 	\param team The team whose dead children list to check.
2931 	\param child The ID of the child for whose death entry to lock. Must be > 0.
2932 	\param _deleteEntry Return variable, indicating whether the caller needs to
2933 		delete the returned entry.
2934 	\return The death entry of the matching team, or \c NULL, if no death entry
2935 		for the team was found.
2936 */
2937 job_control_entry*
2938 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2939 {
2940 	if (child <= 0)
2941 		return NULL;
2942 
2943 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2944 		child);
2945 	if (entry) {
2946 		// remove the entry only, if the caller is the parent of the found team
2947 		if (team_get_current_team_id() == entry->thread) {
2948 			team->dead_children.entries.Remove(entry);
2949 			team->dead_children.count--;
2950 			*_deleteEntry = true;
2951 		} else {
2952 			*_deleteEntry = false;
2953 		}
2954 	}
2955 
2956 	return entry;
2957 }
2958 
2959 
2960 /*! Quick check to see if we have a valid team ID. */
2961 bool
2962 team_is_valid(team_id id)
2963 {
2964 	if (id <= 0)
2965 		return false;
2966 
2967 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
2968 	return team_get_team_struct_locked(id) != NULL;
2969 }
2970 
2971 
/*!	Looks up the Team object with the given ID in the global team hash.
	The caller must hold \c sTeamHashLock (read or write).
	\return The Team, or \c NULL if no team with that ID exists.
*/
Team*
team_get_team_struct_locked(team_id id)
{
	return sTeamHash.Lookup(id);
}
2977 
2978 
2979 void
2980 team_set_controlling_tty(int32 ttyIndex)
2981 {
2982 	// lock the team, so its session won't change while we're playing with it
2983 	Team* team = thread_get_current_thread()->team;
2984 	TeamLocker teamLocker(team);
2985 
2986 	// get and lock the session
2987 	ProcessSession* session = team->group->Session();
2988 	AutoLocker<ProcessSession> sessionLocker(session);
2989 
2990 	// set the session's fields
2991 	session->controlling_tty = ttyIndex;
2992 	session->foreground_group = -1;
2993 }
2994 
2995 
2996 int32
2997 team_get_controlling_tty()
2998 {
2999 	// lock the team, so its session won't change while we're playing with it
3000 	Team* team = thread_get_current_thread()->team;
3001 	TeamLocker teamLocker(team);
3002 
3003 	// get and lock the session
3004 	ProcessSession* session = team->group->Session();
3005 	AutoLocker<ProcessSession> sessionLocker(session);
3006 
3007 	// get the session's field
3008 	return session->controlling_tty;
3009 }
3010 
3011 
/*!	Sets the foreground process group of the session associated with the
	given controlling TTY.

	\param ttyIndex The TTY index; must be the controlling TTY of the calling
		process's session.
	\param processGroupID The group to put into the foreground; must belong
		to the same session.
	\return \c B_OK on success, \c ENOTTY if \a ttyIndex is not the caller's
		controlling TTY, \c B_BAD_VALUE if the group doesn't belong to the
		session, \c B_INTERRUPTED if the calling (background) group was sent
		SIGTTOU instead.
*/
status_t
team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
{
	// lock the team, so its session won't change while we're playing with it
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// check given TTY -- must be the controlling tty of the calling process
	if (session->controlling_tty != ttyIndex)
		return ENOTTY;

	// check given process group -- must belong to our session
	{
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
		if (group == NULL || group->Session() != session)
			return B_BAD_VALUE;
	}

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
		InterruptsSpinLocker signalLocker(team->signal_lock);

		if (!is_team_signal_blocked(team, SIGTTOU)) {
			pid_t groupID = team->group_id;

			// All locks must be dropped before sending the signal -- the
			// signal delivery acquires group and team locks itself.
			signalLocker.Unlock();
			sessionLocker.Unlock();
			teamLocker.Unlock();

			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
			send_signal_to_process_group(groupID, signal, 0);
			return B_INTERRUPTED;
		}
	}

	session->foreground_group = processGroupID;

	return B_OK;
}
3061 
3062 
3063 uid_t
3064 team_geteuid(team_id id)
3065 {
3066 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3067 	Team* team = team_get_team_struct_locked(id);
3068 	if (team == NULL)
3069 		return (uid_t)-1;
3070 	return team->effective_uid;
3071 }
3072 
3073 
/*!	Removes the specified team from the global team hash, from its process
	group, and from its parent.
	It also moves all of its children to the kernel team.

	The caller must hold the following locks:
	- \a team's process group's lock,
	- the kernel team's lock,
	- \a team's parent team's lock (might be the kernel team), and
	- \a team's lock.

	\param team The team to remove.
	\param _signalGroup Output: the foreground process group that needs to be
		signalled when the team was a controlling process (session leader with
		controlling TTY), \c -1 otherwise. The actual signalling must be done
		by the caller after the locks have been dropped.
*/
void
team_remove_team(Team* team, pid_t& _signalGroup)
{
	Team* parent = team->parent;

	// remember how long this team lasted
	parent->dead_children.kernel_time += team->dead_threads_kernel_time
		+ team->dead_children.kernel_time;
	parent->dead_children.user_time += team->dead_threads_user_time
		+ team->dead_children.user_time;

	// remove the team from the hash table
	InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
	sTeamHash.Remove(team);
	sUsedTeams--;
	teamsLocker.Unlock();

	// The team can no longer be accessed by ID. Navigation to it is still
	// possible from its process group and its parent and children, but that
	// will be rectified shortly.
	team->state = TEAM_STATE_DEATH;

	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signalling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine, whom to signal.
	_signalGroup = -1;
	bool isSessionLeader = false;
	if (team->session_id == team->id
		&& team->group->Session()->controlling_tty >= 0) {
		isSessionLeader = true;

		ProcessSession* session = team->group->Session();

		AutoLocker<ProcessSession> sessionLocker(session);

		session->controlling_tty = -1;
		_signalGroup = session->foreground_group;
	}

	// remove us from our process group
	remove_team_from_group(team);

	// move the team's children to the kernel team
	while (Team* child = team->children) {
		// remove the child from the current team and add it to the kernel team
		TeamLocker childLocker(child);

		remove_team_from_parent(team, child);
		insert_team_into_parent(sKernelTeam, child);

		// move job control entries too
		// NOTE(review): these MoveFrom() calls transfer all entries on the
		// first loop iteration already; on later iterations they are no-ops.
		// Presumably placing them inside the loop is just convenience --
		// confirm before hoisting them out.
		sKernelTeam->stopped_children.entries.MoveFrom(
			&team->stopped_children.entries);
		sKernelTeam->continued_children.entries.MoveFrom(
			&team->continued_children.entries);

		// If the team was a session leader with controlling terminal,
		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
		// groups with stopped processes. Due to locking complications we can't
		// do that here, so we only check whether we were a reason for the
		// child's process group not being an orphan and, if so, schedule a
		// later check (cf. orphaned_process_group_check()).
		if (isSessionLeader) {
			ProcessGroup* childGroup = child->group;
			if (childGroup->Session()->id == team->session_id
				&& childGroup->id != team->group_id) {
				childGroup->ScheduleOrphanedCheck();
			}
		}

		// Note, we don't move the dead children entries. Those will be deleted
		// when the team structure is deleted.
	}

	// remove us from our parent
	remove_team_from_parent(parent, team);
}
3162 
3163 
/*!	Kills all threads but the main thread of the team and shuts down user
	debugging for it.
	To be called on exit of the team's main thread. No locks must be held.

	\param team The team in question.
	\return The port of the debugger for the team, -1 if none. To be passed to
		team_delete_team().
*/
port_id
team_shutdown_team(Team* team)
{
	ASSERT(thread_get_current_thread() == team->main_thread);

	TeamLocker teamLocker(team);

	// Make sure debugging changes won't happen anymore.
	port_id debuggerPort = -1;
	while (true) {
		// If a debugger change is in progress for the team, we'll have to
		// wait until it is done.
		ConditionVariableEntry waitForDebuggerEntry;
		bool waitForDebugger = false;

		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition != NULL) {
			team->debug_info.debugger_changed_condition->Add(
				&waitForDebuggerEntry);
			waitForDebugger = true;
		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
			// The team is being debugged. That will stop with the termination
			// of the nub thread. Since we set the team state to death, no one
			// can install a debugger anymore. We fetch the debugger's port to
			// send it a message at the bitter end.
			debuggerPort = team->debug_info.debugger_port;
		}

		debugInfoLocker.Unlock();

		if (!waitForDebugger)
			break;

		// wait for the debugger change to be finished
		teamLocker.Unlock();

		waitForDebuggerEntry.Wait();

		// re-acquire the team lock and re-check -- another change may have
		// started in the meantime
		teamLocker.Lock();
	}

	// Mark the team as shutting down. That will prevent new threads from being
	// created and debugger changes from taking place.
	team->state = TEAM_STATE_SHUTDOWN;

	// delete all timers
	team->DeleteUserTimers(false);

	// deactivate CPU time user timers for the team
	InterruptsSpinLocker timeLocker(team->time_lock);

	if (team->HasActiveCPUTimeUserTimers())
		team->DeactivateCPUTimeUserTimers();

	timeLocker.Unlock();

	// kill all threads but the main thread
	team_death_entry deathEntry;
	deathEntry.condition.Init(team, "team death");

	// Loop until no thread besides the main thread is left: send SIGKILLTHR
	// to every other thread, wait for them to die, then re-check (new
	// threads cannot be created anymore due to TEAM_STATE_SHUTDOWN).
	while (true) {
		team->death_entry = &deathEntry;
		deathEntry.remaining_threads = 0;

		Thread* thread = team->thread_list;
		while (thread != NULL) {
			if (thread != team->main_thread) {
				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
				deathEntry.remaining_threads++;
			}

			thread = thread->team_next;
		}

		if (deathEntry.remaining_threads == 0)
			break;

		// there are threads to wait for
		ConditionVariableEntry entry;
		deathEntry.condition.Add(&entry);

		teamLocker.Unlock();

		entry.Wait();

		teamLocker.Lock();
	}

	team->death_entry = NULL;

	return debuggerPort;
}
3266 
3267 
3268 /*!	Called on team exit to notify threads waiting on the team and free most
3269 	resources associated with it.
3270 	The caller shouldn't hold any locks.
3271 */
void
team_delete_team(Team* team, port_id debuggerPort)
{
	// Not quite in our job description, but work that has been left by
	// team_remove_team() and that can be done now that we're not holding any
	// locks.
	orphaned_process_group_check();

	// remember the ID -- the team structure may be gone after ReleaseReference()
	team_id teamID = team->id;

	ASSERT(team->num_threads == 0);

	// If someone is waiting for this team to be loaded, but it dies
	// unexpectedly before being done, we need to notify the waiting
	// thread now.

	TeamLocker teamLocker(team);

	if (team->loading_info) {
		// there's indeed someone waiting
		struct team_loading_info* loadingInfo = team->loading_info;
		team->loading_info = NULL;

		// the load can never complete anymore
		loadingInfo->result = B_ERROR;

		// wake up the waiting thread
		loadingInfo->condition.NotifyAll();
	}

	// notify team watchers

	{
		// we're not reachable from anyone anymore at this point, so we
		// can safely access the list without any locking
		struct team_watcher* watcher;
		while ((watcher = (struct team_watcher*)list_remove_head_item(
				&team->watcher_list)) != NULL) {
			watcher->hook(teamID, watcher->data);
			free(watcher);
		}
	}

	teamLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	// free team resources

	delete_realtime_sem_context(team->realtime_sem_context);
	xsi_sem_undo(team);
	remove_images(team);
	team->address_space->RemoveAndPut();

	// may destroy the Team object, if this was the last reference
	team->ReleaseReference();

	// notify the debugger, that the team is gone
	user_debug_team_deleted(teamID, debuggerPort);
}
3330 
3331 
3332 Team*
3333 team_get_kernel_team(void)
3334 {
3335 	return sKernelTeam;
3336 }
3337 
3338 
3339 team_id
3340 team_get_kernel_team_id(void)
3341 {
3342 	if (!sKernelTeam)
3343 		return 0;
3344 
3345 	return sKernelTeam->id;
3346 }
3347 
3348 
3349 team_id
3350 team_get_current_team_id(void)
3351 {
3352 	return thread_get_current_thread()->team->id;
3353 }
3354 
3355 
3356 status_t
3357 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3358 {
3359 	if (id == sKernelTeam->id) {
3360 		// we're the kernel team, so we don't have to go through all
3361 		// the hassle (locking and hash lookup)
3362 		*_addressSpace = VMAddressSpace::GetKernel();
3363 		return B_OK;
3364 	}
3365 
3366 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3367 
3368 	Team* team = team_get_team_struct_locked(id);
3369 	if (team == NULL)
3370 		return B_BAD_VALUE;
3371 
3372 	team->address_space->Get();
3373 	*_addressSpace = team->address_space;
3374 	return B_OK;
3375 }
3376 
3377 
3378 /*!	Sets the team's job control state.
3379 	The caller must hold the parent team's lock. Interrupts are allowed to be
3380 	enabled or disabled.
3381 	\a team The team whose job control state shall be set.
3382 	\a newState The new state to be set.
3383 	\a signal The signal the new state was caused by. Can \c NULL, if none. Then
3384 		the caller is responsible for filling in the following fields of the
3385 		entry before releasing the parent team's lock, unless the new state is
3386 		\c JOB_CONTROL_STATE_NONE:
3387 		- \c signal: The number of the signal causing the state change.
3388 		- \c signaling_user: The real UID of the user sending the signal.
3389 */
void
team_set_job_control_state(Team* team, job_control_state newState,
	Signal* signal)
{
	if (team == NULL || team->job_control_entry == NULL)
		return;

	// don't touch anything, if the state stays the same or the team is already
	// dead
	job_control_entry* entry = team->job_control_entry;
	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
		return;

	T(SetJobControlState(team->id, newState, signal));

	// remove from the old list
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry is in no list ATM
			break;
		case JOB_CONTROL_STATE_DEAD:
			// can't get here
			break;
		case JOB_CONTROL_STATE_STOPPED:
			team->parent->stopped_children.entries.Remove(entry);
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			team->parent->continued_children.entries.Remove(entry);
			break;
	}

	entry->state = newState;

	// Record which signal caused the transition; if signal is NULL the
	// caller fills these fields in itself (see function documentation).
	if (signal != NULL) {
		entry->signal = signal->Number();
		entry->signaling_user = signal->SendingUser();
	}

	// add to new list
	team_job_control_children* childList = NULL;
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry doesn't get into any list
			break;
		case JOB_CONTROL_STATE_DEAD:
			childList = &team->parent->dead_children;
			team->parent->dead_children.count++;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			childList = &team->parent->stopped_children;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			childList = &team->parent->continued_children;
			break;
	}

	if (childList != NULL) {
		childList->entries.Add(entry);
		// The dead_children condition variable is notified for every state
		// change, not just death -- wait_for_child() waiters block on it.
		team->parent->dead_children.condition_variable.NotifyAll();
	}
}
3451 
3452 
3453 /*!	Inits the given team's exit information, if not yet initialized, to some
3454 	generic "killed" status.
3455 	The caller must not hold the team's lock. Interrupts must be enabled.
3456 
3457 	\param team The team whose exit info shall be initialized.
3458 */
3459 void
3460 team_init_exit_info_on_error(Team* team)
3461 {
3462 	TeamLocker teamLocker(team);
3463 
3464 	if (!team->exit.initialized) {
3465 		team->exit.reason = CLD_KILLED;
3466 		team->exit.signal = SIGKILL;
3467 		team->exit.signaling_user = geteuid();
3468 		team->exit.status = 0;
3469 		team->exit.initialized = true;
3470 	}
3471 }
3472 
3473 
3474 /*! Adds a hook to the team that is called as soon as this team goes away.
3475 	This call might get public in the future.
3476 */
3477 status_t
3478 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3479 {
3480 	if (hook == NULL || teamID < B_OK)
3481 		return B_BAD_VALUE;
3482 
3483 	// create the watcher object
3484 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3485 	if (watcher == NULL)
3486 		return B_NO_MEMORY;
3487 
3488 	watcher->hook = hook;
3489 	watcher->data = data;
3490 
3491 	// add watcher, if the team isn't already dying
3492 	// get the team
3493 	Team* team = Team::GetAndLock(teamID);
3494 	if (team == NULL) {
3495 		free(watcher);
3496 		return B_BAD_TEAM_ID;
3497 	}
3498 
3499 	list_add_item(&team->watcher_list, watcher);
3500 
3501 	team->UnlockAndReleaseReference();
3502 
3503 	return B_OK;
3504 }
3505 
3506 
3507 status_t
3508 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3509 {
3510 	if (hook == NULL || teamID < 0)
3511 		return B_BAD_VALUE;
3512 
3513 	// get team and remove watcher (if present)
3514 	Team* team = Team::GetAndLock(teamID);
3515 	if (team == NULL)
3516 		return B_BAD_TEAM_ID;
3517 
3518 	// search for watcher
3519 	team_watcher* watcher = NULL;
3520 	while ((watcher = (team_watcher*)list_get_next_item(
3521 			&team->watcher_list, watcher)) != NULL) {
3522 		if (watcher->hook == hook && watcher->data == data) {
3523 			// got it!
3524 			list_remove_item(&team->watcher_list, watcher);
3525 			break;
3526 		}
3527 	}
3528 
3529 	team->UnlockAndReleaseReference();
3530 
3531 	if (watcher == NULL)
3532 		return B_ENTRY_NOT_FOUND;
3533 
3534 	free(watcher);
3535 	return B_OK;
3536 }
3537 
3538 
3539 /*!	Allocates a user_thread structure from the team.
3540 	The team lock must be held, unless the function is called for the team's
3541 	main thread. Interrupts must be enabled.
3542 */
struct user_thread*
team_allocate_user_thread(Team* team)
{
	// no user data area set up for this team -- nothing to allocate from
	if (team->user_data == 0)
		return NULL;

	// take an entry from the free list, if any
	if (struct free_user_thread* entry = team->free_user_threads) {
		user_thread* thread = entry->thread;
		team->free_user_threads = entry->next;
		free(entry);
		return thread;
	}

	while (true) {
		// enough space left?
		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
		if (team->user_data_size - team->used_user_data < needed) {
			// try to resize the area
			if (resize_area(team->user_data_area,
					team->user_data_size + B_PAGE_SIZE) != B_OK) {
				return NULL;
			}

			// resized user area successfully -- try to allocate the user_thread
			// again
			team->user_data_size += B_PAGE_SIZE;
			continue;
		}

		// allocate the user_thread by bumping the used-bytes watermark;
		// freed slots are recycled via the free list above, not here
		user_thread* thread
			= (user_thread*)(team->user_data + team->used_user_data);
		team->used_user_data += needed;

		return thread;
	}
}
3581 
3582 
3583 /*!	Frees the given user_thread structure.
3584 	The team's lock must not be held. Interrupts must be enabled.
3585 	\param team The team the user thread was allocated from.
3586 	\param userThread The user thread to free.
3587 */
3588 void
3589 team_free_user_thread(Team* team, struct user_thread* userThread)
3590 {
3591 	if (userThread == NULL)
3592 		return;
3593 
3594 	// create a free list entry
3595 	free_user_thread* entry
3596 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3597 	if (entry == NULL) {
3598 		// we have to leak the user thread :-/
3599 		return;
3600 	}
3601 
3602 	// add to free list
3603 	TeamLocker teamLocker(team);
3604 
3605 	entry->thread = userThread;
3606 	entry->next = team->free_user_threads;
3607 	team->free_user_threads = entry;
3608 }
3609 
3610 
3611 //	#pragma mark - Associated data interface
3612 
3613 
AssociatedData::AssociatedData()
	:
	fOwner(NULL)
{
	// starts out unowned; an AssociatedDataOwner claims it via AddData()
}
3619 
3620 
AssociatedData::~AssociatedData()
{
	// nothing to do here
}
3624 
3625 
void
AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
{
	// hook for derived classes -- the default implementation does nothing
}
3630 
3631 
AssociatedDataOwner::AssociatedDataOwner()
{
	// fLock guards fList against concurrent AddData()/RemoveData()
	mutex_init(&fLock, "associated data owner");
}
3636 
3637 
AssociatedDataOwner::~AssociatedDataOwner()
{
	mutex_destroy(&fLock);
}
3642 
3643 
3644 bool
3645 AssociatedDataOwner::AddData(AssociatedData* data)
3646 {
3647 	MutexLocker locker(fLock);
3648 
3649 	if (data->Owner() != NULL)
3650 		return false;
3651 
3652 	data->AcquireReference();
3653 	fList.Add(data);
3654 	data->SetOwner(this);
3655 
3656 	return true;
3657 }
3658 
3659 
3660 bool
3661 AssociatedDataOwner::RemoveData(AssociatedData* data)
3662 {
3663 	MutexLocker locker(fLock);
3664 
3665 	if (data->Owner() != this)
3666 		return false;
3667 
3668 	data->SetOwner(NULL);
3669 	fList.Remove(data);
3670 
3671 	locker.Unlock();
3672 
3673 	data->ReleaseReference();
3674 
3675 	return true;
3676 }
3677 
3678 
void
AssociatedDataOwner::PrepareForDeletion()
{
	MutexLocker locker(fLock);

	// move all data to a temporary list and unset the owner
	DataList list;
	list.MoveFrom(&fList);

	for (DataList::Iterator it = list.GetIterator();
		AssociatedData* data = it.Next();) {
		data->SetOwner(NULL);
	}

	// Drop the lock before invoking hooks: OwnerDeleted() implementations
	// and the final ReleaseReference() must not run under fLock.
	locker.Unlock();

	// call the notification hooks and release our references
	while (AssociatedData* data = list.RemoveHead()) {
		data->OwnerDeleted(this);
		data->ReleaseReference();
	}
}
3701 
3702 
3703 /*!	Associates data with the current team.
3704 	When the team is deleted, the data object is notified.
3705 	The team acquires a reference to the object.
3706 
3707 	\param data The data object.
3708 	\return \c true on success, \c false otherwise. Fails only when the supplied
3709 		data object is already associated with another owner.
3710 */
3711 bool
3712 team_associate_data(AssociatedData* data)
3713 {
3714 	return thread_get_current_thread()->team->AddData(data);
3715 }
3716 
3717 
3718 /*!	Dissociates data from the current team.
3719 	Balances an earlier call to team_associate_data().
3720 
3721 	\param data The data object.
3722 	\return \c true on success, \c false otherwise. Fails only when the data
3723 		object is not associated with the current team.
3724 */
3725 bool
3726 team_dissociate_data(AssociatedData* data)
3727 {
3728 	return thread_get_current_thread()->team->RemoveData(data);
3729 }
3730 
3731 
3732 //	#pragma mark - Public kernel API
3733 
3734 
3735 thread_id
3736 load_image(int32 argCount, const char** args, const char** env)
3737 {
3738 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
3739 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
3740 }
3741 
3742 
3743 thread_id
3744 load_image_etc(int32 argCount, const char* const* args,
3745 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3746 {
3747 	// we need to flatten the args and environment
3748 
3749 	if (args == NULL)
3750 		return B_BAD_VALUE;
3751 
3752 	// determine total needed size
3753 	int32 argSize = 0;
3754 	for (int32 i = 0; i < argCount; i++)
3755 		argSize += strlen(args[i]) + 1;
3756 
3757 	int32 envCount = 0;
3758 	int32 envSize = 0;
3759 	while (env != NULL && env[envCount] != NULL)
3760 		envSize += strlen(env[envCount++]) + 1;
3761 
3762 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3763 	if (size > MAX_PROCESS_ARGS_SIZE)
3764 		return B_TOO_MANY_ARGS;
3765 
3766 	// allocate space
3767 	char** flatArgs = (char**)malloc(size);
3768 	if (flatArgs == NULL)
3769 		return B_NO_MEMORY;
3770 
3771 	char** slot = flatArgs;
3772 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3773 
3774 	// copy arguments and environment
3775 	for (int32 i = 0; i < argCount; i++) {
3776 		int32 argSize = strlen(args[i]) + 1;
3777 		memcpy(stringSpace, args[i], argSize);
3778 		*slot++ = stringSpace;
3779 		stringSpace += argSize;
3780 	}
3781 
3782 	*slot++ = NULL;
3783 
3784 	for (int32 i = 0; i < envCount; i++) {
3785 		int32 envSize = strlen(env[i]) + 1;
3786 		memcpy(stringSpace, env[i], envSize);
3787 		*slot++ = stringSpace;
3788 		stringSpace += envSize;
3789 	}
3790 
3791 	*slot++ = NULL;
3792 
3793 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3794 		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
3795 
3796 	free(flatArgs);
3797 		// load_image_internal() unset our variable if it took over ownership
3798 
3799 	return thread;
3800 }
3801 
3802 
3803 status_t
3804 wait_for_team(team_id id, status_t* _returnCode)
3805 {
3806 	// check whether the team exists
3807 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3808 
3809 	Team* team = team_get_team_struct_locked(id);
3810 	if (team == NULL)
3811 		return B_BAD_TEAM_ID;
3812 
3813 	id = team->id;
3814 
3815 	teamsLocker.Unlock();
3816 
3817 	// wait for the main thread (it has the same ID as the team)
3818 	return wait_for_thread(id, _returnCode);
3819 }
3820 
3821 
3822 status_t
3823 kill_team(team_id id)
3824 {
3825 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3826 
3827 	Team* team = team_get_team_struct_locked(id);
3828 	if (team == NULL)
3829 		return B_BAD_TEAM_ID;
3830 
3831 	id = team->id;
3832 
3833 	teamsLocker.Unlock();
3834 
3835 	if (team == sKernelTeam)
3836 		return B_NOT_ALLOWED;
3837 
3838 	// Just kill the team's main thread (it has same ID as the team). The
3839 	// cleanup code there will take care of the team.
3840 	return kill_thread(id);
3841 }
3842 
3843 
3844 status_t
3845 _get_team_info(team_id id, team_info* info, size_t size)
3846 {
3847 	// get the team
3848 	Team* team = Team::Get(id);
3849 	if (team == NULL)
3850 		return B_BAD_TEAM_ID;
3851 	BReference<Team> teamReference(team, true);
3852 
3853 	// fill in the info
3854 	return fill_team_info(team, info, size);
3855 }
3856 
3857 
status_t
_get_next_team_info(int32* cookie, team_info* info, size_t size)
{
	// the cookie is the next team ID to try; team IDs start at 1
	int32 slot = *cookie;
	if (slot < 1)
		slot = 1;

	InterruptsReadSpinLocker locker(sTeamHashLock);

	team_id lastTeamID = peek_next_thread_id();
		// TODO: This is broken, since the id can wrap around!

	// get next valid team
	Team* team = NULL;
	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
		slot++;

	if (team == NULL)
		return B_BAD_TEAM_ID;

	// get a reference to the team and unlock
	BReference<Team> teamReference(team);
	locker.Unlock();

	// advance the cookie past the team just found and fill in the info
	*cookie = ++slot;
	return fill_team_info(team, info, size);
}
3886 
3887 
3888 status_t
3889 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3890 {
3891 	if (size != sizeof(team_usage_info))
3892 		return B_BAD_VALUE;
3893 
3894 	return common_get_team_usage_info(id, who, info, 0);
3895 }
3896 
3897 
3898 pid_t
3899 getpid(void)
3900 {
3901 	return thread_get_current_thread()->team->id;
3902 }
3903 
3904 
3905 pid_t
3906 getppid()
3907 {
3908 	return _getppid(0);
3909 }
3910 
3911 
3912 pid_t
3913 getpgid(pid_t id)
3914 {
3915 	if (id < 0) {
3916 		errno = EINVAL;
3917 		return -1;
3918 	}
3919 
3920 	if (id == 0) {
3921 		// get process group of the calling process
3922 		Team* team = thread_get_current_thread()->team;
3923 		TeamLocker teamLocker(team);
3924 		return team->group_id;
3925 	}
3926 
3927 	// get the team
3928 	Team* team = Team::GetAndLock(id);
3929 	if (team == NULL) {
3930 		errno = ESRCH;
3931 		return -1;
3932 	}
3933 
3934 	// get the team's process group ID
3935 	pid_t groupID = team->group_id;
3936 
3937 	team->UnlockAndReleaseReference();
3938 
3939 	return groupID;
3940 }
3941 
3942 
3943 pid_t
3944 getsid(pid_t id)
3945 {
3946 	if (id < 0) {
3947 		errno = EINVAL;
3948 		return -1;
3949 	}
3950 
3951 	if (id == 0) {
3952 		// get session of the calling process
3953 		Team* team = thread_get_current_thread()->team;
3954 		TeamLocker teamLocker(team);
3955 		return team->session_id;
3956 	}
3957 
3958 	// get the team
3959 	Team* team = Team::GetAndLock(id);
3960 	if (team == NULL) {
3961 		errno = ESRCH;
3962 		return -1;
3963 	}
3964 
3965 	// get the team's session ID
3966 	pid_t sessionID = team->session_id;
3967 
3968 	team->UnlockAndReleaseReference();
3969 
3970 	return sessionID;
3971 }
3972 
3973 
3974 //	#pragma mark - User syscalls
3975 
3976 
status_t
_user_exec(const char* userPath, const char* const* userFlatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char path[B_PATH_NAME_LENGTH];

	// validate and copy the path from userland
	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
		return B_BAD_ADDRESS;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);

	if (error == B_OK) {
		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
			envCount, umask);
			// this one only returns in case of error
	}

	// only reached when exec_team() failed (or the args couldn't be copied)
	free(flatArgs);
	return error;
}
4003 
4004 
4005 thread_id
4006 _user_fork(void)
4007 {
4008 	return fork_team();
4009 }
4010 
4011 
pid_t
_user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo,
	team_usage_info* usageInfo)
{
	// validate the userland output buffers up front
	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;
	if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo))
		return B_BAD_ADDRESS;

	siginfo_t info;
	team_usage_info usage_info;
	pid_t foundChild = wait_for_child(child, flags, info, usage_info);
	if (foundChild < 0) {
		// errors may translate into a syscall restart (e.g. on interruption)
		return syscall_restart_handle_post(foundChild);
	}

	// copy info back to userland
	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
		return B_BAD_ADDRESS;
	// copy usage_info back to userland
	if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info,
		sizeof(usage_info)) != B_OK) {
		return B_BAD_ADDRESS;
	}

	return foundChild;
}
4038 
4039 
4040 pid_t
4041 _user_process_info(pid_t process, int32 which)
4042 {
4043 	pid_t result;
4044 	switch (which) {
4045 		case SESSION_ID:
4046 			result = getsid(process);
4047 			break;
4048 		case GROUP_ID:
4049 			result = getpgid(process);
4050 			break;
4051 		case PARENT_ID:
4052 			result = _getppid(process);
4053 			break;
4054 		default:
4055 			return B_BAD_VALUE;
4056 	}
4057 
4058 	return result >= 0 ? result : errno;
4059 }
4060 
4061 
pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the target
	//   process as group leader.
	// * Set the target process' process group to an already existing one in the
	//   same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// Lock the target process group, if locking order allows it.
			// (An unpublished new group can't be locked by anyone else, and
			// otherwise groups are locked in ascending ID order.)
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock in
			// the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child of
			// the calling team and in the same session. (If that's the case it
			// isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child, when the child has
			// already executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different session.
			// That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll();

		return group->id;
	}
}
4213 
4214 
pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	// the new session shares the ID of its leader group
	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll();

	return group->id;
}
4254 
4255 
4256 status_t
4257 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4258 {
4259 	status_t returnCode;
4260 	status_t status;
4261 
4262 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4263 		return B_BAD_ADDRESS;
4264 
4265 	status = wait_for_team(id, &returnCode);
4266 	if (status >= B_OK && _userReturnCode != NULL) {
4267 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4268 				!= B_OK)
4269 			return B_BAD_ADDRESS;
4270 		return B_OK;
4271 	}
4272 
4273 	return syscall_restart_handle_post(status);
4274 }
4275 
4276 
4277 thread_id
4278 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
4279 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
4280 	port_id errorPort, uint32 errorToken)
4281 {
4282 	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));
4283 
4284 	if (argCount < 1)
4285 		return B_BAD_VALUE;
4286 
4287 	// copy and relocate the flat arguments
4288 	char** flatArgs;
4289 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
4290 		argCount, envCount, flatArgs);
4291 	if (error != B_OK)
4292 		return error;
4293 
4294 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
4295 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
4296 		errorToken);
4297 
4298 	free(flatArgs);
4299 		// load_image_internal() unset our variable if it took over ownership
4300 
4301 	return thread;
4302 }
4303 
4304 
void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status
	TeamLocker teamLocker(team);

	// only the first exit status sticks (e.g. a signal may have set it already)
	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();

	// Send this thread a SIGKILL. This makes sure the thread will not return to
	// userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}
4338 
4339 
4340 status_t
4341 _user_kill_team(team_id team)
4342 {
4343 	return kill_team(team);
4344 }
4345 
4346 
4347 status_t
4348 _user_get_team_info(team_id id, team_info* userInfo)
4349 {
4350 	status_t status;
4351 	team_info info;
4352 
4353 	if (!IS_USER_ADDRESS(userInfo))
4354 		return B_BAD_ADDRESS;
4355 
4356 	status = _get_team_info(id, &info, sizeof(team_info));
4357 	if (status == B_OK) {
4358 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4359 			return B_BAD_ADDRESS;
4360 	}
4361 
4362 	return status;
4363 }
4364 
4365 
4366 status_t
4367 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
4368 {
4369 	status_t status;
4370 	team_info info;
4371 	int32 cookie;
4372 
4373 	if (!IS_USER_ADDRESS(userCookie)
4374 		|| !IS_USER_ADDRESS(userInfo)
4375 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4376 		return B_BAD_ADDRESS;
4377 
4378 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
4379 	if (status != B_OK)
4380 		return status;
4381 
4382 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4383 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4384 		return B_BAD_ADDRESS;
4385 
4386 	return status;
4387 }
4388 
4389 
/*!	Syscall entry point: returns the ID of the calling thread's team. */
team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}
4395 
4396 
4397 status_t
4398 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4399 	size_t size)
4400 {
4401 	if (size != sizeof(team_usage_info))
4402 		return B_BAD_VALUE;
4403 
4404 	team_usage_info info;
4405 	status_t status = common_get_team_usage_info(team, who, &info,
4406 		B_CHECK_PERMISSION);
4407 
4408 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4409 		|| user_memcpy(userInfo, &info, size) != B_OK) {
4410 		return B_BAD_ADDRESS;
4411 	}
4412 
4413 	return status;
4414 }
4415 
4416 
/*!	Syscall entry point for get_extended_team_info(): collects extended
	information about team \a teamID into a flattened KMessage and copies it
	to the userland buffer \a buffer.

	\param teamID The team in question.
	\param flags Bit mask selecting the info to return; currently only
		\c B_TEAM_INFO_BASIC is supported (see TODO below).
	\param buffer Userland buffer the flattened message is copied to. May be
		\c NULL, but only if \a size is 0.
	\param size Size of \a buffer in bytes.
	\param _sizeNeeded Userland pointer the flattened message's size is
		written to, regardless of whether it fits in \a buffer.
	\return \c B_OK on success, \c B_BUFFER_OVERFLOW if \a buffer is too
		small (with \a _sizeNeeded set), another error code otherwise.
*/
status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// allocate memory for a copy of the needed team data
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		} teamClone;

		io_context* ioContext;
		{
			// get the team structure
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			// take over the reference and the lock acquired by GetAndLock()
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);

			// copy the data
			teamClone.id = team->id;
			strlcpy(teamClone.name, team->Name(), sizeof(teamClone.name));
			teamClone.group_id = team->group_id;
			teamClone.session_id = team->session_id;
			teamClone.real_uid = team->real_uid;
			teamClone.real_gid = team->real_gid;
			teamClone.effective_uid = team->effective_uid;
			teamClone.effective_gid = team->effective_gid;

			// also fetch a reference to the I/O context
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		// release the I/O context reference when leaving the function
		CObjectDeleter<io_context, void, vfs_put_io_context>
			ioContextPutter(ioContext);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone.id) != B_OK
			|| info.AddString("name", teamClone.name) != B_OK
			|| info.AddInt32("process group", teamClone.group_id) != B_OK
			|| info.AddInt32("session", teamClone.session_id) != B_OK
			|| info.AddInt32("uid", teamClone.real_uid) != B_OK
			|| info.AddInt32("gid", teamClone.real_gid) != B_OK
			|| info.AddInt32("euid", teamClone.effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone.effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			MutexLocker ioContextLocker(ioContext->io_mutex);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
4510