xref: /haiku/src/system/kernel/team.cpp (revision a267f17ca645ceae1728e10680c6d1377589eef1)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 /*!	Team functions */
13 
14 
15 #include <team.h>
16 
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22 
23 #include <OS.h>
24 
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27 
28 #include <extended_system_info_defs.h>
29 
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <safemode.h>
47 #include <sem.h>
48 #include <syscall_process_info.h>
49 #include <syscall_load_image.h>
50 #include <syscall_restart.h>
51 #include <syscalls.h>
52 #include <tls.h>
53 #include <tracing.h>
54 #include <user_runtime.h>
55 #include <user_thread.h>
56 #include <usergroup.h>
57 #include <vfs.h>
58 #include <vm/vm.h>
59 #include <vm/VMAddressSpace.h>
60 #include <util/AutoLock.h>
61 
62 #include "TeamThreadTables.h"
63 
64 
65 //#define TRACE_TEAM
66 #ifdef TRACE_TEAM
67 #	define TRACE(x) dprintf x
68 #else
69 #	define TRACE(x) ;
70 #endif
71 
72 
// Key type for team hash lookups: wraps the team ID.
struct team_key {
	team_id id;	// the team's unique ID
};
76 
// Bundle of arguments passed to a newly created/exec()ed team. Holds the
// flattened argument/environment vectors and bookkeeping needed to report
// load status back to the creator.
struct team_arg {
	char	*path;				// path of the executable
	char	**flat_args;		// flattened argv + environ block
	size_t	flat_args_size;		// total size of the flattened block
	uint32	arg_count;			// number of entries in argv
	uint32	env_count;			// number of entries in environ
	mode_t	umask;				// file creation mask for the new team
	uint32	flags;				// TEAM_ARGS_FLAG_* (e.g. no ASLR)
	port_id	error_port;			// port for reporting the load status
	uint32	error_token;		// token identifying the load request
};
88 
89 #define TEAM_ARGS_FLAG_NO_ASLR	0x01
90 
91 
92 namespace {
93 
94 
// Notification service broadcasting team events (see Notify() below) to
// registered listeners via the kernel's notification framework.
class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			// Sends a message with the given event code and team info.
			void			Notify(uint32 eventCode, Team* team);
};
101 
102 
103 // #pragma mark - TeamTable
104 
105 
106 typedef BKernel::TeamThreadTable<Team> TeamTable;
107 
108 
109 // #pragma mark - ProcessGroupHashDefinition
110 
111 
// Hash table definition mapping a process group ID (pid_t) to its
// ProcessGroup object. Used by the sGroupHash table below.
struct ProcessGroupHashDefinition {
	typedef pid_t			KeyType;
	typedef	ProcessGroup	ValueType;

	// The group ID itself serves as the hash value.
	size_t HashKey(pid_t key) const
	{
		return key;
	}

	size_t Hash(ProcessGroup* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(pid_t key, ProcessGroup* value) const
	{
		return value->id == key;
	}

	// Intrusive link: chaining uses the group's own "next" pointer.
	ProcessGroup*& GetLink(ProcessGroup* value) const
	{
		return value->next;
	}
};
136 
137 typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
138 
139 
140 }	// unnamed namespace
141 
142 
143 // #pragma mark -
144 
145 
// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static rw_spinlock sTeamHashLock = B_RW_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

// the one kernel team (set during initialization, outside this chunk)
static Team* sKernelTeam = NULL;
// safemode setting: whether loading of user add-ons is disabled
static bool sDisableUserAddOns = false;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

// service used by Notify() to broadcast team events
static TeamNotificationService sNotificationService;

// sizing of the per-team user data area (user threads, etc.)
static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
172 
173 
174 // #pragma mark - TeamListIterator
175 
176 
177 TeamListIterator::TeamListIterator()
178 {
179 	// queue the entry
180 	InterruptsWriteSpinLocker locker(sTeamHashLock);
181 	sTeamHash.InsertIteratorEntry(&fEntry);
182 }
183 
184 
185 TeamListIterator::~TeamListIterator()
186 {
187 	// remove the entry
188 	InterruptsWriteSpinLocker locker(sTeamHashLock);
189 	sTeamHash.RemoveIteratorEntry(&fEntry);
190 }
191 
192 
193 Team*
194 TeamListIterator::Next()
195 {
196 	// get the next team -- if there is one, get reference for it
197 	InterruptsWriteSpinLocker locker(sTeamHashLock);
198 	Team* team = sTeamHash.NextElement(&fEntry);
199 	if (team != NULL)
200 		team->AcquireReference();
201 
202 	return team;
203 }
204 
205 
206 // #pragma mark - Tracing
207 
208 
209 #if TEAM_TRACING
210 namespace TeamTracing {
211 
// Trace entry recording a fork(): remembers the main thread ID of the newly
// created team.
class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
	}

private:
	thread_id			fForkedThread;	// main thread of the forked team
};
229 
230 
231 class ExecTeam : public AbstractTraceEntry {
232 public:
233 	ExecTeam(const char* path, int32 argCount, const char* const* args,
234 			int32 envCount, const char* const* env)
235 		:
236 		fArgCount(argCount),
237 		fArgs(NULL)
238 	{
239 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
240 			false);
241 
242 		// determine the buffer size we need for the args
243 		size_t argBufferSize = 0;
244 		for (int32 i = 0; i < argCount; i++)
245 			argBufferSize += strlen(args[i]) + 1;
246 
247 		// allocate a buffer
248 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
249 		if (fArgs) {
250 			char* buffer = fArgs;
251 			for (int32 i = 0; i < argCount; i++) {
252 				size_t argSize = strlen(args[i]) + 1;
253 				memcpy(buffer, args[i], argSize);
254 				buffer += argSize;
255 			}
256 		}
257 
258 		// ignore env for the time being
259 		(void)envCount;
260 		(void)env;
261 
262 		Initialized();
263 	}
264 
265 	virtual void AddDump(TraceOutput& out)
266 	{
267 		out.Print("team exec, \"%p\", args:", fPath);
268 
269 		if (fArgs != NULL) {
270 			char* args = fArgs;
271 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
272 				out.Print(" \"%s\"", args);
273 				args += strlen(args) + 1;
274 			}
275 		} else
276 			out.Print(" <too long>");
277 	}
278 
279 private:
280 	char*	fPath;
281 	int32	fArgCount;
282 	char*	fArgs;
283 };
284 
285 
286 static const char*
287 job_control_state_name(job_control_state state)
288 {
289 	switch (state) {
290 		case JOB_CONTROL_STATE_NONE:
291 			return "none";
292 		case JOB_CONTROL_STATE_STOPPED:
293 			return "stopped";
294 		case JOB_CONTROL_STATE_CONTINUED:
295 			return "continued";
296 		case JOB_CONTROL_STATE_DEAD:
297 			return "dead";
298 		default:
299 			return "invalid";
300 	}
301 }
302 
303 
// Trace entry recording a job control state change of a team, including the
// signal (if any) that caused it.
class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		// signal may be NULL, e.g. for state changes not caused by a signal
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %" B_PRId32 ", "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;		// the affected team
	job_control_state	fNewState;	// the state the team changed to
	int					fSignal;	// causing signal number (0 if none)
};
327 
328 
// Trace entry recording the start of a wait-for-child operation (waitpid()
// and friends), with the child specifier and wait flags.
class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %" B_PRId32 ", "
			"flags: %#" B_PRIx32, fChild, fFlags);
	}

private:
	pid_t	fChild;	// waited-for child specifier (pid/pgid selector)
	uint32	fFlags;	// wait flags (e.g. WNOHANG)
};
349 
350 
351 class WaitForChildDone : public AbstractTraceEntry {
352 public:
353 	WaitForChildDone(const job_control_entry& entry)
354 		:
355 		fState(entry.state),
356 		fTeam(entry.thread),
357 		fStatus(entry.status),
358 		fReason(entry.reason),
359 		fSignal(entry.signal)
360 	{
361 		Initialized();
362 	}
363 
364 	WaitForChildDone(status_t error)
365 		:
366 		fTeam(error)
367 	{
368 		Initialized();
369 	}
370 
371 	virtual void AddDump(TraceOutput& out)
372 	{
373 		if (fTeam >= 0) {
374 			out.Print("team wait for child done, team: %" B_PRId32 ", "
375 				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
376 				fTeam, job_control_state_name(fState), fStatus, fReason,
377 				fSignal);
378 		} else {
379 			out.Print("team wait for child failed, error: "
380 				"%#" B_PRIx32 ", ", fTeam);
381 		}
382 	}
383 
384 private:
385 	job_control_state	fState;
386 	team_id				fTeam;
387 	status_t			fStatus;
388 	uint16				fReason;
389 	uint16				fSignal;
390 };
391 
392 }	// namespace TeamTracing
393 
394 #	define T(x) new(std::nothrow) TeamTracing::x;
395 #else
396 #	define T(x) ;
397 #endif
398 
399 
400 //	#pragma mark - TeamNotificationService
401 
402 
TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
		// "teams" is the name under which listeners find this service
{
}
407 
408 
409 void
410 TeamNotificationService::Notify(uint32 eventCode, Team* team)
411 {
412 	char eventBuffer[128];
413 	KMessage event;
414 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
415 	event.AddInt32("event", eventCode);
416 	event.AddInt32("team", team->id);
417 	event.AddPointer("teamStruct", team);
418 
419 	DefaultNotificationService::Notify(event, eventCode);
420 }
421 
422 
423 //	#pragma mark - Team
424 
425 
/*!	Constructs a team with the given ID.
	Initializes all members to their empty/default state. Note that the two
	allocations below (job_control_entry, fQueuedSignalsCounter) may fail;
	Create() checks them for NULL afterwards.

	\param id The team's ID.
	\param kernel \c true, if this is the kernel team.
*/
Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;
	serial_number = -1;

	// init mutex
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
			// MUTEX_FLAG_CLONE_NAME: the mutex keeps its own copy of the name
	}

	hash_next = siblings_next = children = parent = NULL;
	fName[0] = '\0';
	fArgs[0] = '\0';
	num_threads = 0;
	io_context = NULL;
	// only the kernel team shares the kernel address space
	address_space = kernel ? VMAddressSpace::Kernel() : NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	thread_list = NULL;
	main_thread = NULL;
	loading_info = NULL;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	death_entry = NULL;
	user_data_area = -1;
	user_data = 0;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	commpage_address = NULL;

	supplementary_groups = NULL;
	supplementary_group_count = 0;

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;

	// dead threads
	list_init(&dead_threads);

	// dead children
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// job control entry -- may be NULL on allocation failure (checked in
	// Create())
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	list_init(&sem_list);
	list_init_etc(&port_list, port_team_link_offset());
	list_init(&image_list);
	list_init(&watcher_list);

	clear_team_debug_info(&debug_info, true);

	// init dead/stopped/continued children condition vars
	dead_children.condition_variable.Init(&dead_children, "team children");

	B_INITIALIZE_SPINLOCK(&time_lock);
	B_INITIALIZE_SPINLOCK(&signal_lock);

	// the kernel team has no queued-signals limit (-1)
	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));

	fUserDefinedTimerCount = 0;

	fCoreDumpCondition = NULL;
}
511 
512 
/*!	Destroys the team, releasing all associated resources: I/O context, ports,
	semaphores, timers, signals, death entries, and the remaining job control
	entry (if it wasn't transferred to the parent).
*/
Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	if (io_context != NULL)
		vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	// delete all user timers (user-defined and others)
	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	// free the death entries of this team's dead threads
	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	// delete the job control entries of dead children
	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	// free the list of recycled user thread structures
	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	malloc_referenced_release(supplementary_groups);

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}
550 
551 
552 /*static*/ Team*
553 Team::Create(team_id id, const char* name, bool kernel)
554 {
555 	// create the team object
556 	Team* team = new(std::nothrow) Team(id, kernel);
557 	if (team == NULL)
558 		return NULL;
559 	ObjectDeleter<Team> teamDeleter(team);
560 
561 	if (name != NULL)
562 		team->SetName(name);
563 
564 	// check initialization
565 	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
566 		return NULL;
567 
568 	// finish initialization (arch specifics)
569 	if (arch_team_init_team_struct(team, kernel) != B_OK)
570 		return NULL;
571 
572 	if (!kernel) {
573 		status_t error = user_timer_create_team_timers(team);
574 		if (error != B_OK)
575 			return NULL;
576 	}
577 
578 	// everything went fine
579 	return teamDeleter.Detach();
580 }
581 
582 
583 /*!	\brief Returns the team with the given ID.
584 	Returns a reference to the team.
585 	Team and thread spinlock must not be held.
586 */
587 /*static*/ Team*
588 Team::Get(team_id id)
589 {
590 	if (id == B_CURRENT_TEAM) {
591 		Team* team = thread_get_current_thread()->team;
592 		team->AcquireReference();
593 		return team;
594 	}
595 
596 	InterruptsReadSpinLocker locker(sTeamHashLock);
597 	Team* team = sTeamHash.Lookup(id);
598 	if (team != NULL)
599 		team->AcquireReference();
600 	return team;
601 }
602 
603 
604 /*!	\brief Returns the team with the given ID in a locked state.
605 	Returns a reference to the team.
606 	Team and thread spinlock must not be held.
607 */
608 /*static*/ Team*
609 Team::GetAndLock(team_id id)
610 {
611 	// get the team
612 	Team* team = Get(id);
613 	if (team == NULL)
614 		return NULL;
615 
616 	// lock it
617 	team->Lock();
618 
619 	// only return the team, when it isn't already dying
620 	if (team->state >= TEAM_STATE_SHUTDOWN) {
621 		team->Unlock();
622 		team->ReleaseReference();
623 		return NULL;
624 	}
625 
626 	return team;
627 }
628 
629 
/*!	Locks the team and its parent team (if any).
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked. If the
	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
	only the team itself is locked.

	\param dontLockParentIfKernel If \c true, the team's parent team is only
		locked, if it is not the kernel team.
*/
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent.This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		// (the reference keeps the parent alive while this team is unlocked)
		BReference<Team> parentReference(parent);

		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}
672 
673 
674 /*!	Unlocks the team and its parent team (if any).
675 */
676 void
677 Team::UnlockTeamAndParent()
678 {
679 	if (parent != NULL)
680 		parent->Unlock();
681 
682 	Unlock();
683 }
684 
685 
/*!	Locks the team, its parent team (if any), and the team's process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked.
*/
void
Team::LockTeamParentAndProcessGroup()
{
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's lock.
	// If we have a parent, try to lock it.
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest of
	// the job.
	// (The group's lock stays held throughout.)
	Unlock();
	LockTeamAndParent(false);
}
706 
707 
708 /*!	Unlocks the team, its parent team (if any), and the team's process group.
709 */
710 void
711 Team::UnlockTeamParentAndProcessGroup()
712 {
713 	group->Unlock();
714 
715 	if (parent != NULL)
716 		parent->Unlock();
717 
718 	Unlock();
719 }
720 
721 
/*!	Locks the team and its process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
*/
void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		// (the reference keeps the group alive while this team is unlocked)
		BReference<ProcessGroup> groupReference(group);

		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}
753 
754 
/*!	Unlocks the team and its process group.
	Counterpart to LockTeamAndProcessGroup().
*/
void
Team::UnlockTeamAndProcessGroup()
{
	group->Unlock();
	Unlock();
}
761 
762 
763 void
764 Team::SetName(const char* name)
765 {
766 	if (const char* lastSlash = strrchr(name, '/'))
767 		name = lastSlash + 1;
768 
769 	strlcpy(fName, name, B_OS_NAME_LENGTH);
770 }
771 
772 
/*!	Sets the team's argument string, truncating it to the size of fArgs.
*/
void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}
778 
779 
780 void
781 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
782 {
783 	fArgs[0] = '\0';
784 	strlcpy(fArgs, path, sizeof(fArgs));
785 	for (int i = 0; i < otherArgCount; i++) {
786 		strlcat(fArgs, " ", sizeof(fArgs));
787 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
788 	}
789 }
790 
791 
792 void
793 Team::ResetSignalsOnExec()
794 {
795 	// We are supposed to keep pending signals. Signal actions shall be reset
796 	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
797 	// (for SIGCHLD it's implementation-defined). Others shall be reset to
798 	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
799 	// flags, but since there aren't any handlers, they make little sense, so
800 	// we clear them.
801 
802 	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
803 		struct sigaction& action = SignalActionFor(i);
804 		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
805 			action.sa_handler = SIG_DFL;
806 
807 		action.sa_mask = 0;
808 		action.sa_flags = 0;
809 		action.sa_userdata = NULL;
810 	}
811 }
812 
813 
/*!	Copies all signal actions from the given team to this one.
*/
void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}
819 
820 
821 /*!	Adds the given user timer to the team and, if user-defined, assigns it an
822 	ID.
823 
824 	The caller must hold the team's lock.
825 
826 	\param timer The timer to be added. If it doesn't have an ID yet, it is
827 		considered user-defined and will be assigned an ID.
828 	\return \c B_OK, if the timer was added successfully, another error code
829 		otherwise.
830 */
831 status_t
832 Team::AddUserTimer(UserTimer* timer)
833 {
834 	// don't allow addition of timers when already shutting the team down
835 	if (state >= TEAM_STATE_SHUTDOWN)
836 		return B_BAD_TEAM_ID;
837 
838 	// If the timer is user-defined, check timer limit and increment
839 	// user-defined count.
840 	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
841 		return EAGAIN;
842 
843 	fUserTimers.AddTimer(timer);
844 
845 	return B_OK;
846 }
847 
848 
849 /*!	Removes the given user timer from the team.
850 
851 	The caller must hold the team's lock.
852 
853 	\param timer The timer to be removed.
854 
855 */
856 void
857 Team::RemoveUserTimer(UserTimer* timer)
858 {
859 	fUserTimers.RemoveTimer(timer);
860 
861 	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
862 		UserDefinedTimersRemoved(1);
863 }
864 
865 
866 /*!	Deletes all (or all user-defined) user timers of the team.
867 
868 	Timer's belonging to the team's threads are not affected.
869 	The caller must hold the team's lock.
870 
871 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
872 		otherwise all timers are deleted.
873 */
874 void
875 Team::DeleteUserTimers(bool userDefinedOnly)
876 {
877 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
878 	UserDefinedTimersRemoved(count);
879 }
880 
881 
882 /*!	If not at the limit yet, increments the team's user-defined timer count.
883 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
884 */
885 bool
886 Team::CheckAddUserDefinedTimer()
887 {
888 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
889 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
890 		atomic_add(&fUserDefinedTimerCount, -1);
891 		return false;
892 	}
893 
894 	return true;
895 }
896 
897 
898 /*!	Subtracts the given count for the team's user-defined timer count.
899 	\param count The count to subtract.
900 */
901 void
902 Team::UserDefinedTimersRemoved(int32 count)
903 {
904 	atomic_add(&fUserDefinedTimerCount, -count);
905 }
906 
907 
/*!	Deactivates all of the team's CPU time user timers (both the total CPU
	time and the user CPU time lists).
*/
void
Team::DeactivateCPUTimeUserTimers()
{
	// Deactivate() presumably unlinks the timer from the list -- otherwise
	// these loops wouldn't terminate; TODO confirm in UserTimer.cpp.
	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();

	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
		timer->Deactivate();
}
917 
918 
919 /*!	Returns the team's current total CPU time (kernel + user + offset).
920 
921 	The caller must hold \c time_lock.
922 
923 	\param ignoreCurrentRun If \c true and the current thread is one team's
924 		threads, don't add the time since the last time \c last_time was
925 		updated. Should be used in "thread unscheduled" scheduler callbacks,
926 		since although the thread is still running at that time, its time has
927 		already been stopped.
928 	\return The team's current total CPU time.
929 */
930 bigtime_t
931 Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
932 {
933 	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
934 		+ dead_threads_user_time;
935 
936 	Thread* currentThread = thread_get_current_thread();
937 	bigtime_t now = system_time();
938 
939 	for (Thread* thread = thread_list; thread != NULL;
940 			thread = thread->team_next) {
941 		bool alreadyLocked = thread == lockedThread;
942 		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
943 		time += thread->kernel_time + thread->user_time;
944 
945 		if (thread->last_time != 0) {
946 			if (!ignoreCurrentRun || thread != currentThread)
947 				time += now - thread->last_time;
948 		}
949 
950 		if (alreadyLocked)
951 			threadTimeLocker.Detach();
952 	}
953 
954 	return time;
955 }
956 
957 
958 /*!	Returns the team's current user CPU time.
959 
960 	The caller must hold \c time_lock.
961 
962 	\return The team's current user CPU time.
963 */
964 bigtime_t
965 Team::UserCPUTime() const
966 {
967 	bigtime_t time = dead_threads_user_time;
968 
969 	bigtime_t now = system_time();
970 
971 	for (Thread* thread = thread_list; thread != NULL;
972 			thread = thread->team_next) {
973 		SpinLocker threadTimeLocker(thread->time_lock);
974 		time += thread->user_time;
975 
976 		if (thread->last_time != 0 && !thread->in_kernel)
977 			time += now - thread->last_time;
978 	}
979 
980 	return time;
981 }
982 
983 
984 //	#pragma mark - ProcessGroup
985 
986 
987 ProcessGroup::ProcessGroup(pid_t id)
988 	:
989 	id(id),
990 	teams(NULL),
991 	fSession(NULL),
992 	fInOrphanedCheckList(false)
993 {
994 	char lockName[32];
995 	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
996 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
997 }
998 
999 
/*!	Destroys the process group: removes it from the orphaned check list (if
	queued there), from the group hash table, and releases its reference to
	the session.
*/
ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session
	// (fSession is NULL, if the group was never published)
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}
1023 
1024 
1025 /*static*/ ProcessGroup*
1026 ProcessGroup::Get(pid_t id)
1027 {
1028 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1029 	ProcessGroup* group = sGroupHash.Lookup(id);
1030 	if (group != NULL)
1031 		group->AcquireReference();
1032 	return group;
1033 }
1034 
1035 
1036 /*!	Adds the group the given session and makes it publicly accessible.
1037 	The caller must not hold the process group hash lock.
1038 */
1039 void
1040 ProcessGroup::Publish(ProcessSession* session)
1041 {
1042 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1043 	PublishLocked(session);
1044 }
1045 
1046 
1047 /*!	Adds the group to the given session and makes it publicly accessible.
1048 	The caller must hold the process group hash lock.
1049 */
1050 void
1051 ProcessGroup::PublishLocked(ProcessSession* session)
1052 {
1053 	ASSERT(sGroupHash.Lookup(this->id) == NULL);
1054 
1055 	fSession = session;
1056 	fSession->AcquireReference();
1057 
1058 	sGroupHash.InsertUnchecked(this);
1059 }
1060 
1061 
1062 /*!	Checks whether the process group is orphaned.
1063 	The caller must hold the group's lock.
1064 	\return \c true, if the group is orphaned, \c false otherwise.
1065 */
1066 bool
1067 ProcessGroup::IsOrphaned() const
1068 {
1069 	// Orphaned Process Group: "A process group in which the parent of every
1070 	// member is either itself a member of the group or is not a member of the
1071 	// group's session." (Open Group Base Specs Issue 7)
1072 	bool orphaned = true;
1073 
1074 	Team* team = teams;
1075 	while (orphaned && team != NULL) {
1076 		team->LockTeamAndParent(false);
1077 
1078 		Team* parent = team->parent;
1079 		if (parent != NULL && parent->group_id != id
1080 			&& parent->session_id == fSession->id) {
1081 			orphaned = false;
1082 		}
1083 
1084 		team->UnlockTeamAndParent();
1085 
1086 		team = team->group_next;
1087 	}
1088 
1089 	return orphaned;
1090 }
1091 
1092 
1093 void
1094 ProcessGroup::ScheduleOrphanedCheck()
1095 {
1096 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1097 
1098 	if (!fInOrphanedCheckList) {
1099 		sOrphanedCheckProcessGroups.Add(this);
1100 		fInOrphanedCheckList = true;
1101 	}
1102 }
1103 
1104 
/*!	Clears the group's orphaned-check-queued flag.
	NOTE(review): this only clears the flag -- the caller is apparently
	expected to have removed the group from sOrphanedCheckProcessGroups and
	to hold sOrphanedCheckLock; confirm against the callers.
*/
void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}
1110 
1111 
1112 //	#pragma mark - ProcessSession
1113 
1114 
1115 ProcessSession::ProcessSession(pid_t id)
1116 	:
1117 	id(id),
1118 	controlling_tty(-1),
1119 	foreground_group(-1)
1120 {
1121 	char lockName[32];
1122 	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
1123 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
1124 }
1125 
1126 
/*!	Destroys the session, releasing its lock.
*/
ProcessSession::~ProcessSession()
{
	mutex_destroy(&fLock);
}
1131 
1132 
1133 //	#pragma mark - KDL functions
1134 
1135 
/*!	Prints the interesting fields of the given team to the kernel debugger
	output. Helper for the "team" KDL command below.
*/
static void
_dump_team_info(Team* team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
		team->id);
	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
	kprintf("name:             '%s'\n", team->Name());
	kprintf("args:             '%s'\n", team->Args());
	kprintf("hash_next:        %p\n", team->hash_next);
	kprintf("parent:           %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:         %p\n", team->children);
	kprintf("num_threads:      %d\n", team->num_threads);
	kprintf("state:            %d\n", team->state);
	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
	kprintf("io_context:       %p\n", team->io_context);
	// the address space is only printed when set (the team may be dying)
	if (team->address_space)
		kprintf("address_space:    %p\n", team->address_space);
	kprintf("user data:        %p (area %" B_PRId32 ")\n",
		(void*)team->user_data, team->user_data_area);
	kprintf("free user thread: %p\n", team->free_user_threads);
	kprintf("main_thread:      %p\n", team->main_thread);
	kprintf("thread_list:      %p\n", team->thread_list);
	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
}
1167 
1168 
/*!	KDL command: prints info about a single team.
	Without arguments, the current thread's team is dumped. The argument may
	be a team object address, a team ID, or a team name.
	Note: running in the kernel debugger, no locks are acquired here.
*/
static int
dump_team_info(int argc, char** argv)
{
	ulong arg;
	bool found = false;

	// no argument -- dump the current team
	if (argc < 2) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	arg = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(arg)) {
		// semi-hack
		// (treat a kernel address directly as a Team object pointer)
		_dump_team_info((Team*)arg);
		return 0;
	}

	// walk through the thread list, trying to match name or id
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == (team_id)arg) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found)
		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
	return 0;
}
1206 
1207 
/*!	KDL command: lists all teams with their object address, ID, parent, and
	name. Running in the kernel debugger, no locks are acquired here.
*/
static int
dump_teams(int argc, char** argv)
{
	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
		B_PRINTF_POINTER_WIDTH, "parent");

	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
	}

	return 0;
}
1221 
1222 
1223 //	#pragma mark - Private functions
1224 
1225 
1226 /*! Get the parent of a given process.
1227 
1228 	Used in the implementation of getppid (where a process can get its own
1229 	parent, only) as well as in user_process_info where the information is
1230 	available to anyone (allowing to display a tree of running processes)
1231 */
1232 static pid_t
1233 _getppid(pid_t id)
1234 {
1235 	if (id < 0) {
1236 		errno = EINVAL;
1237 		return -1;
1238 	}
1239 
1240 	if (id == 0) {
1241 		Team* team = thread_get_current_thread()->team;
1242 		TeamLocker teamLocker(team);
1243 		if (team->parent == NULL) {
1244 			errno = EINVAL;
1245 			return -1;
1246 		}
1247 		return team->parent->id;
1248 	}
1249 
1250 	Team* team = Team::GetAndLock(id);
1251 	if (team == NULL) {
1252 		errno = ESRCH;
1253 		return -1;
1254 	}
1255 
1256 	pid_t parentID;
1257 
1258 	if (team->parent == NULL) {
1259 		errno = EINVAL;
1260 		parentID = -1;
1261 	} else
1262 		parentID = team->parent->id;
1263 
1264 	team->UnlockAndReleaseReference();
1265 
1266 	return parentID;
1267 }
1268 
1269 
1270 /*!	Inserts team \a team into the child list of team \a parent.
1271 
1272 	The caller must hold the lock of both \a parent and \a team.
1273 
1274 	\param parent The parent team.
1275 	\param team The team to be inserted into \a parent's child list.
1276 */
1277 static void
1278 insert_team_into_parent(Team* parent, Team* team)
1279 {
1280 	ASSERT(parent != NULL);
1281 
1282 	team->siblings_next = parent->children;
1283 	parent->children = team;
1284 	team->parent = parent;
1285 }
1286 
1287 
1288 /*!	Removes team \a team from the child list of team \a parent.
1289 
1290 	The caller must hold the lock of both \a parent and \a team.
1291 
1292 	\param parent The parent team.
1293 	\param team The team to be removed from \a parent's child list.
1294 */
1295 static void
1296 remove_team_from_parent(Team* parent, Team* team)
1297 {
1298 	Team* child;
1299 	Team* last = NULL;
1300 
1301 	for (child = parent->children; child != NULL;
1302 			child = child->siblings_next) {
1303 		if (child == team) {
1304 			if (last == NULL)
1305 				parent->children = child->siblings_next;
1306 			else
1307 				last->siblings_next = child->siblings_next;
1308 
1309 			team->parent = NULL;
1310 			break;
1311 		}
1312 		last = child;
1313 	}
1314 }
1315 
1316 
1317 /*!	Returns whether the given team is a session leader.
1318 	The caller must hold the team's lock or its process group's lock.
1319 */
1320 static bool
1321 is_session_leader(Team* team)
1322 {
1323 	return team->session_id == team->id;
1324 }
1325 
1326 
1327 /*!	Returns whether the given team is a process group leader.
1328 	The caller must hold the team's lock or its process group's lock.
1329 */
1330 static bool
1331 is_process_group_leader(Team* team)
1332 {
1333 	return team->group_id == team->id;
1334 }
1335 
1336 
1337 /*!	Inserts the given team into the given process group.
1338 	The caller must hold the process group's lock, the team's lock, and the
1339 	team's parent's lock.
1340 */
1341 static void
1342 insert_team_into_group(ProcessGroup* group, Team* team)
1343 {
1344 	team->group = group;
1345 	team->group_id = group->id;
1346 	team->session_id = group->Session()->id;
1347 
1348 	team->group_next = group->teams;
1349 	group->teams = team;
1350 	group->AcquireReference();
1351 }
1352 
1353 
1354 /*!	Removes the given team from its process group.
1355 
1356 	The caller must hold the process group's lock, the team's lock, and the
1357 	team's parent's lock. Interrupts must be enabled.
1358 
1359 	\param team The team that'll be removed from its process group.
1360 */
1361 static void
1362 remove_team_from_group(Team* team)
1363 {
1364 	ProcessGroup* group = team->group;
1365 	Team* current;
1366 	Team* last = NULL;
1367 
1368 	// the team must be in a process group to let this function have any effect
1369 	if  (group == NULL)
1370 		return;
1371 
1372 	for (current = group->teams; current != NULL;
1373 			current = current->group_next) {
1374 		if (current == team) {
1375 			if (last == NULL)
1376 				group->teams = current->group_next;
1377 			else
1378 				last->group_next = current->group_next;
1379 
1380 			team->group = NULL;
1381 			break;
1382 		}
1383 		last = current;
1384 	}
1385 
1386 	team->group = NULL;
1387 	team->group_next = NULL;
1388 
1389 	group->ReleaseReference();
1390 }
1391 
1392 
/*!	Creates the team's user data area, from which e.g. user_thread
	structures are allocated (cf. delete_team_user_data()).

	Tries to reserve kTeamUserDataReservedSize of address space -- so the
	area can later be resized within it -- and creates an initial area of
	kTeamUserDataInitialSize. On success the team's \c user_data,
	\c user_data_area, \c user_data_size, \c used_user_data, and
	\c free_user_threads fields are initialized.

	\param team The team to create the area for.
	\param exactAddress If not \c NULL, the area is created at exactly this
		address (used by fork_team(), where the child's area must mirror
		the parent's); otherwise a randomized address based on
		KERNEL_USER_DATA_BASE is chosen.
	\return \c B_OK on success, another error code otherwise.
*/
static status_t
create_team_user_data(Team* team, void* exactAddress = NULL)
{
	void* address;
	uint32 addressSpec;

	if (exactAddress != NULL) {
		address = exactAddress;
		addressSpec = B_EXACT_ADDRESS;
	} else {
		address = (void*)KERNEL_USER_DATA_BASE;
		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
	}

	// reserve the full range the area may later be resized into
	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);

	// If the reservation succeeded, create the area at the address the
	// reservation settled on. With an exact address the reservation result
	// is ignored and the exact address is used regardless.
	// NOTE(review): in the failed-reservation + exactAddress case the area
	// presumably can't be resized later -- confirm this is intentional.
	virtual_address_restrictions virtualRestrictions = {};
	if (result == B_OK || exactAddress != NULL) {
		if (exactAddress != NULL)
			virtualRestrictions.address = exactAddress;
		else
			virtualRestrictions.address = address;
		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	} else {
		// no reservation -- fall back to a randomized, unreserved address
		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
	}

	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area",
		kTeamUserDataInitialSize, B_FULL_LOCK,
		B_READ_AREA | B_WRITE_AREA | B_KERNEL_AREA, 0, 0,
		&virtualRestrictions, &physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	// initialize the team's user data bookkeeping
	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = kTeamUserDataInitialSize;
	team->free_user_threads = NULL;

	return B_OK;
}
1437 
1438 
1439 static void
1440 delete_team_user_data(Team* team)
1441 {
1442 	if (team->user_data_area >= 0) {
1443 		vm_delete_area(team->id, team->user_data_area, true);
1444 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1445 			kTeamUserDataReservedSize);
1446 
1447 		team->user_data = 0;
1448 		team->used_user_data = 0;
1449 		team->user_data_size = 0;
1450 		team->user_data_area = -1;
1451 		while (free_user_thread* entry = team->free_user_threads) {
1452 			team->free_user_threads = entry->next;
1453 			free(entry);
1454 		}
1455 	}
1456 }
1457 
1458 
1459 static status_t
1460 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1461 	int32 argCount, int32 envCount, char**& _flatArgs)
1462 {
1463 	if (argCount < 0 || envCount < 0)
1464 		return B_BAD_VALUE;
1465 
1466 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1467 		return B_TOO_MANY_ARGS;
1468 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1469 		return B_BAD_VALUE;
1470 
1471 	if (!IS_USER_ADDRESS(userFlatArgs))
1472 		return B_BAD_ADDRESS;
1473 
1474 	// allocate kernel memory
1475 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1476 	if (flatArgs == NULL)
1477 		return B_NO_MEMORY;
1478 
1479 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1480 		free(flatArgs);
1481 		return B_BAD_ADDRESS;
1482 	}
1483 
1484 	// check and relocate the array
1485 	status_t error = B_OK;
1486 	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
1487 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1488 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1489 		if (i == argCount || i == argCount + envCount + 1) {
1490 			// check array null termination
1491 			if (flatArgs[i] != NULL) {
1492 				error = B_BAD_VALUE;
1493 				break;
1494 			}
1495 		} else {
1496 			// check string
1497 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1498 			size_t maxLen = stringEnd - arg;
1499 			if (arg < stringBase || arg >= stringEnd
1500 					|| strnlen(arg, maxLen) == maxLen) {
1501 				error = B_BAD_VALUE;
1502 				break;
1503 			}
1504 
1505 			flatArgs[i] = arg;
1506 		}
1507 	}
1508 
1509 	if (error == B_OK)
1510 		_flatArgs = flatArgs;
1511 	else
1512 		free(flatArgs);
1513 
1514 	return error;
1515 }
1516 
1517 
1518 static void
1519 free_team_arg(struct team_arg* teamArg)
1520 {
1521 	if (teamArg != NULL) {
1522 		free(teamArg->flat_args);
1523 		free(teamArg->path);
1524 		free(teamArg);
1525 	}
1526 }
1527 
1528 
1529 static status_t
1530 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1531 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1532 	port_id port, uint32 token)
1533 {
1534 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1535 	if (teamArg == NULL)
1536 		return B_NO_MEMORY;
1537 
1538 	teamArg->path = strdup(path);
1539 	if (teamArg->path == NULL) {
1540 		free(teamArg);
1541 		return B_NO_MEMORY;
1542 	}
1543 
1544 	// copy the args over
1545 	teamArg->flat_args = flatArgs;
1546 	teamArg->flat_args_size = flatArgsSize;
1547 	teamArg->arg_count = argCount;
1548 	teamArg->env_count = envCount;
1549 	teamArg->flags = 0;
1550 	teamArg->umask = umask;
1551 	teamArg->error_port = port;
1552 	teamArg->error_token = token;
1553 
1554 	// determine the flags from the environment
1555 	const char* const* env = flatArgs + argCount + 1;
1556 	for (int32 i = 0; i < envCount; i++) {
1557 		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
1558 			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
1559 			break;
1560 		}
1561 	}
1562 
1563 	*_teamArg = teamArg;
1564 	return B_OK;
1565 }
1566 
1567 
/*!	Does the real work behind the entry point of a new userland team's main
	thread (cf. team_create_thread_start()).

	Runs in the context of the freshly created team: copies the program
	arguments and environment onto the main thread's user stack, registers
	the commpage as an image of the team, loads the runtime loader, and
	finally enters userspace.

	\param args The team_arg structure (ownership is taken over; it is
		freed once the arguments have been copied to userland).
	\return Only returns in case of error; on success
		thread_enter_userspace_new_team() does not return.
*/
static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
		// presumably feeds the file cache's launch speedup -- TODO confirm

	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
		thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size								| usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
	// TLS_SIZE							| TLS data
	// sizeof(user_space_program_args)	| argument structure for the runtime
	//									| loader
	// flat arguments size				| flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	// locate the argument structure on the user stack (see layout above)
	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	// fill in the argument structure and copy the flat arguments/environment
	// to the user stack
	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(&programArgs->disable_user_addons,
			&sDisableUserAddOns, sizeof(bool)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// set team args and update state
	team->Lock();
	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	// Clone commpage area
	area_id commPageArea = clone_commpage_area(team->id,
		&team->commpage_address);
	if (commPageArea  < B_OK) {
		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
			strerror(commPageArea)));
		return commPageArea;
	}

	// Register commpage image
	image_id commPageImage = get_commpage_image();
	extended_image_info imageInfo;
	err = get_image_info(commPageImage, &imageInfo.basic_info);
	if (err != B_OK) {
		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
			strerror(err)));
		return err;
	}
	// fix up the image info to reflect the team-local commpage clone
	imageInfo.basic_info.text = team->commpage_address;
	imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
	imageInfo.symbol_table = NULL;
	imageInfo.symbol_hash = NULL;
	imageInfo.string_table = NULL;
	image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
	if (image < 0) {
		TRACE(("team_create_thread_start: register_image() failed: %s\n",
			strerror(image)));
		return image;
	}

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			// load the runtime loader, which will in turn load the program
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	// enter userspace -- returns only in case of error
	return thread_enter_userspace_new_team(thread, (addr_t)entry,
		programArgs, team->commpage_address);
}
1710 
1711 
/*!	Entry point of a new userland team's main thread.

	Thin wrapper around team_create_thread_start_internal(): if the latter
	returns (i.e. an error occurred before userspace was entered), the
	team's exit info is initialized accordingly and the thread exits.
*/
static status_t
team_create_thread_start(void* args)
{
	team_create_thread_start_internal(args);
	team_init_exit_info_on_error(thread_get_current_thread()->team);
	thread_exit();
		// does not return
	return B_OK;
}
1721 
1722 
/*!	Creates a new team with a main thread and loads the given image into it.

	Backs load_image() and friends. Creates Team and Thread objects, sets up
	the team's I/O context, address space, and user data area, links the
	team into its parent, process group, and the global team hash, and
	finally spawns the main thread, which loads the image (cf.
	team_create_thread_start()).

	\param _flatArgs The flat arguments block (kernel copy); on success its
		ownership passes to the created team_arg structure and the
		reference is set to \c NULL.
	\param flatArgsSize Size of the flat arguments block in bytes.
	\param argCount Number of arguments (\c flatArgs[0] is the image path).
	\param envCount Number of environment variables.
	\param priority Unused here; the main thread is created with
		\c B_NORMAL_PRIORITY.
	\param parentID ID of the team that shall become the new team's parent.
	\param flags If \c B_WAIT_TILL_LOADED is set, the function waits until
		the runtime loader finished (or failed) loading the image.
	\param errorPort Port to which the runtime loader reports errors.
	\param errorToken Token identifying this request on \a errorPort.
	\return The ID of the created main thread, or an error code.
*/
static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	thread_id thread;
	status_t status;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	ConditionVariableEntry loadingWaitEntry;
	io_context* parentIOContext = NULL;
	team_id teamID;
	bool teamLimitReached = false;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
		"\n", path, flatArgs, argCount));

	// cut the path from the main thread name
	const char* threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the main thread object
	Thread* mainThread;
	status = Thread::Create(threadName, mainThread);
	if (status != B_OK)
		return status;
	BReference<Thread> mainThreadReference(mainThread, true);

	// create team object
	Team* team = Team::Create(mainThread->id, path, false);
	if (team == NULL)
		return B_NO_MEMORY;
	BReference<Team> teamReference(team, true);

	// prepare the condition variable we'll wait on until loading is done
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		loadingInfo.condition.Init(team, "image load");
		loadingInfo.condition.Add(&loadingWaitEntry);
		loadingInfo.result = B_ERROR;
		team->loading_info = &loadingInfo;
	}

	// get the parent team
	Team* parent = Team::Get(parentID);
	if (parent == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> parentReference(parent, true);

	parent->LockTeamAndProcessGroup();
	team->Lock();

	// inherit the parent's user/group
	inherit_parent_user_and_group(team, parent);

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err2;

	// ASLR is enabled unless the environment requested otherwise
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// insert the team into its parent and the teams hash
	parent->LockTeamAndProcessGroup();
	team->Lock();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);

		sTeamHash.Insert(team);
		teamLimitReached = sUsedTeams >= sMaxTeams;
		if (!teamLimitReached)
			sUsedTeams++;
	}

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// the team limit check is done only after the team is fully linked, so
	// the error path can unlink it again
	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err6;
	}

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err6;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		if (mainThread != NULL) {
			// resume the team's main thread
			thread_continue(mainThread);
		}

		// Now wait until loading is finished. We will be woken either by the
		// thread, when it finished or aborted loading, or when the team is
		// going to die (e.g. is killed). In either case the one notifying is
		// responsible for unsetting `loading_info` in the team structure.
		loadingWaitEntry.Wait();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(teamID);

	return thread;

err6:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
		sTeamHash.Remove(team);
		if (!teamLimitReached)
			sUsedTeams--;
	}

	sNotificationService.Notify(TEAM_REMOVED, team);

	delete_team_user_data(team);
err4:
	team->address_space->Put();
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	return status;
}
1934 
1935 
/*!	Almost shuts down the current team and loads a new image into it.

	If successful, this function does not return and will takeover ownership
	of the arguments provided.
	This function may only be called in a userland team (caused by one of the
	exec*() syscalls), and only by the team's main thread while no other
	userland threads (besides the debug nub thread) exist.

	\param path Path of the image to be loaded.
	\param _flatArgs The flat arguments block (kernel copy); on success its
		ownership passes to the created team_arg structure and the reference
		is set to \c NULL.
	\param flatArgsSize Size of the flat arguments block in bytes.
	\param argCount Number of arguments.
	\param envCount Number of environment variables.
	\param umask The umask the new image shall start with.
	\return An error code; does not return on success (the thread either
		enters userspace or exits).
*/
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
		team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	// user timers and signal state do not survive an exec
	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	// Tear down the old userland state: user data area, all other areas,
	// XSI semaphore undo state, owned ports and semaphores, images, CLOEXEC
	// file descriptors, and the realtime semaphore context. From this point
	// on there is no way back -- on error we can only exit the thread.
	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	// update ASLR
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
			// does never return
	} else
		free_team_arg(teamArgs);

	// Sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that).
	exit_thread(status);

	// We return a status here since the signal that is sent by the
	// call above is not immediately handled.
	return B_ERROR;
}
2082 
2083 
2084 static thread_id
2085 fork_team(void)
2086 {
2087 	Thread* parentThread = thread_get_current_thread();
2088 	Team* parentTeam = parentThread->team;
2089 	Team* team;
2090 	arch_fork_arg* forkArgs;
2091 	struct area_info info;
2092 	thread_id threadID;
2093 	status_t status;
2094 	ssize_t areaCookie;
2095 	bool teamLimitReached = false;
2096 
2097 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2098 
2099 	if (parentTeam == team_get_kernel_team())
2100 		return B_NOT_ALLOWED;
2101 
2102 	// create a new team
2103 	// TODO: this is very similar to load_image_internal() - maybe we can do
2104 	// something about it :)
2105 
2106 	// create the main thread object
2107 	Thread* thread;
2108 	status = Thread::Create(parentThread->name, thread);
2109 	if (status != B_OK)
2110 		return status;
2111 	BReference<Thread> threadReference(thread, true);
2112 
2113 	// create the team object
2114 	team = Team::Create(thread->id, NULL, false);
2115 	if (team == NULL)
2116 		return B_NO_MEMORY;
2117 
2118 	parentTeam->LockTeamAndProcessGroup();
2119 	team->Lock();
2120 
2121 	team->SetName(parentTeam->Name());
2122 	team->SetArgs(parentTeam->Args());
2123 
2124 	team->commpage_address = parentTeam->commpage_address;
2125 
2126 	// Inherit the parent's user/group.
2127 	inherit_parent_user_and_group(team, parentTeam);
2128 
2129 	// inherit signal handlers
2130 	team->InheritSignalActions(parentTeam);
2131 
2132 	team->Unlock();
2133 	parentTeam->UnlockTeamAndProcessGroup();
2134 
2135 	// inherit some team debug flags
2136 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2137 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2138 
2139 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2140 	if (forkArgs == NULL) {
2141 		status = B_NO_MEMORY;
2142 		goto err1;
2143 	}
2144 
2145 	// create a new io_context for this team
2146 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2147 	if (!team->io_context) {
2148 		status = B_NO_MEMORY;
2149 		goto err2;
2150 	}
2151 
2152 	// duplicate the realtime sem context
2153 	if (parentTeam->realtime_sem_context) {
2154 		team->realtime_sem_context = clone_realtime_sem_context(
2155 			parentTeam->realtime_sem_context);
2156 		if (team->realtime_sem_context == NULL) {
2157 			status = B_NO_MEMORY;
2158 			goto err2;
2159 		}
2160 	}
2161 
2162 	// create an address space for this team
2163 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2164 		&team->address_space);
2165 	if (status < B_OK)
2166 		goto err3;
2167 
2168 	// copy all areas of the team
2169 	// TODO: should be able to handle stack areas differently (ie. don't have
2170 	// them copy-on-write)
2171 
2172 	areaCookie = 0;
2173 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2174 		if (info.area == parentTeam->user_data_area) {
2175 			// don't clone the user area; just create a new one
2176 			status = create_team_user_data(team, info.address);
2177 			if (status != B_OK)
2178 				break;
2179 
2180 			thread->user_thread = team_allocate_user_thread(team);
2181 		} else {
2182 			void* address;
2183 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2184 				&address, B_CLONE_ADDRESS, info.area);
2185 			if (area < B_OK) {
2186 				status = area;
2187 				break;
2188 			}
2189 
2190 			if (info.area == parentThread->user_stack_area)
2191 				thread->user_stack_area = area;
2192 		}
2193 	}
2194 
2195 	if (status < B_OK)
2196 		goto err4;
2197 
2198 	if (thread->user_thread == NULL) {
2199 #if KDEBUG
2200 		panic("user data area not found, parent area is %" B_PRId32,
2201 			parentTeam->user_data_area);
2202 #endif
2203 		status = B_ERROR;
2204 		goto err4;
2205 	}
2206 
2207 	thread->user_stack_base = parentThread->user_stack_base;
2208 	thread->user_stack_size = parentThread->user_stack_size;
2209 	thread->user_local_storage = parentThread->user_local_storage;
2210 	thread->sig_block_mask = parentThread->sig_block_mask;
2211 	thread->signal_stack_base = parentThread->signal_stack_base;
2212 	thread->signal_stack_size = parentThread->signal_stack_size;
2213 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2214 
2215 	arch_store_fork_frame(forkArgs);
2216 
2217 	// copy image list
2218 	if (copy_images(parentTeam->id, team) != B_OK)
2219 		goto err5;
2220 
2221 	// insert the team into its parent and the teams hash
2222 	parentTeam->LockTeamAndProcessGroup();
2223 	team->Lock();
2224 
2225 	{
2226 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2227 
2228 		sTeamHash.Insert(team);
2229 		teamLimitReached = sUsedTeams >= sMaxTeams;
2230 		if (!teamLimitReached)
2231 			sUsedTeams++;
2232 	}
2233 
2234 	insert_team_into_parent(parentTeam, team);
2235 	insert_team_into_group(parentTeam->group, team);
2236 
2237 	team->Unlock();
2238 	parentTeam->UnlockTeamAndProcessGroup();
2239 
2240 	// notify team listeners
2241 	sNotificationService.Notify(TEAM_ADDED, team);
2242 
2243 	if (teamLimitReached) {
2244 		status = B_NO_MORE_TEAMS;
2245 		goto err6;
2246 	}
2247 
2248 	// create the main thread
2249 	{
2250 		ThreadCreationAttributes threadCreationAttributes(NULL,
2251 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2252 		threadCreationAttributes.forkArgs = forkArgs;
2253 		threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS;
2254 		threadID = thread_create_thread(threadCreationAttributes, false);
2255 		if (threadID < 0) {
2256 			status = threadID;
2257 			goto err6;
2258 		}
2259 	}
2260 
2261 	// notify the debugger
2262 	user_debug_team_created(team->id);
2263 
2264 	T(TeamForked(threadID));
2265 
2266 	resume_thread(threadID);
2267 	return threadID;
2268 
2269 err6:
2270 	// Remove the team structure from the process group, the parent team, and
2271 	// the team hash table and delete the team structure.
2272 	parentTeam->LockTeamAndProcessGroup();
2273 	team->Lock();
2274 
2275 	remove_team_from_group(team);
2276 	remove_team_from_parent(team->parent, team);
2277 
2278 	team->Unlock();
2279 	parentTeam->UnlockTeamAndProcessGroup();
2280 
2281 	{
2282 		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2283 		sTeamHash.Remove(team);
2284 		if (!teamLimitReached)
2285 			sUsedTeams--;
2286 	}
2287 
2288 	sNotificationService.Notify(TEAM_REMOVED, team);
2289 err5:
2290 	remove_images(team);
2291 err4:
2292 	team->address_space->RemoveAndPut();
2293 err3:
2294 	delete_realtime_sem_context(team->realtime_sem_context);
2295 err2:
2296 	free(forkArgs);
2297 err1:
2298 	team->ReleaseReference();
2299 
2300 	return status;
2301 }
2302 
2303 
2304 /*!	Returns if the specified team \a parent has any children belonging to the
2305 	process group with the specified ID \a groupID.
2306 	The caller must hold \a parent's lock.
2307 */
2308 static bool
2309 has_children_in_group(Team* parent, pid_t groupID)
2310 {
2311 	for (Team* child = parent->children; child != NULL;
2312 			child = child->siblings_next) {
2313 		TeamLocker childLocker(child);
2314 		if (child->group_id == groupID)
2315 			return true;
2316 	}
2317 
2318 	return false;
2319 }
2320 
2321 
2322 /*!	Returns the first job control entry from \a children, which matches \a id.
2323 	\a id can be:
2324 	- \code > 0 \endcode: Matching an entry with that team ID.
2325 	- \code == -1 \endcode: Matching any entry.
2326 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2327 	\c 0 is an invalid value for \a id.
2328 
2329 	The caller must hold the lock of the team that \a children belongs to.
2330 
2331 	\param children The job control entry list to check.
2332 	\param id The match criterion.
2333 	\return The first matching entry or \c NULL, if none matches.
2334 */
2335 static job_control_entry*
2336 get_job_control_entry(team_job_control_children& children, pid_t id)
2337 {
2338 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2339 		 job_control_entry* entry = it.Next();) {
2340 
2341 		if (id > 0) {
2342 			if (entry->thread == id)
2343 				return entry;
2344 		} else if (id == -1) {
2345 			return entry;
2346 		} else {
2347 			pid_t processGroup
2348 				= (entry->team ? entry->team->group_id : entry->group_id);
2349 			if (processGroup == -id)
2350 				return entry;
2351 		}
2352 	}
2353 
2354 	return NULL;
2355 }
2356 
2357 
2358 /*!	Returns the first job control entry from one of team's dead, continued, or
2359 	stopped children which matches \a id.
2360 	\a id can be:
2361 	- \code > 0 \endcode: Matching an entry with that team ID.
2362 	- \code == -1 \endcode: Matching any entry.
2363 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2364 	\c 0 is an invalid value for \a id.
2365 
2366 	The caller must hold \a team's lock.
2367 
2368 	\param team The team whose dead, stopped, and continued child lists shall be
2369 		checked.
2370 	\param id The match criterion.
2371 	\param flags Specifies which children shall be considered. Dead children
2372 		are considered when \a flags is ORed bitwise with \c WEXITED, stopped
2373 		children are considered when \a flags is ORed bitwise with \c WUNTRACED
2374 		or \c WSTOPPED, continued children when \a flags is ORed bitwise with
2375 		\c WCONTINUED.
2376 	\return The first matching entry or \c NULL, if none matches.
2377 */
2378 static job_control_entry*
2379 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2380 {
2381 	job_control_entry* entry = NULL;
2382 
2383 	if ((flags & WEXITED) != 0)
2384 		entry = get_job_control_entry(team->dead_children, id);
2385 
2386 	if (entry == NULL && (flags & WCONTINUED) != 0)
2387 		entry = get_job_control_entry(team->continued_children, id);
2388 
2389 	if (entry == NULL && (flags & (WUNTRACED | WSTOPPED)) != 0)
2390 		entry = get_job_control_entry(team->stopped_children, id);
2391 
2392 	return entry;
2393 }
2394 
2395 
/*!	Creates an entry that does not (yet) hold a process group reference;
	InitDeadState() acquires one when the owning team dies. */
job_control_entry::job_control_entry()
	:
	has_group_ref(false)
{
}
2401 
2402 
/*!	Releases the process group reference acquired in InitDeadState(),
	if any. */
job_control_entry::~job_control_entry()
{
	if (has_group_ref) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);

		ProcessGroup* group = sGroupHash.Lookup(group_id);
		if (group == NULL) {
			panic("job_control_entry::~job_control_entry(): unknown group "
				"ID: %" B_PRId32, group_id);
			return;
		}

		// Unlock before releasing the reference -- NOTE(review): presumably
		// releasing the last reference destroys the group, which must not
		// happen while sGroupHashLock is held.
		groupHashLocker.Unlock();

		group->ReleaseReference();
	}
}
2420 
2421 
2422 /*!	Invoked when the owning team is dying, initializing the entry according to
2423 	the dead state.
2424 
2425 	The caller must hold the owning team's lock and the scheduler lock.
2426 */
void
job_control_entry::InitDeadState()
{
	if (team != NULL) {
		ASSERT(team->exit.initialized);

		// keep the process group alive as long as this entry exists; the
		// reference is released in the destructor
		group_id = team->group_id;
		team->group->AcquireReference();
		has_group_ref = true;

		// Snapshot all exit information a waiting parent may ask for; once
		// `team` is set to NULL below, these copies are all that remains.
		thread = team->id;
		status = team->exit.status;
		reason = team->exit.reason;
		signal = team->exit.signal;
		signaling_user = team->exit.signaling_user;
		user_time = team->dead_threads_user_time
			+ team->dead_children.user_time;
		kernel_time = team->dead_threads_kernel_time
			+ team->dead_children.kernel_time;

		// detach from the (dying) team -- the entry is now self-contained
		team = NULL;
	}
}
2450 
2451 
/*!	Copies all job control state from \a other. The process group reference
	is deliberately not transferred: \c has_group_ref is set to \c false, so
	only the source entry releases the reference on destruction. */
job_control_entry&
job_control_entry::operator=(const job_control_entry& other)
{
	state = other.state;
	thread = other.thread;
	signal = other.signal;
	has_group_ref = false;
	signaling_user = other.signaling_user;
	team = other.team;
	group_id = other.group_id;
	status = other.status;
	reason = other.reason;
	user_time = other.user_time;
	kernel_time = other.kernel_time;

	return *this;
}
2469 
2470 
/*!	This is the kernel backend for waitid().

	Waits for a state change (death, stop, continuation) of one of the
	calling team's children, as selected by \a child and \a flags.

	\param child \code > 0 \endcode: wait for the child with that team ID;
		\c -1: wait for any child; \c 0: wait for any child in the caller's
		process group; \code < -1 \endcode: wait for any child in the
		process group \c -child.
	\param flags A combination of \c WEXITED, \c WUNTRACED / \c WSTOPPED,
		and \c WCONTINUED selecting the state changes of interest, plus
		optionally \c WNOHANG (don't block) and \c WNOWAIT (don't consume
		the state change).
	\param _info On success, filled in with SIGCHLD-style information
		describing the state change.
	\param _usage_info On success (dead child), filled in with the child's
		accumulated CPU times.
	\return The ID of the child team whose state changed, or a negative
		error code (\c ECHILD, \c B_WOULD_BLOCK, \c B_INTERRUPTED, ...).
*/
static thread_id
wait_for_child(pid_t child, uint32 flags, siginfo_t& _info,
	team_usage_info& _usage_info)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	struct job_control_entry foundEntry;
	struct job_control_entry* freeDeathEntry = NULL;
	status_t status = B_OK;

	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
		child, flags));

	T(WaitForChild(child, flags));

	// at least one kind of state change must be of interest
	if ((flags & (WEXITED | WUNTRACED | WSTOPPED | WCONTINUED)) == 0) {
		T(WaitForChildDone(B_BAD_VALUE));
		return B_BAD_VALUE;
	}

	pid_t originalChild = child;

	bool ignoreFoundEntries = false;
	bool ignoreFoundEntriesChecked = false;

	while (true) {
		// lock the team
		TeamLocker teamLocker(team);

		// A 0 child argument means to wait for all children in the process
		// group of the calling team.
		child = originalChild == 0 ? -team->group_id : originalChild;

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		if (entry == NULL) {
			// No success yet -- check whether there are any children complying
			// to the process group specification at all.
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else if (child != team->id) {
				if (Team* childTeam = Team::Get(child)) {
					BReference<Team> childTeamReference(childTeam, true);
					TeamLocker childTeamLocker(childTeam);
					childrenExist = childTeam->parent == team;
				}
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;

			// unless WNOWAIT has been specified, "consume" the wait state
			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
				if (entry->state == JOB_CONTROL_STATE_DEAD) {
					// The child is dead. Reap its death entry.
					freeDeathEntry = entry;
					team->dead_children.entries.Remove(entry);
					team->dead_children.count--;
				} else {
					// The child is well. Reset its job control state.
					team_set_job_control_state(entry->team,
						JOB_CONTROL_STATE_NONE, NULL);
				}
			}
		}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children.condition_variable.Add(&deadWaitEntry);

		teamLocker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		// a hard error, or the caller doesn't want to block -- bail out
		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		// block until a child's state changes (interruptible by signals)
		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			teamLocker.Lock();

			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			teamLocker.Unlock();

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// When we got here, we have a valid death entry, and already got
	// unregistered from the team or group. Fill in the returned info.
	memset(&_info, 0, sizeof(_info));
	_info.si_signo = SIGCHLD;
	_info.si_pid = foundEntry.thread;
	_info.si_uid = foundEntry.signaling_user;
	// TODO: Fill in si_errno?

	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			_info.si_code = foundEntry.reason;
			_info.si_status = foundEntry.reason == CLD_EXITED
				? foundEntry.status : foundEntry.signal;
			_usage_info.user_time = foundEntry.user_time;
			_usage_info.kernel_time = foundEntry.kernel_time;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			_info.si_code = CLD_STOPPED;
			_info.si_status = foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			_info.si_code = CLD_CONTINUED;
			_info.si_status = 0;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
	// status is available.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker signalLocker(team->signal_lock);
	SpinLocker threadCreationLocker(gThreadCreationLock);

	if (is_team_signal_blocked(team, SIGCHLD)) {
		if (get_job_control_entry(team, child, flags) == NULL)
			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
	}

	threadCreationLocker.Unlock();
	signalLocker.Unlock();
	teamLocker.Unlock();

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}
2659 
2660 
2661 /*! Fills the team_info structure with information from the specified team.
2662 	Interrupts must be enabled. The team must not be locked.
2663 */
2664 static status_t
2665 fill_team_info(Team* team, team_info* info, size_t size)
2666 {
2667 	if (size != sizeof(team_info))
2668 		return B_BAD_VALUE;
2669 
2670 	// TODO: Set more informations for team_info
2671 	memset(info, 0, size);
2672 
2673 	info->team = team->id;
2674 		// immutable
2675 	info->image_count = count_images(team);
2676 		// protected by sImageMutex
2677 
2678 	TeamLocker teamLocker(team);
2679 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2680 
2681 	info->thread_count = team->num_threads;
2682 	//info->area_count =
2683 	info->debugger_nub_thread = team->debug_info.nub_thread;
2684 	info->debugger_nub_port = team->debug_info.nub_port;
2685 	info->uid = team->effective_uid;
2686 	info->gid = team->effective_gid;
2687 
2688 	strlcpy(info->args, team->Args(), sizeof(info->args));
2689 	info->argc = 1;
2690 
2691 	return B_OK;
2692 }
2693 
2694 
2695 /*!	Returns whether the process group contains stopped processes.
2696 	The caller must hold the process group's lock.
2697 */
2698 static bool
2699 process_group_has_stopped_processes(ProcessGroup* group)
2700 {
2701 	Team* team = group->teams;
2702 	while (team != NULL) {
2703 		// the parent team's lock guards the job control entry -- acquire it
2704 		team->LockTeamAndParent(false);
2705 
2706 		if (team->job_control_entry != NULL
2707 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2708 			team->UnlockTeamAndParent();
2709 			return true;
2710 		}
2711 
2712 		team->UnlockTeamAndParent();
2713 
2714 		team = team->group_next;
2715 	}
2716 
2717 	return false;
2718 }
2719 
2720 
2721 /*!	Iterates through all process groups queued in team_remove_team() and signals
2722 	those that are orphaned and have stopped processes.
2723 	The caller must not hold any team or process group locks.
2724 */
static void
orphaned_process_group_check()
{
	// process as long as there are groups in the list
	while (true) {
		// remove the head from the list
		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
		if (group == NULL)
			return;

		group->UnsetOrphanedCheck();
		BReference<ProcessGroup> groupReference(group);

		// release the list lock before taking the group lock
		orphanedCheckLocker.Unlock();

		AutoLocker<ProcessGroup> groupLocker(group);

		// If the group is orphaned and contains stopped processes, we're
		// supposed to send SIGHUP + SIGCONT.
		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
			Thread* currentThread = thread_get_current_thread();

			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
			send_signal_to_process_group_locked(group, signal, 0);

			signal.SetNumber(SIGCONT);
			send_signal_to_process_group_locked(group, signal, 0);
		}
	}
}
2757 
2758 
2759 static status_t
2760 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2761 	uint32 flags)
2762 {
2763 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2764 		return B_BAD_VALUE;
2765 
2766 	// get the team
2767 	Team* team = Team::GetAndLock(id);
2768 	if (team == NULL)
2769 		return B_BAD_TEAM_ID;
2770 	BReference<Team> teamReference(team, true);
2771 	TeamLocker teamLocker(team, true);
2772 
2773 	if ((flags & B_CHECK_PERMISSION) != 0) {
2774 		uid_t uid = geteuid();
2775 		if (uid != 0 && uid != team->effective_uid)
2776 			return B_NOT_ALLOWED;
2777 	}
2778 
2779 	bigtime_t kernelTime = 0;
2780 	bigtime_t userTime = 0;
2781 
2782 	switch (who) {
2783 		case B_TEAM_USAGE_SELF:
2784 		{
2785 			Thread* thread = team->thread_list;
2786 
2787 			for (; thread != NULL; thread = thread->team_next) {
2788 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2789 				kernelTime += thread->kernel_time;
2790 				userTime += thread->user_time;
2791 			}
2792 
2793 			kernelTime += team->dead_threads_kernel_time;
2794 			userTime += team->dead_threads_user_time;
2795 			break;
2796 		}
2797 
2798 		case B_TEAM_USAGE_CHILDREN:
2799 		{
2800 			Team* child = team->children;
2801 			for (; child != NULL; child = child->siblings_next) {
2802 				TeamLocker childLocker(child);
2803 
2804 				Thread* thread = team->thread_list;
2805 
2806 				for (; thread != NULL; thread = thread->team_next) {
2807 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2808 					kernelTime += thread->kernel_time;
2809 					userTime += thread->user_time;
2810 				}
2811 
2812 				kernelTime += child->dead_threads_kernel_time;
2813 				userTime += child->dead_threads_user_time;
2814 			}
2815 
2816 			kernelTime += team->dead_children.kernel_time;
2817 			userTime += team->dead_children.user_time;
2818 			break;
2819 		}
2820 	}
2821 
2822 	info->kernel_time = kernelTime;
2823 	info->user_time = userTime;
2824 
2825 	return B_OK;
2826 }
2827 
2828 
2829 //	#pragma mark - Private kernel API
2830 
2831 
/*!	Initializes the team subsystem: sets up the team and process group hash
	tables, the initial session and process group (both with ID 1), and the
	kernel team, and registers the "team"/"teams" debugger commands as well
	as the team notification service.
	Called once during kernel startup; panics on failure.
*/
status_t
team_init(kernel_args* args)
{
	// create the team hash table
	new(&sTeamHash) TeamTable;
	if (sTeamHash.Init(64) != B_OK)
		panic("Failed to init team hash table!");

	new(&sGroupHash) ProcessGroupHashTable;
	if (sGroupHash.Init() != B_OK)
		panic("Failed to init process group hash table!");

	// create initial session and process groups

	ProcessSession* session = new(std::nothrow) ProcessSession(1);
	if (session == NULL)
		panic("Could not create initial session.\n");
	BReference<ProcessSession> sessionReference(session, true);

	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");
	BReference<ProcessGroup> groupReference(group, true);

	group->Publish(session);

	// create the kernel team (team ID 1)
	sKernelTeam = Team::Create(1, "kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");
	sKernelTeam->SetArgs(sKernelTeam->Name());
	sKernelTeam->state = TEAM_STATE_NORMAL;

	// the kernel team runs as root with no supplementary groups
	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;
	sKernelTeam->supplementary_group_count = 0;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
		dprintf("Failed to resize FD table for kernel team!\n");

	// stick it in the team hash
	sTeamHash.Insert(sKernelTeam);

	// check safe mode settings
	sDisableUserAddOns = get_safemode_boolean(B_SAFEMODE_DISABLE_USER_ADD_ONS,
		false);

	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	sNotificationService.Register();

	return B_OK;
}
2908 
2909 
2910 int32
2911 team_max_teams(void)
2912 {
2913 	return sMaxTeams;
2914 }
2915 
2916 
2917 int32
2918 team_used_teams(void)
2919 {
2920 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
2921 	return sUsedTeams;
2922 }
2923 
2924 
2925 /*! Returns a death entry of a child team specified by ID (if any).
2926 	The caller must hold the team's lock.
2927 
2928 	\param team The team whose dead children list to check.
	\param child The ID of the child whose death entry to look up. Must be > 0.
2930 	\param _deleteEntry Return variable, indicating whether the caller needs to
2931 		delete the returned entry.
2932 	\return The death entry of the matching team, or \c NULL, if no death entry
2933 		for the team was found.
2934 */
2935 job_control_entry*
2936 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2937 {
2938 	if (child <= 0)
2939 		return NULL;
2940 
2941 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2942 		child);
2943 	if (entry) {
2944 		// remove the entry only, if the caller is the parent of the found team
2945 		if (team_get_current_team_id() == entry->thread) {
2946 			team->dead_children.entries.Remove(entry);
2947 			team->dead_children.count--;
2948 			*_deleteEntry = true;
2949 		} else {
2950 			*_deleteEntry = false;
2951 		}
2952 	}
2953 
2954 	return entry;
2955 }
2956 
2957 
2958 /*! Quick check to see if we have a valid team ID. */
2959 bool
2960 team_is_valid(team_id id)
2961 {
2962 	if (id <= 0)
2963 		return false;
2964 
2965 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
2966 	return team_get_team_struct_locked(id) != NULL;
2967 }
2968 
2969 
2970 Team*
2971 team_get_team_struct_locked(team_id id)
2972 {
2973 	return sTeamHash.Lookup(id);
2974 }
2975 
2976 
2977 void
2978 team_set_controlling_tty(int32 ttyIndex)
2979 {
2980 	// lock the team, so its session won't change while we're playing with it
2981 	Team* team = thread_get_current_thread()->team;
2982 	TeamLocker teamLocker(team);
2983 
2984 	// get and lock the session
2985 	ProcessSession* session = team->group->Session();
2986 	AutoLocker<ProcessSession> sessionLocker(session);
2987 
2988 	// set the session's fields
2989 	session->controlling_tty = ttyIndex;
2990 	session->foreground_group = -1;
2991 }
2992 
2993 
2994 int32
2995 team_get_controlling_tty()
2996 {
2997 	// lock the team, so its session won't change while we're playing with it
2998 	Team* team = thread_get_current_thread()->team;
2999 	TeamLocker teamLocker(team);
3000 
3001 	// get and lock the session
3002 	ProcessSession* session = team->group->Session();
3003 	AutoLocker<ProcessSession> sessionLocker(session);
3004 
3005 	// get the session's field
3006 	return session->controlling_tty;
3007 }
3008 
3009 
/*!	Sets the foreground process group of the calling team's session
	(cf. tcsetpgrp()).

	\param ttyIndex The index of the terminal; must be the controlling
		terminal of the caller's session.
	\param processGroupID The ID of the new foreground process group; must
		belong to the caller's session.
	\return \c B_OK on success, \c ENOTTY when \a ttyIndex is not the
		caller's controlling terminal, \c B_BAD_VALUE for an invalid process
		group, or \c B_INTERRUPTED when SIGTTOU was sent to the caller's
		(background) process group instead.
*/
status_t
team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
{
	// lock the team, so its session won't change while we're playing with it
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// check given TTY -- must be the controlling tty of the calling process
	if (session->controlling_tty != ttyIndex)
		return ENOTTY;

	// check given process group -- must belong to our session
	{
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
		if (group == NULL || group->Session() != session)
			return B_BAD_VALUE;
	}

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
		InterruptsSpinLocker signalLocker(team->signal_lock);

		if (!is_team_signal_blocked(team, SIGTTOU)) {
			pid_t groupID = team->group_id;

			// drop all locks (in reverse acquisition order) before signaling
			signalLocker.Unlock();
			sessionLocker.Unlock();
			teamLocker.Unlock();

			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
			send_signal_to_process_group(groupID, signal, 0);
			return B_INTERRUPTED;
		}
	}

	session->foreground_group = processGroupID;

	return B_OK;
}
3059 
3060 
3061 uid_t
3062 team_geteuid(team_id id)
3063 {
3064 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3065 	Team* team = team_get_team_struct_locked(id);
3066 	if (team == NULL)
3067 		return (uid_t)-1;
3068 	return team->effective_uid;
3069 }
3070 
3071 
3072 /*!	Removes the specified team from the global team hash, from its process
3073 	group, and from its parent.
3074 	It also moves all of its children to the kernel team.
3075 
3076 	The caller must hold the following locks:
3077 	- \a team's process group's lock,
3078 	- the kernel team's lock,
3079 	- \a team's parent team's lock (might be the kernel team), and
3080 	- \a team's lock.
3081 */
void
team_remove_team(Team* team, pid_t& _signalGroup)
{
	Team* parent = team->parent;

	// remember how long this team lasted
	parent->dead_children.kernel_time += team->dead_threads_kernel_time
		+ team->dead_children.kernel_time;
	parent->dead_children.user_time += team->dead_threads_user_time
		+ team->dead_children.user_time;

	// remove the team from the hash table
	InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
	sTeamHash.Remove(team);
	sUsedTeams--;
	teamsLocker.Unlock();

	// The team can no longer be accessed by ID. Navigation to it is still
	// possible from its process group and its parent and children, but that
	// will be rectified shortly.
	team->state = TEAM_STATE_DEATH;

	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signalling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine, whom to signal. The result is returned to the caller in
	// \a _signalGroup (-1 when nothing has to be signaled).
	_signalGroup = -1;
	bool isSessionLeader = false;
	if (team->session_id == team->id
		&& team->group->Session()->controlling_tty >= 0) {
		isSessionLeader = true;

		ProcessSession* session = team->group->Session();

		AutoLocker<ProcessSession> sessionLocker(session);

		// detach the terminal from the dying session
		session->controlling_tty = -1;
		_signalGroup = session->foreground_group;
	}

	// remove us from our process group
	remove_team_from_group(team);

	// move the team's children to the kernel team
	while (Team* child = team->children) {
		// remove the child from the current team and add it to the kernel team
		TeamLocker childLocker(child);

		remove_team_from_parent(team, child);
		insert_team_into_parent(sKernelTeam, child);

		// move job control entries too
		sKernelTeam->stopped_children.entries.MoveFrom(
			&team->stopped_children.entries);
		sKernelTeam->continued_children.entries.MoveFrom(
			&team->continued_children.entries);

		// If the team was a session leader with controlling terminal,
		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
		// groups with stopped processes. Due to locking complications we can't
		// do that here, so we only check whether we were a reason for the
		// child's process group not being an orphan and, if so, schedule a
		// later check (cf. orphaned_process_group_check()).
		if (isSessionLeader) {
			ProcessGroup* childGroup = child->group;
			if (childGroup->Session()->id == team->session_id
				&& childGroup->id != team->group_id) {
				childGroup->ScheduleOrphanedCheck();
			}
		}

		// Note, we don't move the dead children entries. Those will be deleted
		// when the team structure is deleted.
	}

	// remove us from our parent
	remove_team_from_parent(parent, team);
}
3160 
3161 
3162 /*!	Kills all threads but the main thread of the team and shuts down user
3163 	debugging for it.
3164 	To be called on exit of the team's main thread. No locks must be held.
3165 
3166 	\param team The team in question.
3167 	\return The port of the debugger for the team, -1 if none. To be passed to
3168 		team_delete_team().
3169 */
port_id
team_shutdown_team(Team* team)
{
	// may only be called by the team's own main thread
	ASSERT(thread_get_current_thread() == team->main_thread);

	TeamLocker teamLocker(team);

	// Make sure debugging changes won't happen anymore.
	port_id debuggerPort = -1;
	while (true) {
		// If a debugger change is in progress for the team, we'll have to
		// wait until it is done.
		ConditionVariableEntry waitForDebuggerEntry;
		bool waitForDebugger = false;

		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition != NULL) {
			team->debug_info.debugger_changed_condition->Add(
				&waitForDebuggerEntry);
			waitForDebugger = true;
		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
			// The team is being debugged. That will stop with the termination
			// of the nub thread. Since we set the team state to death, no one
			// can install a debugger anymore. We fetch the debugger's port to
			// send it a message at the bitter end.
			debuggerPort = team->debug_info.debugger_port;
		}

		debugInfoLocker.Unlock();

		if (!waitForDebugger)
			break;

		// wait for the debugger change to be finished
		teamLocker.Unlock();

		waitForDebuggerEntry.Wait();

		teamLocker.Lock();
	}

	// Mark the team as shutting down. That will prevent new threads from being
	// created and debugger changes from taking place.
	team->state = TEAM_STATE_SHUTDOWN;

	// delete all timers
	team->DeleteUserTimers(false);

	// deactivate CPU time user timers for the team
	InterruptsSpinLocker timeLocker(team->time_lock);

	if (team->HasActiveCPUTimeUserTimers())
		team->DeactivateCPUTimeUserTimers();

	timeLocker.Unlock();

	// kill all threads but the main thread
	team_death_entry deathEntry;
	deathEntry.condition.Init(team, "team death");

	while (true) {
		team->death_entry = &deathEntry;
		deathEntry.remaining_threads = 0;

		// send SIGKILLTHR to every thread but the main one, counting how
		// many we have to wait for
		Thread* thread = team->thread_list;
		while (thread != NULL) {
			if (thread != team->main_thread) {
				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
				deathEntry.remaining_threads++;
			}

			thread = thread->team_next;
		}

		if (deathEntry.remaining_threads == 0)
			break;

		// there are threads to wait for
		ConditionVariableEntry entry;
		deathEntry.condition.Add(&entry);

		teamLocker.Unlock();

		entry.Wait();

		teamLocker.Lock();

		// loop: the thread list may have changed while we were waiting
	}

	team->death_entry = NULL;

	return debuggerPort;
}
3264 
3265 
3266 /*!	Called on team exit to notify threads waiting on the team and free most
3267 	resources associated with it.
3268 	The caller shouldn't hold any locks.
3269 */
void
team_delete_team(Team* team, port_id debuggerPort)
{
	// Not quite in our job description, but work that has been left by
	// team_remove_team() and that can be done now that we're not holding any
	// locks.
	orphaned_process_group_check();

	team_id teamID = team->id;

	// all threads (including the main thread's kernel part) must be gone
	ASSERT(team->num_threads == 0);

	// If someone is waiting for this team to be loaded, but it dies
	// unexpectedly before being done, we need to notify the waiting
	// thread now.

	TeamLocker teamLocker(team);

	if (team->loading_info) {
		// there's indeed someone waiting
		struct team_loading_info* loadingInfo = team->loading_info;
		team->loading_info = NULL;

		// the load can never complete now
		loadingInfo->result = B_ERROR;

		// wake up the waiting thread
		loadingInfo->condition.NotifyAll();
	}

	// notify team watchers

	{
		// we're not reachable from anyone anymore at this point, so we
		// can safely access the list without any locking
		struct team_watcher* watcher;
		while ((watcher = (struct team_watcher*)list_remove_head_item(
				&team->watcher_list)) != NULL) {
			watcher->hook(teamID, watcher->data);
			free(watcher);
		}
	}

	teamLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	// free team resources

	delete_realtime_sem_context(team->realtime_sem_context);
	xsi_sem_undo(team);
	remove_images(team);
	team->address_space->RemoveAndPut();

	// may destroy the Team object, if this was the last reference
	team->ReleaseReference();

	// notify the debugger, that the team is gone
	user_debug_team_deleted(teamID, debuggerPort);
}
3328 
3329 
3330 Team*
3331 team_get_kernel_team(void)
3332 {
3333 	return sKernelTeam;
3334 }
3335 
3336 
3337 team_id
3338 team_get_kernel_team_id(void)
3339 {
3340 	if (!sKernelTeam)
3341 		return 0;
3342 
3343 	return sKernelTeam->id;
3344 }
3345 
3346 
3347 team_id
3348 team_get_current_team_id(void)
3349 {
3350 	return thread_get_current_thread()->team->id;
3351 }
3352 
3353 
/*!	Returns a reference to the address space of the team with ID \a id in
	\a _addressSpace. The caller is responsible for putting the reference.
	\return \c B_OK on success, \c B_BAD_VALUE if no team with that ID exists.
*/
status_t
team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
{
	if (id == sKernelTeam->id) {
		// we're the kernel team, so we don't have to go through all
		// the hassle (locking and hash lookup)
		*_addressSpace = VMAddressSpace::GetKernel();
		return B_OK;
	}

	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);

	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return B_BAD_VALUE;

	// acquire the reference while still holding the hash lock
	team->address_space->Get();
	*_addressSpace = team->address_space;
	return B_OK;
}
3374 
3375 
3376 /*!	Sets the team's job control state.
3377 	The caller must hold the parent team's lock. Interrupts are allowed to be
3378 	enabled or disabled.
3379 	\a team The team whose job control state shall be set.
3380 	\a newState The new state to be set.
3381 	\a signal The signal the new state was caused by. Can \c NULL, if none. Then
3382 		the caller is responsible for filling in the following fields of the
3383 		entry before releasing the parent team's lock, unless the new state is
3384 		\c JOB_CONTROL_STATE_NONE:
3385 		- \c signal: The number of the signal causing the state change.
3386 		- \c signaling_user: The real UID of the user sending the signal.
3387 */
void
team_set_job_control_state(Team* team, job_control_state newState,
	Signal* signal)
{
	if (team == NULL || team->job_control_entry == NULL)
		return;

	// don't touch anything, if the state stays the same or the team is already
	// dead
	job_control_entry* entry = team->job_control_entry;
	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
		return;

	T(SetJobControlState(team->id, newState, signal));

	// remove from the old list
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry is in no list ATM
			break;
		case JOB_CONTROL_STATE_DEAD:
			// can't get here
			break;
		case JOB_CONTROL_STATE_STOPPED:
			team->parent->stopped_children.entries.Remove(entry);
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			team->parent->continued_children.entries.Remove(entry);
			break;
	}

	entry->state = newState;

	// record which signal triggered the transition, if provided (otherwise
	// the caller fills these in -- see function documentation)
	if (signal != NULL) {
		entry->signal = signal->Number();
		entry->signaling_user = signal->SendingUser();
	}

	// add to new list
	team_job_control_children* childList = NULL;
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry doesn't get into any list
			break;
		case JOB_CONTROL_STATE_DEAD:
			childList = &team->parent->dead_children;
			team->parent->dead_children.count++;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			childList = &team->parent->stopped_children;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			childList = &team->parent->continued_children;
			break;
	}

	if (childList != NULL) {
		childList->entries.Add(entry);
		// wake up a parent potentially blocked in wait_for_child()
		team->parent->dead_children.condition_variable.NotifyAll();
	}
}
3449 
3450 
3451 /*!	Inits the given team's exit information, if not yet initialized, to some
3452 	generic "killed" status.
3453 	The caller must not hold the team's lock. Interrupts must be enabled.
3454 
3455 	\param team The team whose exit info shall be initialized.
3456 */
3457 void
3458 team_init_exit_info_on_error(Team* team)
3459 {
3460 	TeamLocker teamLocker(team);
3461 
3462 	if (!team->exit.initialized) {
3463 		team->exit.reason = CLD_KILLED;
3464 		team->exit.signal = SIGKILL;
3465 		team->exit.signaling_user = geteuid();
3466 		team->exit.status = 0;
3467 		team->exit.initialized = true;
3468 	}
3469 }
3470 
3471 
3472 /*! Adds a hook to the team that is called as soon as this team goes away.
3473 	This call might get public in the future.
3474 */
3475 status_t
3476 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3477 {
3478 	if (hook == NULL || teamID < B_OK)
3479 		return B_BAD_VALUE;
3480 
3481 	// create the watcher object
3482 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3483 	if (watcher == NULL)
3484 		return B_NO_MEMORY;
3485 
3486 	watcher->hook = hook;
3487 	watcher->data = data;
3488 
3489 	// add watcher, if the team isn't already dying
3490 	// get the team
3491 	Team* team = Team::GetAndLock(teamID);
3492 	if (team == NULL) {
3493 		free(watcher);
3494 		return B_BAD_TEAM_ID;
3495 	}
3496 
3497 	list_add_item(&team->watcher_list, watcher);
3498 
3499 	team->UnlockAndReleaseReference();
3500 
3501 	return B_OK;
3502 }
3503 
3504 
3505 status_t
3506 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3507 {
3508 	if (hook == NULL || teamID < 0)
3509 		return B_BAD_VALUE;
3510 
3511 	// get team and remove watcher (if present)
3512 	Team* team = Team::GetAndLock(teamID);
3513 	if (team == NULL)
3514 		return B_BAD_TEAM_ID;
3515 
3516 	// search for watcher
3517 	team_watcher* watcher = NULL;
3518 	while ((watcher = (team_watcher*)list_get_next_item(
3519 			&team->watcher_list, watcher)) != NULL) {
3520 		if (watcher->hook == hook && watcher->data == data) {
3521 			// got it!
3522 			list_remove_item(&team->watcher_list, watcher);
3523 			break;
3524 		}
3525 	}
3526 
3527 	team->UnlockAndReleaseReference();
3528 
3529 	if (watcher == NULL)
3530 		return B_ENTRY_NOT_FOUND;
3531 
3532 	free(watcher);
3533 	return B_OK;
3534 }
3535 
3536 
3537 /*!	Allocates a user_thread structure from the team.
3538 	The team lock must be held, unless the function is called for the team's
3539 	main thread. Interrupts must be enabled.
3540 */
struct user_thread*
team_allocate_user_thread(Team* team)
{
	// no user data area set up (e.g. kernel team) -- nothing to allocate from
	if (team->user_data == 0)
		return NULL;

	// take an entry from the free list, if any
	if (struct free_user_thread* entry = team->free_user_threads) {
		user_thread* thread = entry->thread;
		team->free_user_threads = entry->next;
		free(entry);
		return thread;
	}

	while (true) {
		// enough space left?
		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
		if (team->user_data_size - team->used_user_data < needed) {
			// try to resize the area
			if (resize_area(team->user_data_area,
					team->user_data_size + B_PAGE_SIZE) != B_OK) {
				return NULL;
			}

			// resized user area successfully -- try to allocate the user_thread
			// again
			team->user_data_size += B_PAGE_SIZE;
			continue;
		}

		// allocate the user_thread by bumping the used-space watermark;
		// space is recycled through the free list, never returned here
		user_thread* thread
			= (user_thread*)(team->user_data + team->used_user_data);
		team->used_user_data += needed;

		return thread;
	}
}
3579 
3580 
3581 /*!	Frees the given user_thread structure.
3582 	The team's lock must not be held. Interrupts must be enabled.
3583 	\param team The team the user thread was allocated from.
3584 	\param userThread The user thread to free.
3585 */
3586 void
3587 team_free_user_thread(Team* team, struct user_thread* userThread)
3588 {
3589 	if (userThread == NULL)
3590 		return;
3591 
3592 	// create a free list entry
3593 	free_user_thread* entry
3594 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3595 	if (entry == NULL) {
3596 		// we have to leak the user thread :-/
3597 		return;
3598 	}
3599 
3600 	// add to free list
3601 	TeamLocker teamLocker(team);
3602 
3603 	entry->thread = userThread;
3604 	entry->next = team->free_user_threads;
3605 	team->free_user_threads = entry;
3606 }
3607 
3608 
3609 //	#pragma mark - Associated data interface
3610 
3611 
// Constructs data that is not yet attached to any owner.
AssociatedData::AssociatedData()
	:
	fOwner(NULL)
{
}
3617 
3618 
// Virtual destructor -- nothing to clean up in the base class.
AssociatedData::~AssociatedData()
{
}
3622 
3623 
/*!	Hook invoked when the owning AssociatedDataOwner is deleted.
	The default implementation does nothing; subclasses may override.
*/
void
AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
{
}
3628 
3629 
// Sets up the mutex guarding the associated-data list.
AssociatedDataOwner::AssociatedDataOwner()
{
	mutex_init(&fLock, "associated data owner");
}
3634 
3635 
// Tears down the list mutex; PrepareForDeletion() must have run before.
AssociatedDataOwner::~AssociatedDataOwner()
{
	mutex_destroy(&fLock);
}
3640 
3641 
3642 bool
3643 AssociatedDataOwner::AddData(AssociatedData* data)
3644 {
3645 	MutexLocker locker(fLock);
3646 
3647 	if (data->Owner() != NULL)
3648 		return false;
3649 
3650 	data->AcquireReference();
3651 	fList.Add(data);
3652 	data->SetOwner(this);
3653 
3654 	return true;
3655 }
3656 
3657 
3658 bool
3659 AssociatedDataOwner::RemoveData(AssociatedData* data)
3660 {
3661 	MutexLocker locker(fLock);
3662 
3663 	if (data->Owner() != this)
3664 		return false;
3665 
3666 	data->SetOwner(NULL);
3667 	fList.Remove(data);
3668 
3669 	locker.Unlock();
3670 
3671 	data->ReleaseReference();
3672 
3673 	return true;
3674 }
3675 
3676 
/*!	Detaches all associated data in preparation for the owner's deletion:
	each object's OwnerDeleted() hook is invoked and the reference acquired
	in AddData() is released.
*/
void
AssociatedDataOwner::PrepareForDeletion()
{
	MutexLocker locker(fLock);

	// move all data to a temporary list and unset the owner
	DataList list;
	list.MoveFrom(&fList);

	for (DataList::Iterator it = list.GetIterator();
		AssociatedData* data = it.Next();) {
		data->SetOwner(NULL);
	}

	locker.Unlock();

	// call the notification hooks and release our references
	while (AssociatedData* data = list.RemoveHead()) {
		data->OwnerDeleted(this);
		data->ReleaseReference();
	}
}
3699 
3700 
3701 /*!	Associates data with the current team.
3702 	When the team is deleted, the data object is notified.
3703 	The team acquires a reference to the object.
3704 
3705 	\param data The data object.
3706 	\return \c true on success, \c false otherwise. Fails only when the supplied
3707 		data object is already associated with another owner.
3708 */
3709 bool
3710 team_associate_data(AssociatedData* data)
3711 {
3712 	return thread_get_current_thread()->team->AddData(data);
3713 }
3714 
3715 
3716 /*!	Dissociates data from the current team.
3717 	Balances an earlier call to team_associate_data().
3718 
3719 	\param data The data object.
3720 	\return \c true on success, \c false otherwise. Fails only when the data
3721 		object is not associated with the current team.
3722 */
3723 bool
3724 team_dissociate_data(AssociatedData* data)
3725 {
3726 	return thread_get_current_thread()->team->RemoveData(data);
3727 }
3728 
3729 
3730 //	#pragma mark - Public kernel API
3731 
3732 
/*!	Loads the executable given by \a args and starts a new team running it,
	waiting until it has finished loading. Convenience wrapper around
	load_image_etc() with default priority and the current team as parent.
*/
thread_id
load_image(int32 argCount, const char** args, const char** env)
{
	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
}
3739 
3740 
3741 thread_id
3742 load_image_etc(int32 argCount, const char* const* args,
3743 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3744 {
3745 	// we need to flatten the args and environment
3746 
3747 	if (args == NULL)
3748 		return B_BAD_VALUE;
3749 
3750 	// determine total needed size
3751 	int32 argSize = 0;
3752 	for (int32 i = 0; i < argCount; i++)
3753 		argSize += strlen(args[i]) + 1;
3754 
3755 	int32 envCount = 0;
3756 	int32 envSize = 0;
3757 	while (env != NULL && env[envCount] != NULL)
3758 		envSize += strlen(env[envCount++]) + 1;
3759 
3760 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3761 	if (size > MAX_PROCESS_ARGS_SIZE)
3762 		return B_TOO_MANY_ARGS;
3763 
3764 	// allocate space
3765 	char** flatArgs = (char**)malloc(size);
3766 	if (flatArgs == NULL)
3767 		return B_NO_MEMORY;
3768 
3769 	char** slot = flatArgs;
3770 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3771 
3772 	// copy arguments and environment
3773 	for (int32 i = 0; i < argCount; i++) {
3774 		int32 argSize = strlen(args[i]) + 1;
3775 		memcpy(stringSpace, args[i], argSize);
3776 		*slot++ = stringSpace;
3777 		stringSpace += argSize;
3778 	}
3779 
3780 	*slot++ = NULL;
3781 
3782 	for (int32 i = 0; i < envCount; i++) {
3783 		int32 envSize = strlen(env[i]) + 1;
3784 		memcpy(stringSpace, env[i], envSize);
3785 		*slot++ = stringSpace;
3786 		stringSpace += envSize;
3787 	}
3788 
3789 	*slot++ = NULL;
3790 
3791 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3792 		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
3793 
3794 	free(flatArgs);
3795 		// load_image_internal() unset our variable if it took over ownership
3796 
3797 	return thread;
3798 }
3799 
3800 
3801 status_t
3802 wait_for_team(team_id id, status_t* _returnCode)
3803 {
3804 	// check whether the team exists
3805 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3806 
3807 	Team* team = team_get_team_struct_locked(id);
3808 	if (team == NULL)
3809 		return B_BAD_TEAM_ID;
3810 
3811 	id = team->id;
3812 
3813 	teamsLocker.Unlock();
3814 
3815 	// wait for the main thread (it has the same ID as the team)
3816 	return wait_for_thread(id, _returnCode);
3817 }
3818 
3819 
3820 status_t
3821 kill_team(team_id id)
3822 {
3823 	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3824 
3825 	Team* team = team_get_team_struct_locked(id);
3826 	if (team == NULL)
3827 		return B_BAD_TEAM_ID;
3828 
3829 	id = team->id;
3830 
3831 	teamsLocker.Unlock();
3832 
3833 	if (team == sKernelTeam)
3834 		return B_NOT_ALLOWED;
3835 
3836 	// Just kill the team's main thread (it has same ID as the team). The
3837 	// cleanup code there will take care of the team.
3838 	return kill_thread(id);
3839 }
3840 
3841 
3842 status_t
3843 _get_team_info(team_id id, team_info* info, size_t size)
3844 {
3845 	// get the team
3846 	Team* team = Team::Get(id);
3847 	if (team == NULL)
3848 		return B_BAD_TEAM_ID;
3849 	BReference<Team> teamReference(team, true);
3850 
3851 	// fill in the info
3852 	return fill_team_info(team, info, size);
3853 }
3854 
3855 
/*!	Iterates over all live teams. \a cookie holds the team ID (slot) at which
	to resume the search; it is advanced past the returned team.
	\return \c B_OK, or \c B_BAD_TEAM_ID when the iteration is exhausted.
*/
status_t
_get_next_team_info(int32* cookie, team_info* info, size_t size)
{
	int32 slot = *cookie;
	if (slot < 1)
		slot = 1;

	InterruptsReadSpinLocker locker(sTeamHashLock);

	team_id lastTeamID = peek_next_thread_id();
		// TODO: This is broken, since the id can wrap around!

	// get next valid team
	Team* team = NULL;
	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
		slot++;

	if (team == NULL)
		return B_BAD_TEAM_ID;

	// get a reference to the team and unlock
	BReference<Team> teamReference(team);
	locker.Unlock();

	// fill in the info
	*cookie = ++slot;
	return fill_team_info(team, info, size);
}
3884 
3885 
3886 status_t
3887 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3888 {
3889 	if (size != sizeof(team_usage_info))
3890 		return B_BAD_VALUE;
3891 
3892 	return common_get_team_usage_info(id, who, info, 0);
3893 }
3894 
3895 
3896 pid_t
3897 getpid(void)
3898 {
3899 	return thread_get_current_thread()->team->id;
3900 }
3901 
3902 
/*!	POSIX getppid(): returns the parent's process ID.
	Passing \c 0 to _getppid() means "the calling process".
*/
pid_t
getppid()
{
	return _getppid(0);
}
3908 
3909 
3910 pid_t
3911 getpgid(pid_t id)
3912 {
3913 	if (id < 0) {
3914 		errno = EINVAL;
3915 		return -1;
3916 	}
3917 
3918 	if (id == 0) {
3919 		// get process group of the calling process
3920 		Team* team = thread_get_current_thread()->team;
3921 		TeamLocker teamLocker(team);
3922 		return team->group_id;
3923 	}
3924 
3925 	// get the team
3926 	Team* team = Team::GetAndLock(id);
3927 	if (team == NULL) {
3928 		errno = ESRCH;
3929 		return -1;
3930 	}
3931 
3932 	// get the team's process group ID
3933 	pid_t groupID = team->group_id;
3934 
3935 	team->UnlockAndReleaseReference();
3936 
3937 	return groupID;
3938 }
3939 
3940 
3941 pid_t
3942 getsid(pid_t id)
3943 {
3944 	if (id < 0) {
3945 		errno = EINVAL;
3946 		return -1;
3947 	}
3948 
3949 	if (id == 0) {
3950 		// get session of the calling process
3951 		Team* team = thread_get_current_thread()->team;
3952 		TeamLocker teamLocker(team);
3953 		return team->session_id;
3954 	}
3955 
3956 	// get the team
3957 	Team* team = Team::GetAndLock(id);
3958 	if (team == NULL) {
3959 		errno = ESRCH;
3960 		return -1;
3961 	}
3962 
3963 	// get the team's session ID
3964 	pid_t sessionID = team->session_id;
3965 
3966 	team->UnlockAndReleaseReference();
3967 
3968 	return sessionID;
3969 }
3970 
3971 
3972 //	#pragma mark - User syscalls
3973 
3974 
/*!	Syscall backing exec*(): replaces the current team's address space with
	the program at \a userPath. Returns only on failure.
*/
status_t
_user_exec(const char* userPath, const char* const* userFlatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char path[B_PATH_NAME_LENGTH];

	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
		return B_BAD_ADDRESS;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);

	if (error == B_OK) {
		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
			envCount, umask);
			// this one only returns in case of error
	}

	free(flatArgs);
	return error;
}
4001 
4002 
/*!	Syscall backing fork(). */
thread_id
_user_fork(void)
{
	return fork_team();
}
4008 
4009 
/*!	Syscall backing waitpid()/waitid(): waits for a child state change and
	copies the resulting siginfo and usage information back to userland.
*/
pid_t
_user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo,
	team_usage_info* usageInfo)
{
	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;
	if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo))
		return B_BAD_ADDRESS;

	siginfo_t info;
	team_usage_info usage_info;
	pid_t foundChild = wait_for_child(child, flags, info, usage_info);
	if (foundChild < 0) {
		// negative result: possibly B_INTERRUPTED -- let the syscall restart
		// machinery decide
		return syscall_restart_handle_post(foundChild);
	}

	// copy info back to userland
	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
		return B_BAD_ADDRESS;
	// copy usage_info back to userland
	if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info,
		sizeof(usage_info)) != B_OK) {
		return B_BAD_ADDRESS;
	}

	return foundChild;
}
4036 
4037 
4038 pid_t
4039 _user_process_info(pid_t process, int32 which)
4040 {
4041 	pid_t result;
4042 	switch (which) {
4043 		case SESSION_ID:
4044 			result = getsid(process);
4045 			break;
4046 		case GROUP_ID:
4047 			result = getpgid(process);
4048 			break;
4049 		case PARENT_ID:
4050 			result = _getppid(process);
4051 			break;
4052 		default:
4053 			return B_BAD_VALUE;
4054 	}
4055 
4056 	return result >= 0 ? result : errno;
4057 }
4058 
4059 
/*!	Syscall backing setpgid().
	Moves the target process into the process group \a groupID (creating the
	group if it equals the target's ID), subject to the usual POSIX
	restrictions. Returns the group ID on success, an error code otherwise.
*/
pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the target
	//   process as group leader.
	// * Set the target process' process group to an already existing one in the
	//   same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			// (groups are locked in ascending ID order; a brand-new group
			// is invisible to others and can be locked at any time)
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock in
			// the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child of
			// the calling team and in the same session. (If that's the case it
			// isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child, when the child has
			// already executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different session.
			// That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll();

		return group->id;
	}
}
4211 
4212 
/*!	Syscall backing setsid(): creates a new session and a new process group
	with the calling team as leader of both. Fails with \c B_NOT_ALLOWED if
	the team already leads a process group.
*/
pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll();

	return group->id;
}
4252 
4253 
/*!	Syscall backing wait_for_team(): waits for the team to exit and copies
	its return code to \a _userReturnCode, if given.
*/
status_t
_user_wait_for_team(team_id id, status_t* _userReturnCode)
{
	status_t returnCode;
	status_t status;

	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
		return B_BAD_ADDRESS;

	status = wait_for_team(id, &returnCode);
	if (status >= B_OK && _userReturnCode != NULL) {
		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
				!= B_OK)
			return B_BAD_ADDRESS;
		return B_OK;
	}

	// on error (e.g. B_INTERRUPTED) let the restart machinery decide;
	// non-error status without a return-code pointer is passed through
	return syscall_restart_handle_post(status);
}
4273 
4274 
/*!	Syscall backing load_image(): copies the flattened argument/environment
	vector from userland and starts a new team from it.
*/
thread_id
_user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, int32 priority, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));

	// at least the program path must be given
	if (argCount < 1)
		return B_BAD_VALUE;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);
	if (error != B_OK)
		return error;

	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
		errorToken);

	free(flatArgs);
		// load_image_internal() unset our variable if it took over ownership

	return thread;
}
4301 
4302 
/*!	Syscall backing exit(): records the exit status and takes the team down
	by sending the calling thread a SIGKILL. Does not return to userland.
*/
void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status, unless one was already recorded
	TeamLocker teamLocker(team);

	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();

	// Send this thread a SIGKILL. This makes sure the thread will not return to
	// userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}
4336 
4337 
/*!	Syscall backing kill_team(). */
status_t
_user_kill_team(team_id team)
{
	return kill_team(team);
}
4343 
4344 
4345 status_t
4346 _user_get_team_info(team_id id, team_info* userInfo)
4347 {
4348 	status_t status;
4349 	team_info info;
4350 
4351 	if (!IS_USER_ADDRESS(userInfo))
4352 		return B_BAD_ADDRESS;
4353 
4354 	status = _get_team_info(id, &info, sizeof(team_info));
4355 	if (status == B_OK) {
4356 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4357 			return B_BAD_ADDRESS;
4358 	}
4359 
4360 	return status;
4361 }
4362 
4363 
4364 status_t
4365 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
4366 {
4367 	status_t status;
4368 	team_info info;
4369 	int32 cookie;
4370 
4371 	if (!IS_USER_ADDRESS(userCookie)
4372 		|| !IS_USER_ADDRESS(userInfo)
4373 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4374 		return B_BAD_ADDRESS;
4375 
4376 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
4377 	if (status != B_OK)
4378 		return status;
4379 
4380 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4381 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4382 		return B_BAD_ADDRESS;
4383 
4384 	return status;
4385 }
4386 
4387 
/*!	Syscall backend of get_current_team(): returns the ID of the calling
	thread's team.
*/
team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}
4393 
4394 
4395 status_t
4396 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4397 	size_t size)
4398 {
4399 	if (size != sizeof(team_usage_info))
4400 		return B_BAD_VALUE;
4401 
4402 	team_usage_info info;
4403 	status_t status = common_get_team_usage_info(team, who, &info,
4404 		B_CHECK_PERMISSION);
4405 
4406 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4407 		|| user_memcpy(userInfo, &info, size) != B_OK) {
4408 		return B_BAD_ADDRESS;
4409 	}
4410 
4411 	return status;
4412 }
4413 
4414 
/*!	Syscall backend of get_extended_team_info(): collects extended
	information about team \a teamID into a flattened KMessage written to
	the userland buffer \a buffer.
	\param flags Bit mask selecting what to include; only B_TEAM_INFO_BASIC
		is implemented (see TODO below).
	\param buffer Userland buffer receiving the flattened message; may be
		NULL only if \a size is 0.
	\param size Size of \a buffer in bytes.
	\param _sizeNeeded Userland location where the required size is always
		stored, even when \c B_BUFFER_OVERFLOW is returned, so the caller
		can retry with a large enough buffer.
*/
status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// allocate memory for a copy of the needed team data
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		} teamClone;

		io_context* ioContext;
		{
			// get the team structure
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			// both the reference and the lock are transferred (second
			// argument \c true) and released when this scope ends
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);

			// copy the data
			teamClone.id = team->id;
			strlcpy(teamClone.name, team->Name(), sizeof(teamClone.name));
			teamClone.group_id = team->group_id;
			teamClone.session_id = team->session_id;
			teamClone.real_uid = team->real_uid;
			teamClone.real_gid = team->real_gid;
			teamClone.effective_uid = team->effective_uid;
			teamClone.effective_gid = team->effective_gid;

			// also fetch a reference to the I/O context
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		// put the I/O context reference when we leave the function
		CObjectDeleter<io_context, void, vfs_put_io_context>
			ioContextPutter(ioContext);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone.id) != B_OK
			|| info.AddString("name", teamClone.name) != B_OK
			|| info.AddInt32("process group", teamClone.group_id) != B_OK
			|| info.AddInt32("session", teamClone.session_id) != B_OK
			|| info.AddInt32("uid", teamClone.real_uid) != B_OK
			|| info.AddInt32("gid", teamClone.real_gid) != B_OK
			|| info.AddInt32("euid", teamClone.effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone.effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			// hold the I/O context mutex only while reading cwd
			MutexLocker ioContextLocker(ioContext->io_mutex);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
4508