xref: /haiku/src/system/kernel/team.cpp (revision 8d2bf6953e851d431fc67de1bc970c40afa79e9f)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*!	Team functions */
12 
13 
14 #include <team.h>
15 
16 #include <errno.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include <string.h>
20 #include <sys/wait.h>
21 
22 #include <OS.h>
23 
24 #include <AutoDeleter.h>
25 #include <FindDirectory.h>
26 
27 #include <extended_system_info_defs.h>
28 
29 #include <commpage.h>
30 #include <boot_device.h>
31 #include <elf.h>
32 #include <file_cache.h>
33 #include <fs/KPath.h>
34 #include <heap.h>
35 #include <int.h>
36 #include <kernel.h>
37 #include <kimage.h>
38 #include <kscheduler.h>
39 #include <ksignal.h>
40 #include <Notifications.h>
41 #include <port.h>
42 #include <posix/realtime_sem.h>
43 #include <posix/xsi_semaphore.h>
44 #include <sem.h>
45 #include <syscall_process_info.h>
46 #include <syscall_restart.h>
47 #include <syscalls.h>
48 #include <tls.h>
49 #include <tracing.h>
50 #include <user_runtime.h>
51 #include <user_thread.h>
52 #include <usergroup.h>
53 #include <vfs.h>
54 #include <vm/vm.h>
55 #include <vm/VMAddressSpace.h>
56 #include <util/AutoLock.h>
57 
58 #include "TeamThreadTables.h"
59 
60 
61 //#define TRACE_TEAM
62 #ifdef TRACE_TEAM
63 #	define TRACE(x) dprintf x
64 #else
65 #	define TRACE(x) ;
66 #endif
67 
68 
69 struct team_key {
70 	team_id id;
71 };
72 
73 struct team_arg {
74 	char	*path;
75 	char	**flat_args;
76 	size_t	flat_args_size;
77 	uint32	arg_count;
78 	uint32	env_count;
79 	mode_t	umask;
80 	port_id	error_port;
81 	uint32	error_token;
82 };
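// Ownership note: team_arg instances are heap-allocated by create_team_arg(),
// which strdup()s the path and adopts the caller's flat_args buffer;
// free_team_arg() releases both together with the structure itself.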
83 
84 
85 namespace {
86 
87 
88 class TeamNotificationService : public DefaultNotificationService {
89 public:
90 							TeamNotificationService();
91 
92 			void			Notify(uint32 eventCode, Team* team);
93 };
94 
95 
96 // #pragma mark - TeamTable
97 
98 
99 typedef BKernel::TeamThreadTable<Team> TeamTable;
100 
101 
102 // #pragma mark - ProcessGroupHashDefinition
103 
104 
105 struct ProcessGroupHashDefinition {
106 	typedef pid_t			KeyType;
107 	typedef	ProcessGroup	ValueType;
108 
109 	size_t HashKey(pid_t key) const
110 	{
111 		return key;
112 	}
113 
114 	size_t Hash(ProcessGroup* value) const
115 	{
116 		return HashKey(value->id);
117 	}
118 
119 	bool Compare(pid_t key, ProcessGroup* value) const
120 	{
121 		return value->id == key;
122 	}
123 
124 	ProcessGroup*& GetLink(ProcessGroup* value) const
125 	{
126 		return value->next;
127 	}
128 };
129 
130 typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
131 
132 
133 }	// unnamed namespace
134 
135 
136 // #pragma mark -
137 
138 
139 // the team_id -> Team hash table and the lock protecting it
140 static TeamTable sTeamHash;
141 static spinlock sTeamHashLock = B_SPINLOCK_INITIALIZER;
142 
143 // the pid_t -> ProcessGroup hash table and the lock protecting it
144 static ProcessGroupHashTable sGroupHash;
145 static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;
146 
147 static Team* sKernelTeam = NULL;
148 
149 // A list of process groups of children of dying session leaders that need to
150 // be signalled if they have become orphaned and contain stopped processes.
151 static ProcessGroupList sOrphanedCheckProcessGroups;
152 static mutex sOrphanedCheckLock
153 	= MUTEX_INITIALIZER("orphaned process group check");
154 
155 // some arbitrarily chosen limits -- should probably depend on the available
156 // memory (the limit is not yet enforced)
157 static int32 sMaxTeams = 2048;
158 static int32 sUsedTeams = 1;
159 
160 static TeamNotificationService sNotificationService;
161 
162 static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
163 static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
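// The user data area is created with kTeamUserDataInitialSize (16 KiB with
// 4 KiB pages), while the full kTeamUserDataReservedSize range (512 KiB) is
// reserved in the address space up front, so that the area can later be
// resized in place (see create_team_user_data()).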
164 
165 
166 // #pragma mark - TeamListIterator
167 
168 
169 TeamListIterator::TeamListIterator()
170 {
171 	// queue the entry
172 	InterruptsSpinLocker locker(sTeamHashLock);
173 	sTeamHash.InsertIteratorEntry(&fEntry);
174 }
175 
176 
177 TeamListIterator::~TeamListIterator()
178 {
179 	// remove the entry
180 	InterruptsSpinLocker locker(sTeamHashLock);
181 	sTeamHash.RemoveIteratorEntry(&fEntry);
182 }
183 
184 
185 Team*
186 TeamListIterator::Next()
187 {
188 	// get the next team -- if there is one, get reference for it
189 	InterruptsSpinLocker locker(sTeamHashLock);
190 	Team* team = sTeamHash.NextElement(&fEntry);
191 	if (team != NULL)
192 		team->AcquireReference();
193 
194 	return team;
195 }
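// Usage sketch (hypothetical caller): iterating over all teams. Next()
// returns each team with a reference acquired, so the caller has to release
// it when done with the team:
//
//	TeamListIterator iterator;
//	while (Team* team = iterator.Next()) {
//		// ... work with the team ...
//		team->ReleaseReference();
//	}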
196 
197 
198 // #pragma mark - Tracing
199 
200 
201 #if TEAM_TRACING
202 namespace TeamTracing {
203 
204 class TeamForked : public AbstractTraceEntry {
205 public:
206 	TeamForked(thread_id forkedThread)
207 		:
208 		fForkedThread(forkedThread)
209 	{
210 		Initialized();
211 	}
212 
213 	virtual void AddDump(TraceOutput& out)
214 	{
215 		out.Print("team forked, new thread %ld", fForkedThread);
216 	}
217 
218 private:
219 	thread_id			fForkedThread;
220 };
221 
222 
223 class ExecTeam : public AbstractTraceEntry {
224 public:
225 	ExecTeam(const char* path, int32 argCount, const char* const* args,
226 			int32 envCount, const char* const* env)
227 		:
228 		fArgCount(argCount),
229 		fArgs(NULL)
230 	{
231 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
232 			false);
233 
234 		// determine the buffer size we need for the args
235 		size_t argBufferSize = 0;
236 		for (int32 i = 0; i < argCount; i++)
237 			argBufferSize += strlen(args[i]) + 1;
238 
239 		// allocate a buffer
240 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
241 		if (fArgs) {
242 			char* buffer = fArgs;
243 			for (int32 i = 0; i < argCount; i++) {
244 				size_t argSize = strlen(args[i]) + 1;
245 				memcpy(buffer, args[i], argSize);
246 				buffer += argSize;
247 			}
248 		}
249 
250 		// ignore env for the time being
251 		(void)envCount;
252 		(void)env;
253 
254 		Initialized();
255 	}
256 
257 	virtual void AddDump(TraceOutput& out)
258 	{
259 		out.Print("team exec, \"%s\", args:", fPath);
260 
261 		if (fArgs != NULL) {
262 			char* args = fArgs;
263 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
264 				out.Print(" \"%s\"", args);
265 				args += strlen(args) + 1;
266 			}
267 		} else
268 			out.Print(" <too long>");
269 	}
270 
271 private:
272 	char*	fPath;
273 	int32	fArgCount;
274 	char*	fArgs;
275 };
276 
277 
278 static const char*
279 job_control_state_name(job_control_state state)
280 {
281 	switch (state) {
282 		case JOB_CONTROL_STATE_NONE:
283 			return "none";
284 		case JOB_CONTROL_STATE_STOPPED:
285 			return "stopped";
286 		case JOB_CONTROL_STATE_CONTINUED:
287 			return "continued";
288 		case JOB_CONTROL_STATE_DEAD:
289 			return "dead";
290 		default:
291 			return "invalid";
292 	}
293 }
294 
295 
296 class SetJobControlState : public AbstractTraceEntry {
297 public:
298 	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
299 		:
300 		fTeam(team),
301 		fNewState(newState),
302 		fSignal(signal != NULL ? signal->Number() : 0)
303 	{
304 		Initialized();
305 	}
306 
307 	virtual void AddDump(TraceOutput& out)
308 	{
309 		out.Print("team set job control state, team %ld, "
310 			"new state: %s, signal: %d",
311 			fTeam, job_control_state_name(fNewState), fSignal);
312 	}
313 
314 private:
315 	team_id				fTeam;
316 	job_control_state	fNewState;
317 	int					fSignal;
318 };
319 
320 
321 class WaitForChild : public AbstractTraceEntry {
322 public:
323 	WaitForChild(pid_t child, uint32 flags)
324 		:
325 		fChild(child),
326 		fFlags(flags)
327 	{
328 		Initialized();
329 	}
330 
331 	virtual void AddDump(TraceOutput& out)
332 	{
333 		out.Print("team wait for child, child: %ld, "
334 			"flags: 0x%lx", fChild, fFlags);
335 	}
336 
337 private:
338 	pid_t	fChild;
339 	uint32	fFlags;
340 };
341 
342 
343 class WaitForChildDone : public AbstractTraceEntry {
344 public:
345 	WaitForChildDone(const job_control_entry& entry)
346 		:
347 		fState(entry.state),
348 		fTeam(entry.thread),
349 		fStatus(entry.status),
350 		fReason(entry.reason),
351 		fSignal(entry.signal)
352 	{
353 		Initialized();
354 	}
355 
356 	WaitForChildDone(status_t error)
357 		:
358 		fTeam(error)
359 	{
360 		Initialized();
361 	}
362 
363 	virtual void AddDump(TraceOutput& out)
364 	{
365 		if (fTeam >= 0) {
366 			out.Print("team wait for child done, team: %ld, "
367 				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
368 				fTeam, job_control_state_name(fState), fStatus, fReason,
369 				fSignal);
370 		} else {
371 			out.Print("team wait for child failed, error: "
372 				"0x%lx, ", fTeam);
373 		}
374 	}
375 
376 private:
377 	job_control_state	fState;
378 	team_id				fTeam;
379 	status_t			fStatus;
380 	uint16				fReason;
381 	uint16				fSignal;
382 };
383 
384 }	// namespace TeamTracing
385 
386 #	define T(x) new(std::nothrow) TeamTracing::x;
387 #else
388 #	define T(x) ;
389 #endif
390 
391 
392 //	#pragma mark - TeamNotificationService
393 
394 
395 TeamNotificationService::TeamNotificationService()
396 	: DefaultNotificationService("teams")
397 {
398 }
399 
400 
401 void
402 TeamNotificationService::Notify(uint32 eventCode, Team* team)
403 {
404 	char eventBuffer[128];
405 	KMessage event;
406 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
407 	event.AddInt32("event", eventCode);
408 	event.AddInt32("team", team->id);
409 	event.AddPointer("teamStruct", team);
410 
411 	DefaultNotificationService::Notify(event, eventCode);
412 }
413 
414 
415 //	#pragma mark - Team
416 
417 
418 Team::Team(team_id id, bool kernel)
419 {
420 	// set the ID (already allocated by the caller)
421 	this->id = id;
422 	visible = true;
423 	serial_number = -1;
424 
425 	// init mutex
426 	if (kernel) {
427 		mutex_init(&fLock, "Team:kernel");
428 	} else {
429 		char lockName[16];
430 		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
431 		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
432 	}
433 
434 	hash_next = siblings_next = children = parent = NULL;
435 	fName[0] = '\0';
436 	fArgs[0] = '\0';
437 	num_threads = 0;
438 	io_context = NULL;
439 	address_space = NULL;
440 	realtime_sem_context = NULL;
441 	xsi_sem_context = NULL;
442 	thread_list = NULL;
443 	main_thread = NULL;
444 	loading_info = NULL;
445 	state = TEAM_STATE_BIRTH;
446 	flags = 0;
447 	death_entry = NULL;
448 	user_data_area = -1;
449 	user_data = 0;
450 	used_user_data = 0;
451 	user_data_size = 0;
452 	free_user_threads = NULL;
453 
454 	commpage_address = NULL;
455 
456 	supplementary_groups = NULL;
457 	supplementary_group_count = 0;
458 
459 	dead_threads_kernel_time = 0;
460 	dead_threads_user_time = 0;
461 	cpu_clock_offset = 0;
462 
463 	// dead threads
464 	list_init(&dead_threads);
465 	dead_threads_count = 0;
466 
467 	// dead children
468 	dead_children.count = 0;
469 	dead_children.kernel_time = 0;
470 	dead_children.user_time = 0;
471 
472 	// job control entry
473 	job_control_entry = new(nothrow) ::job_control_entry;
474 	if (job_control_entry != NULL) {
475 		job_control_entry->state = JOB_CONTROL_STATE_NONE;
476 		job_control_entry->thread = id;
477 		job_control_entry->team = this;
478 	}
479 
480 	// exit status -- setting initialized to false suffices
481 	exit.initialized = false;
482 
483 	list_init(&sem_list);
484 	list_init(&port_list);
485 	list_init(&image_list);
486 	list_init(&watcher_list);
487 
488 	clear_team_debug_info(&debug_info, true);
489 
490 	// init dead/stopped/continued children condition vars
491 	dead_children.condition_variable.Init(&dead_children, "team children");
492 
493 	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
494 		kernel ? -1 : MAX_QUEUED_SIGNALS);
495 	memset(fSignalActions, 0, sizeof(fSignalActions));
496 
497 	fUserDefinedTimerCount = 0;
498 }
499 
500 
501 Team::~Team()
502 {
503 	// get rid of all associated data
504 	PrepareForDeletion();
505 
506 	if (io_context != NULL)
507 		vfs_put_io_context(io_context);
508 	delete_owned_ports(this);
509 	sem_delete_owned_sems(this);
510 
511 	DeleteUserTimers(false);
512 
513 	fPendingSignals.Clear();
514 
515 	if (fQueuedSignalsCounter != NULL)
516 		fQueuedSignalsCounter->ReleaseReference();
517 
518 	while (thread_death_entry* threadDeathEntry
519 			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
520 		free(threadDeathEntry);
521 	}
522 
523 	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
524 		delete entry;
525 
526 	while (free_user_thread* entry = free_user_threads) {
527 		free_user_threads = entry->next;
528 		free(entry);
529 	}
530 
531 	malloc_referenced_release(supplementary_groups);
532 
533 	delete job_control_entry;
534 		// usually already NULL and transferred to the parent
535 
536 	mutex_destroy(&fLock);
537 }
538 
539 
540 /*static*/ Team*
541 Team::Create(team_id id, const char* name, bool kernel)
542 {
543 	// create the team object
544 	Team* team = new(std::nothrow) Team(id, kernel);
545 	if (team == NULL)
546 		return NULL;
547 	ObjectDeleter<Team> teamDeleter(team);
548 
549 	if (name != NULL)
550 		team->SetName(name);
551 
552 	// check initialization
553 	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
554 		return NULL;
555 
556 	// finish initialization (arch specifics)
557 	if (arch_team_init_team_struct(team, kernel) != B_OK)
558 		return NULL;
559 
560 	if (!kernel) {
561 		status_t error = user_timer_create_team_timers(team);
562 		if (error != B_OK)
563 			return NULL;
564 	}
565 
566 	// everything went fine
567 	return teamDeleter.Detach();
568 }
569 
570 
571 /*!	\brief Returns the team with the given ID.
572 	Returns a reference to the team.
573 	Team and thread spinlock must not be held.
574 */
575 /*static*/ Team*
576 Team::Get(team_id id)
577 {
578 	if (id == B_CURRENT_TEAM) {
579 		Team* team = thread_get_current_thread()->team;
580 		team->AcquireReference();
581 		return team;
582 	}
583 
584 	InterruptsSpinLocker locker(sTeamHashLock);
585 	Team* team = sTeamHash.Lookup(id);
586 	if (team != NULL)
587 		team->AcquireReference();
588 	return team;
589 }
590 
591 
592 /*!	\brief Returns the team with the given ID in a locked state.
593 	Returns a reference to the team.
594 	Team and thread spinlock must not be held.
595 */
596 /*static*/ Team*
597 Team::GetAndLock(team_id id)
598 {
599 	// get the team
600 	Team* team = Get(id);
601 	if (team == NULL)
602 		return NULL;
603 
604 	// lock it
605 	team->Lock();
606 
607 	// only return the team when it isn't already dying
608 	if (team->state >= TEAM_STATE_SHUTDOWN) {
609 		team->Unlock();
610 		team->ReleaseReference();
611 		return NULL;
612 	}
613 
614 	return team;
615 }
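// Usage sketch (hypothetical caller): the usual pattern for operating on a
// team by ID. GetAndLock() returns the team referenced and locked, or NULL,
// if it doesn't exist or is already shutting down:
//
//	Team* team = Team::GetAndLock(id);
//	if (team == NULL)
//		return B_BAD_TEAM_ID;
//	// ... inspect or modify the team ...
//	team->Unlock();
//	team->ReleaseReference();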
616 
617 
618 /*!	Locks the team and its parent team (if any).
619 	The caller must hold a reference to the team or otherwise make sure that
620 	it won't be deleted.
621 	If the team doesn't have a parent, only the team itself is locked. If the
622 	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
623 	only the team itself is locked.
624 
625 	\param dontLockParentIfKernel If \c true, the team's parent team is locked
626 		only if it is not the kernel team.
627 */
628 void
629 Team::LockTeamAndParent(bool dontLockParentIfKernel)
630 {
631 	// The locking order is parent -> child. Since the parent can change as long
632 	// as we don't lock the team, we need to do a trial and error loop.
633 	Lock();
634 
635 	while (true) {
636 		// If the team doesn't have a parent, we're done. Otherwise try to lock
637 		// the parent. This will succeed in most cases, simplifying things.
638 		Team* parent = this->parent;
639 		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
640 			|| parent->TryLock()) {
641 			return;
642 		}
643 
644 		// get a temporary reference to the parent, unlock this team, lock the
645 		// parent, and re-lock this team
646 		BReference<Team> parentReference(parent);
647 
648 		Unlock();
649 		parent->Lock();
650 		Lock();
651 
652 		// If the parent hasn't changed in the meantime, we're done.
653 		if (this->parent == parent)
654 			return;
655 
656 		// The parent has changed -- unlock and retry.
657 		parent->Unlock();
658 	}
659 }
660 
661 
662 /*!	Unlocks the team and its parent team (if any).
663 */
664 void
665 Team::UnlockTeamAndParent()
666 {
667 	if (parent != NULL)
668 		parent->Unlock();
669 
670 	Unlock();
671 }
672 
673 
674 /*!	Locks the team, its parent team (if any), and the team's process group.
675 	The caller must hold a reference to the team or otherwise make sure that
676 	it won't be deleted.
677 	If the team doesn't have a parent, only the team itself is locked.
678 */
679 void
680 Team::LockTeamParentAndProcessGroup()
681 {
682 	LockTeamAndProcessGroup();
683 
684 	// We hold the group's and the team's lock, but not the parent team's lock.
685 	// If we have a parent, try to lock it.
686 	if (this->parent == NULL || this->parent->TryLock())
687 		return;
688 
689 	// No success -- unlock the team and let LockTeamAndParent() do the rest of
690 	// the job.
691 	Unlock();
692 	LockTeamAndParent(false);
693 }
694 
695 
696 /*!	Unlocks the team, its parent team (if any), and the team's process group.
697 */
698 void
699 Team::UnlockTeamParentAndProcessGroup()
700 {
701 	group->Unlock();
702 
703 	if (parent != NULL)
704 		parent->Unlock();
705 
706 	Unlock();
707 }
708 
709 
710 void
711 Team::LockTeamAndProcessGroup()
712 {
713 	// The locking order is process group -> team. Since the process group can
714 	// change as long as we don't lock the team, we need to do a trial and error
715 	// loop.
716 	Lock();
717 
718 	while (true) {
719 		// Try to lock the group. This will succeed in most cases, simplifying
720 		// things.
721 		ProcessGroup* group = this->group;
722 		if (group->TryLock())
723 			return;
724 
725 		// get a temporary reference to the group, unlock this team, lock the
726 		// group, and re-lock this team
727 		BReference<ProcessGroup> groupReference(group);
728 
729 		Unlock();
730 		group->Lock();
731 		Lock();
732 
733 		// If the group hasn't changed in the meantime, we're done.
734 		if (this->group == group)
735 			return;
736 
737 		// The group has changed -- unlock and retry.
738 		group->Unlock();
739 	}
740 }
741 
742 
743 void
744 Team::UnlockTeamAndProcessGroup()
745 {
746 	group->Unlock();
747 	Unlock();
748 }
749 
750 
751 void
752 Team::SetName(const char* name)
753 {
754 	if (const char* lastSlash = strrchr(name, '/'))
755 		name = lastSlash + 1;
756 
757 	strlcpy(fName, name, B_OS_NAME_LENGTH);
758 }
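// E.g. SetName("/boot/system/apps/Terminal") stores just "Terminal",
// truncated to B_OS_NAME_LENGTH - 1 characters if necessary.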
759 
760 
761 void
762 Team::SetArgs(const char* args)
763 {
764 	strlcpy(fArgs, args, sizeof(fArgs));
765 }
766 
767 
768 void
769 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
770 {
771 	fArgs[0] = '\0';
772 	strlcpy(fArgs, path, sizeof(fArgs));
773 	for (int i = 0; i < otherArgCount; i++) {
774 		strlcat(fArgs, " ", sizeof(fArgs));
775 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
776 	}
777 }
778 
779 
780 void
781 Team::ResetSignalsOnExec()
782 {
783 	// We are supposed to keep pending signals. Signal actions shall be reset
784 	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
785 	// (for SIGCHLD it's implementation-defined). Others shall be reset to
786 	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
787 	// flags, but since there aren't any handlers, they make little sense, so
788 	// we clear them.
789 
790 	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
791 		struct sigaction& action = SignalActionFor(i);
792 		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
793 			action.sa_handler = SIG_DFL;
794 
795 		action.sa_mask = 0;
796 		action.sa_flags = 0;
797 		action.sa_userdata = NULL;
798 	}
799 }
800 
801 
802 void
803 Team::InheritSignalActions(Team* parent)
804 {
805 	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
806 }
807 
808 
809 /*!	Adds the given user timer to the team and, if user-defined, assigns it an
810 	ID.
811 
812 	The caller must hold the team's lock.
813 
814 	\param timer The timer to be added. If it doesn't have an ID yet, it is
815 		considered user-defined and will be assigned an ID.
816 	\return \c B_OK, if the timer was added successfully, another error code
817 		otherwise.
818 */
819 status_t
820 Team::AddUserTimer(UserTimer* timer)
821 {
822 	// don't allow addition of timers when already shutting the team down
823 	if (state >= TEAM_STATE_SHUTDOWN)
824 		return B_BAD_TEAM_ID;
825 
826 	// If the timer is user-defined, check timer limit and increment
827 	// user-defined count.
828 	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
829 		return EAGAIN;
830 
831 	fUserTimers.AddTimer(timer);
832 
833 	return B_OK;
834 }
835 
836 
837 /*!	Removes the given user timer from the team.
838 
839 	The caller must hold the team's lock.
840 
841 	\param timer The timer to be removed.
842 
843 */
844 void
845 Team::RemoveUserTimer(UserTimer* timer)
846 {
847 	fUserTimers.RemoveTimer(timer);
848 
849 	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
850 		UserDefinedTimersRemoved(1);
851 }
852 
853 
854 /*!	Deletes all (or all user-defined) user timers of the team.
855 
856 	Timers belonging to the team's threads are not affected.
857 	The caller must hold the team's lock.
858 
859 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
860 		otherwise all timers are deleted.
861 */
862 void
863 Team::DeleteUserTimers(bool userDefinedOnly)
864 {
865 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
866 	UserDefinedTimersRemoved(count);
867 }
868 
869 
870 /*!	If not at the limit yet, increments the team's user-defined timer count.
871 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
872 */
873 bool
874 Team::CheckAddUserDefinedTimer()
875 {
876 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
877 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
878 		atomic_add(&fUserDefinedTimerCount, -1);
879 		return false;
880 	}
881 
882 	return true;
883 }
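// Note: This is the optimistic increment-then-check idiom -- the counter is
// incremented first and rolled back if the limit had already been reached,
// which keeps the check race-free among concurrent callers without a lock.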
884 
885 
886 /*!	Subtracts the given count from the team's user-defined timer count.
887 	\param count The count to subtract.
888 */
889 void
890 Team::UserDefinedTimersRemoved(int32 count)
891 {
892 	atomic_add(&fUserDefinedTimerCount, -count);
893 }
894 
895 
896 void
897 Team::DeactivateCPUTimeUserTimers()
898 {
899 	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
900 		timer->Deactivate();
901 
902 	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
903 		timer->Deactivate();
904 }
905 
906 
907 /*!	Returns the team's current total CPU time (kernel + user + offset).
908 
909 	The caller must hold the scheduler lock.
910 
911 	\param ignoreCurrentRun If \c true and the current thread is one of the
912 		team's threads, don't add the time since the last time \c last_time was
913 		updated. Should be used in "thread unscheduled" scheduler callbacks,
914 		since although the thread is still running at that time, its time has
915 		already been stopped.
916 	\return The team's current total CPU time.
917 */
918 bigtime_t
919 Team::CPUTime(bool ignoreCurrentRun) const
920 {
921 	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
922 		+ dead_threads_user_time;
923 
924 	Thread* currentThread = thread_get_current_thread();
925 	bigtime_t now = system_time();
926 
927 	for (Thread* thread = thread_list; thread != NULL;
928 			thread = thread->team_next) {
929 		SpinLocker threadTimeLocker(thread->time_lock);
930 		time += thread->kernel_time + thread->user_time;
931 
932 		if (thread->IsRunning()) {
933 			if (!ignoreCurrentRun || thread != currentThread)
934 				time += now - thread->last_time;
935 		}
936 	}
937 
938 	return time;
939 }
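// In other words: CPUTime() = cpu_clock_offset + accumulated times of dead
// threads + the kernel and user times of all live threads, plus, for threads
// currently on a CPU, the time elapsed since they were last scheduled.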
940 
941 
942 /*!	Returns the team's current user CPU time.
943 
944 	The caller must hold the scheduler lock.
945 
946 	\return The team's current user CPU time.
947 */
948 bigtime_t
949 Team::UserCPUTime() const
950 {
951 	bigtime_t time = dead_threads_user_time;
952 
953 	bigtime_t now = system_time();
954 
955 	for (Thread* thread = thread_list; thread != NULL;
956 			thread = thread->team_next) {
957 		SpinLocker threadTimeLocker(thread->time_lock);
958 		time += thread->user_time;
959 
960 		if (thread->IsRunning() && !thread->in_kernel)
961 			time += now - thread->last_time;
962 	}
963 
964 	return time;
965 }
966 
967 
968 //	#pragma mark - ProcessGroup
969 
970 
971 ProcessGroup::ProcessGroup(pid_t id)
972 	:
973 	id(id),
974 	teams(NULL),
975 	fSession(NULL),
976 	fInOrphanedCheckList(false)
977 {
978 	char lockName[32];
979 	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
980 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
981 }
982 
983 
984 ProcessGroup::~ProcessGroup()
985 {
986 	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));
987 
988 	// If the group is in the orphaned check list, remove it.
989 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
990 
991 	if (fInOrphanedCheckList)
992 		sOrphanedCheckProcessGroups.Remove(this);
993 
994 	orphanedCheckLocker.Unlock();
995 
996 	// remove group from the hash table and from the session
997 	if (fSession != NULL) {
998 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
999 		sGroupHash.RemoveUnchecked(this);
1000 		groupHashLocker.Unlock();
1001 
1002 		fSession->ReleaseReference();
1003 	}
1004 
1005 	mutex_destroy(&fLock);
1006 }
1007 
1008 
1009 /*static*/ ProcessGroup*
1010 ProcessGroup::Get(pid_t id)
1011 {
1012 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1013 	ProcessGroup* group = sGroupHash.Lookup(id);
1014 	if (group != NULL)
1015 		group->AcquireReference();
1016 	return group;
1017 }
1018 
1019 
1020 /*!	Adds the group to the given session and makes it publicly accessible.
1021 	The caller must not hold the process group hash lock.
1022 */
1023 void
1024 ProcessGroup::Publish(ProcessSession* session)
1025 {
1026 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1027 	PublishLocked(session);
1028 }
1029 
1030 
1031 /*!	Adds the group to the given session and makes it publicly accessible.
1032 	The caller must hold the process group hash lock.
1033 */
1034 void
1035 ProcessGroup::PublishLocked(ProcessSession* session)
1036 {
1037 	ASSERT(sGroupHash.Lookup(this->id) == NULL);
1038 
1039 	fSession = session;
1040 	fSession->AcquireReference();
1041 
1042 	sGroupHash.InsertUnchecked(this);
1043 }
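// Usage sketch (hypothetical caller): creating a new process group and making
// it visible in an existing session. Publish() acquires the group hash lock
// itself; code that already holds the lock uses PublishLocked() instead:
//
//	ProcessGroup* group = new(std::nothrow) ProcessGroup(groupID);
//	if (group == NULL)
//		return B_NO_MEMORY;
//	group->Publish(session);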
1044 
1045 
1046 /*!	Checks whether the process group is orphaned.
1047 	The caller must hold the group's lock.
1048 	\return \c true, if the group is orphaned, \c false otherwise.
1049 */
1050 bool
1051 ProcessGroup::IsOrphaned() const
1052 {
1053 	// Orphaned Process Group: "A process group in which the parent of every
1054 	// member is either itself a member of the group or is not a member of the
1055 	// group's session." (Open Group Base Specs Issue 7)
1056 	bool orphaned = true;
1057 
1058 	Team* team = teams;
1059 	while (orphaned && team != NULL) {
1060 		team->LockTeamAndParent(false);
1061 
1062 		Team* parent = team->parent;
1063 		if (parent != NULL && parent->group_id != id
1064 			&& parent->session_id == fSession->id) {
1065 			orphaned = false;
1066 		}
1067 
1068 		team->UnlockTeamAndParent();
1069 
1070 		team = team->group_next;
1071 	}
1072 
1073 	return orphaned;
1074 }
1075 
1076 
1077 void
1078 ProcessGroup::ScheduleOrphanedCheck()
1079 {
1080 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1081 
1082 	if (!fInOrphanedCheckList) {
1083 		sOrphanedCheckProcessGroups.Add(this);
1084 		fInOrphanedCheckList = true;
1085 	}
1086 }
1087 
1088 
1089 void
1090 ProcessGroup::UnsetOrphanedCheck()
1091 {
1092 	fInOrphanedCheckList = false;
1093 }
1094 
1095 
1096 //	#pragma mark - ProcessSession
1097 
1098 
1099 ProcessSession::ProcessSession(pid_t id)
1100 	:
1101 	id(id),
1102 	controlling_tty(-1),
1103 	foreground_group(-1)
1104 {
1105 	char lockName[32];
1106 	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
1107 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
1108 }
1109 
1110 
1111 ProcessSession::~ProcessSession()
1112 {
1113 	mutex_destroy(&fLock);
1114 }
1115 
1116 
1117 //	#pragma mark - KDL functions
1118 
1119 
1120 static void
1121 _dump_team_info(Team* team)
1122 {
1123 	kprintf("TEAM: %p\n", team);
1124 	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
1125 		team->id);
1126 	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
1127 	kprintf("name:             '%s'\n", team->Name());
1128 	kprintf("args:             '%s'\n", team->Args());
1129 	kprintf("hash_next:        %p\n", team->hash_next);
1130 	kprintf("parent:           %p", team->parent);
1131 	if (team->parent != NULL) {
1132 		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
1133 	} else
1134 		kprintf("\n");
1135 
1136 	kprintf("children:         %p\n", team->children);
1137 	kprintf("num_threads:      %d\n", team->num_threads);
1138 	kprintf("state:            %d\n", team->state);
1139 	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
1140 	kprintf("io_context:       %p\n", team->io_context);
1141 	if (team->address_space)
1142 		kprintf("address_space:    %p\n", team->address_space);
1143 	kprintf("user data:        %p (area %" B_PRId32 ")\n",
1144 		(void*)team->user_data, team->user_data_area);
1145 	kprintf("free user thread: %p\n", team->free_user_threads);
1146 	kprintf("main_thread:      %p\n", team->main_thread);
1147 	kprintf("thread_list:      %p\n", team->thread_list);
1148 	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
1149 	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
1150 }
1151 
1152 
1153 static int
1154 dump_team_info(int argc, char** argv)
1155 {
1156 	ulong arg;
1157 	bool found = false;
1158 
1159 	if (argc < 2) {
1160 		Thread* thread = thread_get_current_thread();
1161 		if (thread != NULL && thread->team != NULL)
1162 			_dump_team_info(thread->team);
1163 		else
1164 			kprintf("No current team!\n");
1165 		return 0;
1166 	}
1167 
1168 	arg = strtoul(argv[1], NULL, 0);
1169 	if (IS_KERNEL_ADDRESS(arg)) {
1170 		// semi-hack
1171 		_dump_team_info((Team*)arg);
1172 		return 0;
1173 	}
1174 
1175 	// walk through the thread list, trying to match name or id
1176 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1177 		Team* team = it.Next();) {
1178 		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
1179 			|| team->id == (team_id)arg) {
1180 			_dump_team_info(team);
1181 			found = true;
1182 			break;
1183 		}
1184 	}
1185 
1186 	if (!found)
1187 		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
1188 	return 0;
1189 }
1190 
1191 
1192 static int
1193 dump_teams(int argc, char** argv)
1194 {
1195 	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
1196 		B_PRINTF_POINTER_WIDTH, "parent");
1197 
1198 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1199 		Team* team = it.Next();) {
1200 		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
1201 	}
1202 
1203 	return 0;
1204 }
1205 
1206 
1207 //	#pragma mark - Private functions
1208 
1209 
1210 /*!	Inserts team \a team into the child list of team \a parent.
1211 
1212 	The caller must hold the lock of both \a parent and \a team.
1213 
1214 	\param parent The parent team.
1215 	\param team The team to be inserted into \a parent's child list.
1216 */
1217 static void
1218 insert_team_into_parent(Team* parent, Team* team)
1219 {
1220 	ASSERT(parent != NULL);
1221 
1222 	team->siblings_next = parent->children;
1223 	parent->children = team;
1224 	team->parent = parent;
1225 }
1226 
1227 
1228 /*!	Removes team \a team from the child list of team \a parent.
1229 
1230 	The caller must hold the lock of both \a parent and \a team.
1231 
1232 	\param parent The parent team.
1233 	\param team The team to be removed from \a parent's child list.
1234 */
1235 static void
1236 remove_team_from_parent(Team* parent, Team* team)
1237 {
1238 	Team* child;
1239 	Team* last = NULL;
1240 
1241 	for (child = parent->children; child != NULL;
1242 			child = child->siblings_next) {
1243 		if (child == team) {
1244 			if (last == NULL)
1245 				parent->children = child->siblings_next;
1246 			else
1247 				last->siblings_next = child->siblings_next;
1248 
1249 			team->parent = NULL;
1250 			break;
1251 		}
1252 		last = child;
1253 	}
1254 }
1255 
1256 
1257 /*!	Returns whether the given team is a session leader.
1258 	The caller must hold the team's lock or its process group's lock.
1259 */
1260 static bool
1261 is_session_leader(Team* team)
1262 {
1263 	return team->session_id == team->id;
1264 }
1265 
1266 
1267 /*!	Returns whether the given team is a process group leader.
1268 	The caller must hold the team's lock or its process group's lock.
1269 */
1270 static bool
1271 is_process_group_leader(Team* team)
1272 {
1273 	return team->group_id == team->id;
1274 }
1275 
1276 
1277 /*!	Inserts the given team into the given process group.
1278 	The caller must hold the process group's lock, the team's lock, and the
1279 	team's parent's lock.
1280 */
1281 static void
1282 insert_team_into_group(ProcessGroup* group, Team* team)
1283 {
1284 	team->group = group;
1285 	team->group_id = group->id;
1286 	team->session_id = group->Session()->id;
1287 
1288 	team->group_next = group->teams;
1289 	group->teams = team;
1290 	group->AcquireReference();
1291 }
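// Note: insert_team_into_group() acquires a group reference that its
// counterpart remove_team_from_group() releases again.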
1292 
1293 
1294 /*!	Removes the given team from its process group.
1295 
1296 	The caller must hold the process group's lock, the team's lock, and the
1297 	team's parent's lock. Interrupts must be enabled.
1298 
1299 	\param team The team that'll be removed from its process group.
1300 */
1301 static void
1302 remove_team_from_group(Team* team)
1303 {
1304 	ProcessGroup* group = team->group;
1305 	Team* current;
1306 	Team* last = NULL;
1307 
1308 	// the team must be in a process group to let this function have any effect
1309 	if (group == NULL)
1310 		return;
1311 
1312 	for (current = group->teams; current != NULL;
1313 			current = current->group_next) {
1314 		if (current == team) {
1315 			if (last == NULL)
1316 				group->teams = current->group_next;
1317 			else
1318 				last->group_next = current->group_next;
1319 
1320 			team->group = NULL;
1321 			break;
1322 		}
1323 		last = current;
1324 	}
1325 
1326 	team->group = NULL;
1327 	team->group_next = NULL;
1328 
1329 	group->ReleaseReference();
1330 }
1331 
1332 
1333 static status_t
1334 create_team_user_data(Team* team, void* exactAddress = NULL)
1335 {
1336 	void* address;
1337 	uint32 addressSpec;
1338 
1339 	if (exactAddress != NULL) {
1340 		address = exactAddress;
1341 		addressSpec = B_EXACT_ADDRESS;
1342 	} else {
1343 		address = (void*)KERNEL_USER_DATA_BASE;
1344 		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
1345 	}
1346 
1347 	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
1348 		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);
1349 
1350 	virtual_address_restrictions virtualRestrictions = {};
1351 	if (result == B_OK || exactAddress != NULL) {
1352 		if (exactAddress != NULL)
1353 			virtualRestrictions.address = exactAddress;
1354 		else
1355 			virtualRestrictions.address = address;
1356 		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
1357 	} else {
1358 		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
1359 		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
1360 	}
1361 
1362 	physical_address_restrictions physicalRestrictions = {};
1363 	team->user_data_area = create_area_etc(team->id, "user area",
1364 		kTeamUserDataInitialSize, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0,
1365 		&virtualRestrictions, &physicalRestrictions, &address);
1366 	if (team->user_data_area < 0)
1367 		return team->user_data_area;
1368 
1369 	team->user_data = (addr_t)address;
1370 	team->used_user_data = 0;
1371 	team->user_data_size = kTeamUserDataInitialSize;
1372 	team->free_user_threads = NULL;
1373 
1374 	return B_OK;
1375 }
1376 
1377 
1378 static void
1379 delete_team_user_data(Team* team)
1380 {
1381 	if (team->user_data_area >= 0) {
1382 		vm_delete_area(team->id, team->user_data_area, true);
1383 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1384 			kTeamUserDataReservedSize);
1385 
1386 		team->user_data = 0;
1387 		team->used_user_data = 0;
1388 		team->user_data_size = 0;
1389 		team->user_data_area = -1;
1390 		while (free_user_thread* entry = team->free_user_threads) {
1391 			team->free_user_threads = entry->next;
1392 			free(entry);
1393 		}
1394 	}
1395 }
1396 
1397 
1398 static status_t
1399 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1400 	int32 argCount, int32 envCount, char**& _flatArgs)
1401 {
1402 	if (argCount < 0 || envCount < 0)
1403 		return B_BAD_VALUE;
1404 
1405 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1406 		return B_TOO_MANY_ARGS;
1407 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1408 		return B_BAD_VALUE;
1409 
1410 	if (!IS_USER_ADDRESS(userFlatArgs))
1411 		return B_BAD_ADDRESS;
1412 
1413 	// allocate kernel memory
1414 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1415 	if (flatArgs == NULL)
1416 		return B_NO_MEMORY;
1417 
1418 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1419 		free(flatArgs);
1420 		return B_BAD_ADDRESS;
1421 	}
1422 
1423 	// check and relocate the array
1424 	status_t error = B_OK;
1425 	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
1426 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1427 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1428 		if (i == argCount || i == argCount + envCount + 1) {
1429 			// check array null termination
1430 			if (flatArgs[i] != NULL) {
1431 				error = B_BAD_VALUE;
1432 				break;
1433 			}
1434 		} else {
1435 			// check string
1436 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1437 			size_t maxLen = stringEnd - arg;
1438 			if (arg < stringBase || arg >= stringEnd
1439 					|| strnlen(arg, maxLen) == maxLen) {
1440 				error = B_BAD_VALUE;
1441 				break;
1442 			}
1443 
1444 			flatArgs[i] = arg;
1445 		}
1446 	}
1447 
1448 	if (error == B_OK)
1449 		_flatArgs = flatArgs;
1450 	else
1451 		free(flatArgs);
1452 
1453 	return error;
1454 }
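// Layout of the flattened buffer validated above (the pointer array first,
// the NUL-terminated strings following at stringBase):
//
//	flatArgs[0 .. argCount - 1]                    argument pointers
//	flatArgs[argCount]                             NULL terminator
//	flatArgs[argCount + 1 .. argCount + envCount]  environment pointers
//	flatArgs[argCount + envCount + 1]              NULL terminator
//	stringBase .. stringEnd                        the strings themselves
//
// The pointer entries are relocated from user space addresses to the
// corresponding locations within the kernel copy before the buffer is used.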
1455 
1456 
1457 static void
1458 free_team_arg(struct team_arg* teamArg)
1459 {
1460 	if (teamArg != NULL) {
1461 		free(teamArg->flat_args);
1462 		free(teamArg->path);
1463 		free(teamArg);
1464 	}
1465 }
1466 
1467 
1468 static status_t
1469 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1470 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1471 	port_id port, uint32 token)
1472 {
1473 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1474 	if (teamArg == NULL)
1475 		return B_NO_MEMORY;
1476 
1477 	teamArg->path = strdup(path);
1478 	if (teamArg->path == NULL) {
1479 		free(teamArg);
1480 		return B_NO_MEMORY;
1481 	}
1482 
1483 	// copy the args over
1484 
1485 	teamArg->flat_args = flatArgs;
1486 	teamArg->flat_args_size = flatArgsSize;
1487 	teamArg->arg_count = argCount;
1488 	teamArg->env_count = envCount;
1489 	teamArg->umask = umask;
1490 	teamArg->error_port = port;
1491 	teamArg->error_token = token;
1492 
1493 	*_teamArg = teamArg;
1494 	return B_OK;
1495 }
1496 
1497 
1498 static status_t
1499 team_create_thread_start_internal(void* args)
1500 {
1501 	status_t err;
1502 	Thread* thread;
1503 	Team* team;
1504 	struct team_arg* teamArgs = (struct team_arg*)args;
1505 	const char* path;
1506 	addr_t entry;
1507 	char** userArgs;
1508 	char** userEnv;
1509 	struct user_space_program_args* programArgs;
1510 	uint32 argCount, envCount;
1511 
1512 	thread = thread_get_current_thread();
1513 	team = thread->team;
1514 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
1515 
1516 	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
1517 		thread->id));
1518 
1519 	// Main stack area layout is currently as follows (starting from 0):
1520 	//
1521 	// size								| usage
1522 	// ---------------------------------+--------------------------------
1523 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
1524 	// TLS_SIZE							| TLS data
1525 	// sizeof(user_space_program_args)	| argument structure for the runtime
1526 	//									| loader
1527 	// flat arguments size				| flat process arguments and environment
1528 
1529 	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
1530 	// the heap
1531 	// TODO: we could reserve the whole USER_STACK_REGION upfront...
1532 
1533 	argCount = teamArgs->arg_count;
1534 	envCount = teamArgs->env_count;
1535 
1536 	programArgs = (struct user_space_program_args*)(thread->user_stack_base
1537 		+ thread->user_stack_size + TLS_SIZE);
1538 
1539 	userArgs = (char**)(programArgs + 1);
1540 	userEnv = userArgs + argCount + 1;
1541 	path = teamArgs->path;
1542 
1543 	if (user_strlcpy(programArgs->program_path, path,
1544 				sizeof(programArgs->program_path)) < B_OK
1545 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1546 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
1547 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1548 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
1549 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1550 				sizeof(port_id)) < B_OK
1551 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1552 				sizeof(uint32)) < B_OK
1553 		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
1554 		|| user_memcpy(userArgs, teamArgs->flat_args,
1555 				teamArgs->flat_args_size) < B_OK) {
1556 		// the team deletion process will clean this mess
1557 		free_team_arg(teamArgs);
1558 		return B_BAD_ADDRESS;
1559 	}
1560 
1561 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1562 
1563 	// set team args and update state
1564 	team->Lock();
1565 	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
1566 	team->state = TEAM_STATE_NORMAL;
1567 	team->Unlock();
1568 
1569 	free_team_arg(teamArgs);
1570 		// the arguments are already on the user stack, we no longer need
1571 		// them in this form
1572 
1573 	// Clone commpage area
1574 	area_id commPageArea = clone_commpage_area(team->id,
1575 		&team->commpage_address);
1576 	if (commPageArea < B_OK) {
1577 		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
1578 			strerror(commPageArea)));
1579 		return commPageArea;
1580 	}
1581 
1582 	// Register commpage image
1583 	image_id commPageImage = get_commpage_image();
1584 	image_info imageInfo;
1585 	err = get_image_info(commPageImage, &imageInfo);
1586 	if (err != B_OK) {
1587 		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
1588 			strerror(err)));
1589 		return err;
1590 	}
1591 	imageInfo.text = team->commpage_address;
1592 	image_id image = register_image(team, &imageInfo, sizeof(image_info));
1593 	if (image < 0) {
1594 		TRACE(("team_create_thread_start: register_image() failed: %s\n",
1595 			strerror(image)));
1596 		return image;
1597 	}
1598 
1599 	// NOTE: Normally arch_thread_enter_userspace() never returns, that is,
1600 	// automatic variables with function scope will never be destroyed.
1601 	{
1602 		// find runtime_loader path
1603 		KPath runtimeLoaderPath;
1604 		err = find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
1605 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1606 		if (err < B_OK) {
1607 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1608 				strerror(err)));
1609 			return err;
1610 		}
1611 		runtimeLoaderPath.UnlockBuffer();
1612 		err = runtimeLoaderPath.Append("runtime_loader");
1613 
1614 		if (err == B_OK) {
1615 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
1616 				&entry);
1617 		}
1618 	}
1619 
1620 	if (err < B_OK) {
1621 		// Luckily, we don't have to clean up the mess we created - that's
1622 		// done for us by the normal team deletion process
1623 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1624 			"%s\n", strerror(err)));
1625 		return err;
1626 	}
1627 
1628 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1629 
1630 	// enter userspace -- returns only in case of error
1631 	return thread_enter_userspace_new_team(thread, (addr_t)entry,
1632 		programArgs, team->commpage_address);
1633 }
1634 
1635 
1636 static status_t
1637 team_create_thread_start(void* args)
1638 {
1639 	team_create_thread_start_internal(args);
1640 	team_init_exit_info_on_error(thread_get_current_thread()->team);
1641 	thread_exit();
1642 		// does not return
1643 	return B_OK;
1644 }
1645 
1646 
1647 static thread_id
1648 load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1649 	int32 envCount, int32 priority, team_id parentID, uint32 flags,
1650 	port_id errorPort, uint32 errorToken)
1651 {
1652 	char** flatArgs = _flatArgs;
1653 	thread_id thread;
1654 	status_t status;
1655 	struct team_arg* teamArgs;
1656 	struct team_loading_info loadingInfo;
1657 	io_context* parentIOContext = NULL;
1658 	team_id teamID;
1659 
1660 	if (flatArgs == NULL || argCount == 0)
1661 		return B_BAD_VALUE;
1662 
1663 	const char* path = flatArgs[0];
1664 
1665 	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
1666 		"\n", path, flatArgs, argCount));
1667 
1668 	// cut the path from the main thread name
1669 	const char* threadName = strrchr(path, '/');
1670 	if (threadName != NULL)
1671 		threadName++;
1672 	else
1673 		threadName = path;
1674 
1675 	// create the main thread object
1676 	Thread* mainThread;
1677 	status = Thread::Create(threadName, mainThread);
1678 	if (status != B_OK)
1679 		return status;
1680 	BReference<Thread> mainThreadReference(mainThread, true);
1681 
1682 	// create team object
1683 	Team* team = Team::Create(mainThread->id, path, false);
1684 	if (team == NULL)
1685 		return B_NO_MEMORY;
1686 	BReference<Team> teamReference(team, true);
1687 
1688 	if (flags & B_WAIT_TILL_LOADED) {
1689 		loadingInfo.thread = thread_get_current_thread();
1690 		loadingInfo.result = B_ERROR;
1691 		loadingInfo.done = false;
1692 		team->loading_info = &loadingInfo;
1693 	}
1694 
1695 	// get the parent team
1696 	Team* parent = Team::Get(parentID);
1697 	if (parent == NULL)
1698 		return B_BAD_TEAM_ID;
1699 	BReference<Team> parentReference(parent, true);
1700 
1701 	parent->LockTeamAndProcessGroup();
1702 	team->Lock();
1703 
1704 	// inherit the parent's user/group
1705 	inherit_parent_user_and_group(team, parent);
1706 
1707 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
1708 
1709 	sTeamHash.Insert(team);
1710 	bool teamLimitReached = sUsedTeams >= sMaxTeams;
1711 	if (!teamLimitReached)
1712 		sUsedTeams++;
1713 
1714 	teamsLocker.Unlock();
1715 
1716 	insert_team_into_parent(parent, team);
1717 	insert_team_into_group(parent->group, team);
1718 
1719 	// get a reference to the parent's I/O context -- we need it to create ours
1720 	parentIOContext = parent->io_context;
1721 	vfs_get_io_context(parentIOContext);
1722 
1723 	team->Unlock();
1724 	parent->UnlockTeamAndProcessGroup();
1725 
1726 	// notify team listeners
1727 	sNotificationService.Notify(TEAM_ADDED, team);
1728 
1729 	// check the executable's set-user/group-id permission
1730 	update_set_id_user_and_group(team, path);
1731 
1732 	if (teamLimitReached) {
1733 		status = B_NO_MORE_TEAMS;
1734 		goto err1;
1735 	}
1736 
1737 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1738 		envCount, (mode_t)-1, errorPort, errorToken);
1739 	if (status != B_OK)
1740 		goto err1;
1741 
1742 	_flatArgs = NULL;
1743 		// args are owned by the team_arg structure now
1744 
1745 	// create a new io_context for this team
1746 	team->io_context = vfs_new_io_context(parentIOContext, true);
1747 	if (!team->io_context) {
1748 		status = B_NO_MEMORY;
1749 		goto err2;
1750 	}
1751 
1752 	// We don't need the parent's I/O context any longer.
1753 	vfs_put_io_context(parentIOContext);
1754 	parentIOContext = NULL;
1755 
1756 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1757 	vfs_exec_io_context(team->io_context);
1758 
1759 	// create an address space for this team
1760 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
1761 		&team->address_space);
1762 	if (status != B_OK)
1763 		goto err2;
1764 
1765 	// create the user data area
1766 	status = create_team_user_data(team);
1767 	if (status != B_OK)
1768 		goto err4;
1769 
1770 	// In case we start the main thread, we shouldn't access the team object
1771 	// afterwards, so cache the team's ID.
1772 	teamID = team->id;
1773 
1774 	// Create a kernel thread, but under the context of the new team
1775 	// The new thread will take over ownership of teamArgs.
1776 	{
1777 		ThreadCreationAttributes threadAttributes(team_create_thread_start,
1778 			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
1779 		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
1780 			+ teamArgs->flat_args_size;
1781 		thread = thread_create_thread(threadAttributes, false);
1782 		if (thread < 0) {
1783 			status = thread;
1784 			goto err5;
1785 		}
1786 	}
1787 
1788 	// The team has been created successfully, so we keep the reference. Or
1789 	// more precisely: It's owned by the team's main thread now.
1790 	teamReference.Detach();
1791 
1792 	// wait for the loader of the new team to finish its work
1793 	if ((flags & B_WAIT_TILL_LOADED) != 0) {
1794 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1795 
1796 		// resume the team's main thread
1797 		if (mainThread != NULL && mainThread->state == B_THREAD_SUSPENDED)
1798 			scheduler_enqueue_in_run_queue(mainThread);
1799 
1800 		// Now suspend ourselves until loading is finished. We will be woken
1801 		// either by the thread, when it finished or aborted loading, or when
1802 		// the team is going to die (e.g. is killed). In either case the one
1803 		// setting `loadingInfo.done' is responsible for removing the info from
1804 		// the team structure.
1805 		while (!loadingInfo.done) {
1806 			thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
1807 			scheduler_reschedule();
1808 		}
1809 
1810 		schedulerLocker.Unlock();
1811 
1812 		if (loadingInfo.result < B_OK)
1813 			return loadingInfo.result;
1814 	}
1815 
1816 	// notify the debugger
1817 	user_debug_team_created(teamID);
1818 
1819 	return thread;
1820 
1821 err5:
1822 	delete_team_user_data(team);
1823 err4:
1824 	team->address_space->Put();
1825 err2:
1826 	free_team_arg(teamArgs);
1827 err1:
1828 	if (parentIOContext != NULL)
1829 		vfs_put_io_context(parentIOContext);
1830 
1831 	// Remove the team structure from the process group, the parent team, and
1832 	// the team hash table and delete the team structure.
1833 	parent->LockTeamAndProcessGroup();
1834 	team->Lock();
1835 
1836 	remove_team_from_group(team);
1837 	remove_team_from_parent(team->parent, team);
1838 
1839 	team->Unlock();
1840 	parent->UnlockTeamAndProcessGroup();
1841 
1842 	teamsLocker.Lock();
1843 	sTeamHash.Remove(team);
1844 	if (!teamLimitReached)
1845 		sUsedTeams--;
1846 	teamsLocker.Unlock();
1847 
1848 	sNotificationService.Notify(TEAM_REMOVED, team);
1849 
1850 	return status;
1851 }
1852 
1853 
1854 /*!	Almost shuts down the current team and loads a new image into it.
1855 	If successful, this function does not return and will take over ownership of
1856 	the arguments provided.
1857 	This function may only be called in a userland team (caused by one of the
1858 	exec*() syscalls).
1859 */
1860 static status_t
1861 exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
1862 	int32 argCount, int32 envCount, mode_t umask)
1863 {
1864 	// NOTE: Since this function normally doesn't return, don't use automatic
1865 	// variables that need destruction in the function scope.
1866 	char** flatArgs = _flatArgs;
1867 	Team* team = thread_get_current_thread()->team;
1868 	struct team_arg* teamArgs;
1869 	const char* threadName;
1870 	thread_id nubThreadID = -1;
1871 
1872 	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
1873 		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
1874 		team->id));
1875 
1876 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1877 
1878 	// switching the kernel at run time is probably not a good idea :)
1879 	if (team == team_get_kernel_team())
1880 		return B_NOT_ALLOWED;
1881 
1882 	// we currently need to be single threaded here
1883 	// TODO: maybe we should just kill all other threads and
1884 	//	make the current thread the team's main thread?
1885 	Thread* currentThread = thread_get_current_thread();
1886 	if (currentThread != team->main_thread)
1887 		return B_NOT_ALLOWED;
1888 
1889 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1890 	// We iterate through the thread list to make sure that there's no other
1891 	// thread.
1892 	TeamLocker teamLocker(team);
1893 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
1894 
1895 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1896 		nubThreadID = team->debug_info.nub_thread;
1897 
1898 	debugInfoLocker.Unlock();
1899 
1900 	for (Thread* thread = team->thread_list; thread != NULL;
1901 			thread = thread->team_next) {
1902 		if (thread != team->main_thread && thread->id != nubThreadID)
1903 			return B_NOT_ALLOWED;
1904 	}
1905 
1906 	team->DeleteUserTimers(true);
1907 	team->ResetSignalsOnExec();
1908 
1909 	teamLocker.Unlock();
1910 
1911 	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
1912 		argCount, envCount, umask, -1, 0);
1913 	if (status != B_OK)
1914 		return status;
1915 
1916 	_flatArgs = NULL;
1917 		// args are owned by the team_arg structure now
1918 
1919 	// TODO: remove team resources if there are any left
1920 	// thread_atkernel_exit() might not be called at all
1921 
1922 	thread_reset_for_exec();
1923 
1924 	user_debug_prepare_for_exec();
1925 
1926 	delete_team_user_data(team);
1927 	vm_delete_areas(team->address_space, false);
1928 	xsi_sem_undo(team);
1929 	delete_owned_ports(team);
1930 	sem_delete_owned_sems(team);
1931 	remove_images(team);
1932 	vfs_exec_io_context(team->io_context);
1933 	delete_realtime_sem_context(team->realtime_sem_context);
1934 	team->realtime_sem_context = NULL;
1935 
1936 	status = create_team_user_data(team);
1937 	if (status != B_OK) {
1938 		// creating the user data failed -- we're toast
1939 		// TODO: We had better keep the old user area in the first place.
1940 		free_team_arg(teamArgs);
1941 		exit_thread(status);
1942 		return status;
1943 	}
1944 
1945 	user_debug_finish_after_exec();
1946 
1947 	// rename the team
1948 
1949 	team->Lock();
1950 	team->SetName(path);
1951 	team->Unlock();
1952 
1953 	// cut the path from the team name and rename the main thread, too
1954 	threadName = strrchr(path, '/');
1955 	if (threadName != NULL)
1956 		threadName++;
1957 	else
1958 		threadName = path;
1959 	rename_thread(thread_get_current_thread_id(), threadName);
1960 
1961 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1962 
1963 	// Update user/group according to the executable's set-user/group-id
1964 	// permission.
1965 	update_set_id_user_and_group(team, path);
1966 
1967 	user_debug_team_exec();
1968 
1969 	// notify team listeners
1970 	sNotificationService.Notify(TEAM_EXEC, team);
1971 
1972 	// get a user thread for the thread
1973 	user_thread* userThread = team_allocate_user_thread(team);
1974 		// cannot fail (the allocation for the team would have failed already)
1975 	ThreadLocker currentThreadLocker(currentThread);
1976 	currentThread->user_thread = userThread;
1977 	currentThreadLocker.Unlock();
1978 
1979 	// create the user stack for the thread
1980 	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
1981 		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
1982 	if (status == B_OK) {
1983 		// prepare the stack, load the runtime loader, and enter userspace
1984 		team_create_thread_start(teamArgs);
1985 			// never returns
1986 	} else
1987 		free_team_arg(teamArgs);
1988 
1989 	// Sorry, we have to kill ourselves, there is no way out anymore
1990 	// (without any areas left and all that).
1991 	exit_thread(status);
1992 
1993 	// We return a status here since the signal that is sent by the
1994 	// call above is not immediately handled.
1995 	return B_ERROR;
1996 }
1997 
1998 
1999 static thread_id
2000 fork_team(void)
2001 {
2002 	Thread* parentThread = thread_get_current_thread();
2003 	Team* parentTeam = parentThread->team;
2004 	Team* team;
2005 	arch_fork_arg* forkArgs;
2006 	struct area_info info;
2007 	thread_id threadID;
2008 	status_t status;
2009 	ssize_t areaCookie;
2010 	int32 imageCookie;
2011 
2012 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2013 
2014 	if (parentTeam == team_get_kernel_team())
2015 		return B_NOT_ALLOWED;
2016 
2017 	// create a new team
2018 	// TODO: this is very similar to load_image_internal() - maybe we can do
2019 	// something about it :)
2020 
2021 	// create the main thread object
2022 	Thread* thread;
2023 	status = Thread::Create(parentThread->name, thread);
2024 	if (status != B_OK)
2025 		return status;
2026 	BReference<Thread> threadReference(thread, true);
2027 
2028 	// create the team object
2029 	team = Team::Create(thread->id, NULL, false);
2030 	if (team == NULL)
2031 		return B_NO_MEMORY;
2032 
2033 	parentTeam->LockTeamAndProcessGroup();
2034 	team->Lock();
2035 
2036 	team->SetName(parentTeam->Name());
2037 	team->SetArgs(parentTeam->Args());
2038 
2039 	team->commpage_address = parentTeam->commpage_address;
2040 
2041 	// Inherit the parent's user/group.
2042 	inherit_parent_user_and_group(team, parentTeam);
2043 
2044 	// inherit signal handlers
2045 	team->InheritSignalActions(parentTeam);
2046 
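	// Insert the team into the hash table and count it against the team
	// limit. If the limit has been reached, we nevertheless finish
	// constructing the team and only bail out afterwards, via the common
	// error path.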
2047 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2048 
2049 	sTeamHash.Insert(team);
2050 	bool teamLimitReached = sUsedTeams >= sMaxTeams;
2051 	if (!teamLimitReached)
2052 		sUsedTeams++;
2053 
2054 	teamsLocker.Unlock();
2055 
2056 	insert_team_into_parent(parentTeam, team);
2057 	insert_team_into_group(parentTeam->group, team);
2058 
2059 	team->Unlock();
2060 	parentTeam->UnlockTeamAndProcessGroup();
2061 
2062 	// notify team listeners
2063 	sNotificationService.Notify(TEAM_ADDED, team);
2064 
2065 	// inherit some team debug flags
2066 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2067 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2068 
2069 	if (teamLimitReached) {
2070 		status = B_NO_MORE_TEAMS;
2071 		goto err1;
2072 	}
2073 
2074 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2075 	if (forkArgs == NULL) {
2076 		status = B_NO_MEMORY;
2077 		goto err1;
2078 	}
2079 
2080 	// create a new io_context for this team
2081 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2082 	if (!team->io_context) {
2083 		status = B_NO_MEMORY;
2084 		goto err2;
2085 	}
2086 
2087 	// duplicate the realtime sem context
2088 	if (parentTeam->realtime_sem_context) {
2089 		team->realtime_sem_context = clone_realtime_sem_context(
2090 			parentTeam->realtime_sem_context);
2091 		if (team->realtime_sem_context == NULL) {
2092 			status = B_NO_MEMORY;
2093 			goto err2;
2094 		}
2095 	}
2096 
2097 	// create an address space for this team
2098 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2099 		&team->address_space);
2100 	if (status < B_OK)
2101 		goto err3;
2102 
2103 	// copy all areas of the team
2104 	// TODO: should be able to handle stack areas differently (i.e. don't have
2105 	// them copy-on-write)
2106 
2107 	areaCookie = 0;
2108 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2109 		if (info.area == parentTeam->user_data_area) {
2110 			// don't clone the user area; just create a new one
2111 			status = create_team_user_data(team, info.address);
2112 			if (status != B_OK)
2113 				break;
2114 
2115 			thread->user_thread = team_allocate_user_thread(team);
2116 		} else {
2117 			void* address;
2118 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2119 				&address, B_CLONE_ADDRESS, info.protection, info.area);
2120 			if (area < B_OK) {
2121 				status = area;
2122 				break;
2123 			}
2124 
2125 			if (info.area == parentThread->user_stack_area)
2126 				thread->user_stack_area = area;
2127 		}
2128 	}
2129 
2130 	if (status < B_OK)
2131 		goto err4;
2132 
2133 	if (thread->user_thread == NULL) {
2134 #if KDEBUG
2135 		panic("user data area not found, parent area is %" B_PRId32,
2136 			parentTeam->user_data_area);
2137 #endif
2138 		status = B_ERROR;
2139 		goto err4;
2140 	}
2141 
2142 	thread->user_stack_base = parentThread->user_stack_base;
2143 	thread->user_stack_size = parentThread->user_stack_size;
2144 	thread->user_local_storage = parentThread->user_local_storage;
2145 	thread->sig_block_mask = parentThread->sig_block_mask;
2146 	thread->signal_stack_base = parentThread->signal_stack_base;
2147 	thread->signal_stack_size = parentThread->signal_stack_size;
2148 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2149 
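	// Capture the parent's current register state; the child's main thread
	// will be resumed from this frame, so that it continues execution as if
	// returning from fork().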
2150 	arch_store_fork_frame(forkArgs);
2151 
2152 	// copy image list
2153 	image_info imageInfo;
2154 	imageCookie = 0;
2155 	while (get_next_image_info(parentTeam->id, &imageCookie, &imageInfo)
2156 			== B_OK) {
2157 		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
2158 		if (image < 0)
2159 			goto err5;
2160 	}
2161 
2162 	// create the main thread
2163 	{
2164 		ThreadCreationAttributes threadCreationAttributes(NULL,
2165 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2166 		threadCreationAttributes.forkArgs = forkArgs;
2167 		threadID = thread_create_thread(threadCreationAttributes, false);
2168 		if (threadID < 0) {
2169 			status = threadID;
2170 			goto err5;
2171 		}
2172 	}
2173 
2174 	// notify the debugger
2175 	user_debug_team_created(team->id);
2176 
2177 	T(TeamForked(threadID));
2178 
2179 	resume_thread(threadID);
2180 	return threadID;
2181 
2182 err5:
2183 	remove_images(team);
2184 err4:
2185 	team->address_space->RemoveAndPut();
2186 err3:
2187 	delete_realtime_sem_context(team->realtime_sem_context);
2188 err2:
2189 	free(forkArgs);
2190 err1:
2191 	// Remove the team structure from the process group, the parent team, and
2192 	// the team hash table and delete the team structure.
2193 	parentTeam->LockTeamAndProcessGroup();
2194 	team->Lock();
2195 
2196 	remove_team_from_group(team);
2197 	remove_team_from_parent(team->parent, team);
2198 
2199 	team->Unlock();
2200 	parentTeam->UnlockTeamAndProcessGroup();
2201 
2202 	teamsLocker.Lock();
2203 	sTeamHash.Remove(team);
2204 	if (!teamLimitReached)
2205 		sUsedTeams--;
2206 	teamsLocker.Unlock();
2207 
2208 	sNotificationService.Notify(TEAM_REMOVED, team);
2209 
2210 	team->ReleaseReference();
2211 
2212 	return status;
2213 }
2214 
2215 
2216 /*!	Returns whether the specified team \a parent has any children belonging
2217 	to the process group with the specified ID \a groupID.
2218 	The caller must hold \a parent's lock.
2219 */
2220 static bool
2221 has_children_in_group(Team* parent, pid_t groupID)
2222 {
2223 	for (Team* child = parent->children; child != NULL;
2224 			child = child->siblings_next) {
2225 		TeamLocker childLocker(child);
2226 		if (child->group_id == groupID)
2227 			return true;
2228 	}
2229 
2230 	return false;
2231 }
2232 
2233 
2234 /*!	Returns the first job control entry from \a children, which matches \a id.
2235 	\a id can be:
2236 	- \code > 0 \endcode: Matching an entry with that team ID.
2237 	- \code == -1 \endcode: Matching any entry.
2238 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2239 	\c 0 is an invalid value for \a id.
2240 
2241 	The caller must hold the lock of the team that \a children belongs to.
2242 
2243 	\param children The job control entry list to check.
2244 	\param id The match criterion.
2245 	\return The first matching entry or \c NULL, if none matches.
2246 */
2247 static job_control_entry*
2248 get_job_control_entry(team_job_control_children& children, pid_t id)
2249 {
2250 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2251 		 job_control_entry* entry = it.Next();) {
2252 
2253 		if (id > 0) {
2254 			if (entry->thread == id)
2255 				return entry;
2256 		} else if (id == -1) {
2257 			return entry;
2258 		} else {
2259 			pid_t processGroup
2260 				= (entry->team ? entry->team->group_id : entry->group_id);
2261 			if (processGroup == -id)
2262 				return entry;
2263 		}
2264 	}
2265 
2266 	return NULL;
2267 }
2268 
2269 
2270 /*!	Returns the first job control entry from one of \a team's dead,
2271 	continued, or stopped children which matches \a id.
2272 	\a id can be:
2273 	- \code > 0 \endcode: Matching an entry with that team ID.
2274 	- \code == -1 \endcode: Matching any entry.
2275 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2276 	\c 0 is an invalid value for \a id.
2277 
2278 	The caller must hold \a team's lock.
2279 
2280 	\param team The team whose dead, stopped, and continued child lists shall be
2281 		checked.
2282 	\param id The match criterion.
2283 	\param flags Specifies which children shall be considered. Dead children
2284 		always are. Stopped children are considered when \a flags is ORed
2285 		bitwise with \c WUNTRACED, continued children when \a flags is ORed
2286 		bitwise with \c WCONTINUED.
2287 	\return The first matching entry or \c NULL, if none matches.
2288 */
2289 static job_control_entry*
2290 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2291 {
2292 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
2293 
2294 	if (entry == NULL && (flags & WCONTINUED) != 0)
2295 		entry = get_job_control_entry(team->continued_children, id);
2296 
2297 	if (entry == NULL && (flags & WUNTRACED) != 0)
2298 		entry = get_job_control_entry(team->stopped_children, id);
2299 
2300 	return entry;
2301 }
2302 
2303 
2304 job_control_entry::job_control_entry()
2305 	:
2306 	has_group_ref(false)
2307 {
2308 }
2309 
2310 
2311 job_control_entry::~job_control_entry()
2312 {
2313 	if (has_group_ref) {
2314 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2315 
2316 		ProcessGroup* group = sGroupHash.Lookup(group_id);
2317 		if (group == NULL) {
2318 			panic("job_control_entry::~job_control_entry(): unknown group "
2319 				"ID: %" B_PRId32, group_id);
2320 			return;
2321 		}
2322 
2323 		groupHashLocker.Unlock();
2324 
2325 		group->ReleaseReference();
2326 	}
2327 }
2328 
2329 
2330 /*!	Invoked when the owning team is dying, initializing the entry according to
2331 	the dead state.
2332 
2333 	The caller must hold the owning team's lock and the scheduler lock.
2334 */
2335 void
2336 job_control_entry::InitDeadState()
2337 {
2338 	if (team != NULL) {
2339 		ASSERT(team->exit.initialized);
2340 
2341 		group_id = team->group_id;
2342 		team->group->AcquireReference();
2343 		has_group_ref = true;
2344 
2345 		thread = team->id;
2346 		status = team->exit.status;
2347 		reason = team->exit.reason;
2348 		signal = team->exit.signal;
2349 		signaling_user = team->exit.signaling_user;
2350 
2351 		team = NULL;
2352 	}
2353 }
2354 
2355 
2356 job_control_entry&
2357 job_control_entry::operator=(const job_control_entry& other)
2358 {
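	// Note: has_group_ref is deliberately not copied -- the assigned-to entry
	// does not take over the group reference.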
2359 	state = other.state;
2360 	thread = other.thread;
2361 	signal = other.signal;
2362 	has_group_ref = false;
2363 	signaling_user = other.signaling_user;
2364 	team = other.team;
2365 	group_id = other.group_id;
2366 	status = other.status;
2367 	reason = other.reason;
2368 
2369 	return *this;
2370 }
2371 
2372 
2373 /*! This is the kernel backend for waitid().
2374 */
2375 static thread_id
2376 wait_for_child(pid_t child, uint32 flags, siginfo_t& _info)
2377 {
2378 	Thread* thread = thread_get_current_thread();
2379 	Team* team = thread->team;
2380 	struct job_control_entry foundEntry;
2381 	struct job_control_entry* freeDeathEntry = NULL;
2382 	status_t status = B_OK;
2383 
2384 	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
2385 		child, flags));
2386 
2387 	T(WaitForChild(child, flags));
2388 
2389 	pid_t originalChild = child;
2390 
2391 	bool ignoreFoundEntries = false;
2392 	bool ignoreFoundEntriesChecked = false;
2393 
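	// Loop until we either find a matching job control entry, conclude that
	// no matching child exists (ECHILD), or get interrupted while waiting.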
2394 	while (true) {
2395 		// lock the team
2396 		TeamLocker teamLocker(team);
2397 
2398 		// A 0 child argument means to wait for all children in the process
2399 		// group of the calling team.
2400 		child = originalChild == 0 ? -team->group_id : originalChild;
2401 
2402 		// check whether any condition holds
2403 		job_control_entry* entry = get_job_control_entry(team, child, flags);
2404 
2405 		// If we don't have an entry yet, check whether there are any children
2406 		// matching the process group specification at all.
2407 		if (entry == NULL) {
2410 			bool childrenExist = false;
2411 			if (child == -1) {
2412 				childrenExist = team->children != NULL;
2413 			} else if (child < -1) {
2414 				childrenExist = has_children_in_group(team, -child);
2415 			} else {
2416 				if (Team* childTeam = Team::Get(child)) {
2417 					BReference<Team> childTeamReference(childTeam, true);
2418 					TeamLocker childTeamLocker(childTeam);
2419 					childrenExist = childTeam->parent == team;
2420 				}
2421 			}
2422 
2423 			if (!childrenExist) {
2424 				// there is no child we could wait for
2425 				status = ECHILD;
2426 			} else {
2427 				// the children we're waiting for are still running
2428 				status = B_WOULD_BLOCK;
2429 			}
2430 		} else {
2431 			// got something
2432 			foundEntry = *entry;
2433 
2434 			// unless WNOWAIT has been specified, "consume" the wait state
2435 			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
2436 				if (entry->state == JOB_CONTROL_STATE_DEAD) {
2437 					// The child is dead. Reap its death entry.
2438 					freeDeathEntry = entry;
2439 					team->dead_children.entries.Remove(entry);
2440 					team->dead_children.count--;
2441 				} else {
2442 					// The child is well. Reset its job control state.
2443 					team_set_job_control_state(entry->team,
2444 						JOB_CONTROL_STATE_NONE, NULL, false);
2445 				}
2446 			}
2447 		}
2448 
2449 		// If we haven't got anything yet, prepare for waiting for the
2450 		// condition variable.
2451 		ConditionVariableEntry deadWaitEntry;
2452 
2453 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
2454 			team->dead_children.condition_variable.Add(&deadWaitEntry);
2455 
2456 		teamLocker.Unlock();
2457 
2458 		// we got our entry and can return to our caller
2459 		if (status == B_OK) {
2460 			if (ignoreFoundEntries) {
2461 				// ... unless we shall ignore found entries
2462 				delete freeDeathEntry;
2463 				freeDeathEntry = NULL;
2464 				continue;
2465 			}
2466 
2467 			break;
2468 		}
2469 
2470 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
2471 			T(WaitForChildDone(status));
2472 			return status;
2473 		}
2474 
2475 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
2476 		if (status == B_INTERRUPTED) {
2477 			T(WaitForChildDone(status));
2478 			return status;
2479 		}
2480 
2481 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
2482 		// all our children are dead and fail with ECHILD. We check the
2483 		// condition at this point.
2484 		if (!ignoreFoundEntriesChecked) {
2485 			teamLocker.Lock();
2486 
2487 			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
2488 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
2489 				|| handler.sa_handler == SIG_IGN) {
2490 				ignoreFoundEntries = true;
2491 			}
2492 
2493 			teamLocker.Unlock();
2494 
2495 			ignoreFoundEntriesChecked = true;
2496 		}
2497 	}
2498 
2499 	delete freeDeathEntry;
2500 
2501 	// When we get here, we have a valid death entry and have already been
2502 	// unregistered from the team or group. Fill in the returned info.
2503 	memset(&_info, 0, sizeof(_info));
2504 	_info.si_signo = SIGCHLD;
2505 	_info.si_pid = foundEntry.thread;
2506 	_info.si_uid = foundEntry.signaling_user;
2507 	// TODO: Fill in si_errno?
2508 
2509 	switch (foundEntry.state) {
2510 		case JOB_CONTROL_STATE_DEAD:
2511 			_info.si_code = foundEntry.reason;
2512 			_info.si_status = foundEntry.reason == CLD_EXITED
2513 				? foundEntry.status : foundEntry.signal;
2514 			break;
2515 		case JOB_CONTROL_STATE_STOPPED:
2516 			_info.si_code = CLD_STOPPED;
2517 			_info.si_status = foundEntry.signal;
2518 			break;
2519 		case JOB_CONTROL_STATE_CONTINUED:
2520 			_info.si_code = CLD_CONTINUED;
2521 			_info.si_status = 0;
2522 			break;
2523 		case JOB_CONTROL_STATE_NONE:
2524 			// can't happen
2525 			break;
2526 	}
2527 
2528 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs if no other
2529 	// child status is available.
2530 	TeamLocker teamLocker(team);
2531 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2532 
2533 	if (is_team_signal_blocked(team, SIGCHLD)) {
2534 		if (get_job_control_entry(team, child, flags) == NULL)
2535 			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
2536 	}
2537 
2538 	schedulerLocker.Unlock();
2539 	teamLocker.Unlock();
2540 
2541 	// When the team is dead, the main thread continues to live in the kernel
2542 	// team for a very short time. To avoid surprises for the caller we wait
2543 	// until the thread is really gone.
2544 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
2545 		wait_for_thread(foundEntry.thread, NULL);
2546 
2547 	T(WaitForChildDone(foundEntry));
2548 
2549 	return foundEntry.thread;
2550 }
2551 
2552 
2553 /*! Fills the team_info structure with information from the specified team.
2554 	Interrupts must be enabled. The team must not be locked.
2555 */
2556 static status_t
2557 fill_team_info(Team* team, team_info* info, size_t size)
2558 {
2559 	if (size != sizeof(team_info))
2560 		return B_BAD_VALUE;
2561 
2562 	// TODO: Set more information for team_info
2563 	memset(info, 0, size);
2564 
2565 	info->team = team->id;
2566 		// immutable
2567 	info->image_count = count_images(team);
2568 		// protected by sImageMutex
2569 
2570 	TeamLocker teamLocker(team);
2571 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2572 
2573 	info->thread_count = team->num_threads;
2574 	//info->area_count =
2575 	info->debugger_nub_thread = team->debug_info.nub_thread;
2576 	info->debugger_nub_port = team->debug_info.nub_port;
2577 	info->uid = team->effective_uid;
2578 	info->gid = team->effective_gid;
2579 
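	// Team::Args() holds the full command line in a single string, hence the
	// argument count of 1.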
2580 	strlcpy(info->args, team->Args(), sizeof(info->args));
2581 	info->argc = 1;
2582 
2583 	return B_OK;
2584 }
2585 
2586 
2587 /*!	Returns whether the process group contains stopped processes.
2588 	The caller must hold the process group's lock.
2589 */
2590 static bool
2591 process_group_has_stopped_processes(ProcessGroup* group)
2592 {
2593 	Team* team = group->teams;
2594 	while (team != NULL) {
2595 		// the parent team's lock guards the job control entry -- acquire it
2596 		team->LockTeamAndParent(false);
2597 
2598 		if (team->job_control_entry != NULL
2599 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2600 			team->UnlockTeamAndParent();
2601 			return true;
2602 		}
2603 
2604 		team->UnlockTeamAndParent();
2605 
2606 		team = team->group_next;
2607 	}
2608 
2609 	return false;
2610 }
2611 
2612 
2613 /*!	Iterates through all process groups queued in team_remove_team() and signals
2614 	those that are orphaned and have stopped processes.
2615 	The caller must not hold any team or process group locks.
2616 */
2617 static void
2618 orphaned_process_group_check()
2619 {
2620 	// process as long as there are groups in the list
2621 	while (true) {
2622 		// remove the head from the list
2623 		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
2624 
2625 		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
2626 		if (group == NULL)
2627 			return;
2628 
2629 		group->UnsetOrphanedCheck();
2630 		BReference<ProcessGroup> groupReference(group);
2631 
2632 		orphanedCheckLocker.Unlock();
2633 
2634 		AutoLocker<ProcessGroup> groupLocker(group);
2635 
2636 		// If the group is orphaned and contains stopped processes, we're
2637 		// supposed to send SIGHUP + SIGCONT.
2638 		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
2639 			Thread* currentThread = thread_get_current_thread();
2640 
2641 			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
2642 			send_signal_to_process_group_locked(group, signal, 0);
2643 
2644 			signal.SetNumber(SIGCONT);
2645 			send_signal_to_process_group_locked(group, signal, 0);
2646 		}
2647 	}
2648 }
2649 
2650 
2651 static status_t
2652 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2653 	uint32 flags)
2654 {
2655 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2656 		return B_BAD_VALUE;
2657 
2658 	// get the team
2659 	Team* team = Team::GetAndLock(id);
2660 	if (team == NULL)
2661 		return B_BAD_TEAM_ID;
2662 	BReference<Team> teamReference(team, true);
2663 	TeamLocker teamLocker(team, true);
2664 
2665 	if ((flags & B_CHECK_PERMISSION) != 0) {
2666 		uid_t uid = geteuid();
2667 		if (uid != 0 && uid != team->effective_uid)
2668 			return B_NOT_ALLOWED;
2669 	}
2670 
2671 	bigtime_t kernelTime = 0;
2672 	bigtime_t userTime = 0;
2673 
2674 	switch (who) {
2675 		case B_TEAM_USAGE_SELF:
2676 		{
2677 			Thread* thread = team->thread_list;
2678 
2679 			for (; thread != NULL; thread = thread->team_next) {
2680 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2681 				kernelTime += thread->kernel_time;
2682 				userTime += thread->user_time;
2683 			}
2684 
2685 			kernelTime += team->dead_threads_kernel_time;
2686 			userTime += team->dead_threads_user_time;
2687 			break;
2688 		}
2689 
2690 		case B_TEAM_USAGE_CHILDREN:
2691 		{
2692 			Team* child = team->children;
2693 			for (; child != NULL; child = child->siblings_next) {
2694 				TeamLocker childLocker(child);
2695 
2696 				Thread* thread = child->thread_list;
2697 
2698 				for (; thread != NULL; thread = thread->team_next) {
2699 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2700 					kernelTime += thread->kernel_time;
2701 					userTime += thread->user_time;
2702 				}
2703 
2704 				kernelTime += child->dead_threads_kernel_time;
2705 				userTime += child->dead_threads_user_time;
2706 			}
2707 
2708 			kernelTime += team->dead_children.kernel_time;
2709 			userTime += team->dead_children.user_time;
2710 			break;
2711 		}
2712 	}
2713 
2714 	info->kernel_time = kernelTime;
2715 	info->user_time = userTime;
2716 
2717 	return B_OK;
2718 }
2719 
2720 
2721 //	#pragma mark - Private kernel API
2722 
2723 
2724 status_t
2725 team_init(kernel_args* args)
2726 {
2727 	// create the team hash table
2728 	new(&sTeamHash) TeamTable;
2729 	if (sTeamHash.Init(64) != B_OK)
2730 		panic("Failed to init team hash table!");
2731 
2732 	new(&sGroupHash) ProcessGroupHashTable;
2733 	if (sGroupHash.Init() != B_OK)
2734 		panic("Failed to init process group hash table!");
2735 
2736 	// create initial session and process groups
2737 
2738 	ProcessSession* session = new(std::nothrow) ProcessSession(1);
2739 	if (session == NULL)
2740 		panic("Could not create initial session.\n");
2741 	BReference<ProcessSession> sessionReference(session, true);
2742 
2743 	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
2744 	if (group == NULL)
2745 		panic("Could not create initial process group.\n");
2746 	BReference<ProcessGroup> groupReference(group, true);
2747 
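	// publish the group, which binds it to the initial session and makes it
	// visible in the group hash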
2748 	group->Publish(session);
2749 
2750 	// create the kernel team
2751 	sKernelTeam = Team::Create(1, "kernel_team", true);
2752 	if (sKernelTeam == NULL)
2753 		panic("could not create kernel team!\n");
2754 	sKernelTeam->SetArgs(sKernelTeam->Name());
2755 	sKernelTeam->state = TEAM_STATE_NORMAL;
2756 
2757 	sKernelTeam->saved_set_uid = 0;
2758 	sKernelTeam->real_uid = 0;
2759 	sKernelTeam->effective_uid = 0;
2760 	sKernelTeam->saved_set_gid = 0;
2761 	sKernelTeam->real_gid = 0;
2762 	sKernelTeam->effective_gid = 0;
2763 	sKernelTeam->supplementary_groups = NULL;
2764 	sKernelTeam->supplementary_group_count = 0;
2765 
2766 	insert_team_into_group(group, sKernelTeam);
2767 
2768 	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
2769 	if (sKernelTeam->io_context == NULL)
2770 		panic("could not create io_context for kernel team!\n");
2771 
2772 	// stick it in the team hash
2773 	sTeamHash.Insert(sKernelTeam);
2774 
2775 	add_debugger_command_etc("team", &dump_team_info,
2776 		"Dump info about a particular team",
2777 		"[ <id> | <address> | <name> ]\n"
2778 		"Prints information about the specified team. If no argument is given\n"
2779 		"the current team is selected.\n"
2780 		"  <id>       - The ID of the team.\n"
2781 		"  <address>  - The address of the team structure.\n"
2782 		"  <name>     - The team's name.\n", 0);
2783 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2784 		"\n"
2785 		"Prints a list of all existing teams.\n", 0);
2786 
2787 	new(&sNotificationService) TeamNotificationService();
2788 
2789 	sNotificationService.Register();
2790 
2791 	return B_OK;
2792 }
2793 
2794 
2795 int32
2796 team_max_teams(void)
2797 {
2798 	return sMaxTeams;
2799 }
2800 
2801 
2802 int32
2803 team_used_teams(void)
2804 {
2805 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2806 	return sUsedTeams;
2807 }
2808 
2809 
2810 /*! Returns a death entry of a child team specified by ID (if any).
2811 	The caller must hold the team's lock.
2812 
2813 	\param team The team whose dead children list to check.
2814 	\param child The ID of the child whose death entry to look for. Must be > 0.
2815 	\param _deleteEntry Return variable, indicating whether the caller needs to
2816 		delete the returned entry.
2817 	\return The death entry of the matching team, or \c NULL, if no death entry
2818 		for the team was found.
2819 */
2820 job_control_entry*
2821 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2822 {
2823 	if (child <= 0)
2824 		return NULL;
2825 
2826 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2827 		child);
2828 	if (entry) {
2829 		// only remove the entry if the caller is the parent of the found team
2830 		if (team_get_current_team_id() == entry->thread) {
2831 			team->dead_children.entries.Remove(entry);
2832 			team->dead_children.count--;
2833 			*_deleteEntry = true;
2834 		} else {
2835 			*_deleteEntry = false;
2836 		}
2837 	}
2838 
2839 	return entry;
2840 }
2841 
2842 
2843 /*! Quick check to see if we have a valid team ID. */
2844 bool
2845 team_is_valid(team_id id)
2846 {
2847 	if (id <= 0)
2848 		return false;
2849 
2850 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2851 
2852 	return team_get_team_struct_locked(id) != NULL;
2853 }
2854 
2855 
2856 Team*
2857 team_get_team_struct_locked(team_id id)
2858 {
2859 	return sTeamHash.Lookup(id);
2860 }
2861 
2862 
2863 void
2864 team_set_controlling_tty(int32 ttyIndex)
2865 {
2866 	// lock the team, so its session won't change while we're playing with it
2867 	Team* team = thread_get_current_thread()->team;
2868 	TeamLocker teamLocker(team);
2869 
2870 	// get and lock the session
2871 	ProcessSession* session = team->group->Session();
2872 	AutoLocker<ProcessSession> sessionLocker(session);
2873 
2874 	// set the session's fields
2875 	session->controlling_tty = ttyIndex;
2876 	session->foreground_group = -1;
2877 }
2878 
2879 
2880 int32
2881 team_get_controlling_tty()
2882 {
2883 	// lock the team, so its session won't change while we're playing with it
2884 	Team* team = thread_get_current_thread()->team;
2885 	TeamLocker teamLocker(team);
2886 
2887 	// get and lock the session
2888 	ProcessSession* session = team->group->Session();
2889 	AutoLocker<ProcessSession> sessionLocker(session);
2890 
2891 	// get the session's field
2892 	return session->controlling_tty;
2893 }
2894 
2895 
2896 status_t
2897 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2898 {
2899 	// lock the team, so its session won't change while we're playing with it
2900 	Thread* thread = thread_get_current_thread();
2901 	Team* team = thread->team;
2902 	TeamLocker teamLocker(team);
2903 
2904 	// get and lock the session
2905 	ProcessSession* session = team->group->Session();
2906 	AutoLocker<ProcessSession> sessionLocker(session);
2907 
2908 	// check given TTY -- must be the controlling tty of the calling process
2909 	if (session->controlling_tty != ttyIndex)
2910 		return ENOTTY;
2911 
2912 	// check given process group -- must belong to our session
2913 	{
2914 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2915 		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
2916 		if (group == NULL || group->Session() != session)
2917 			return B_BAD_VALUE;
2918 	}
2919 
2920 	// If we are a background group, we can only set the foreground group
2921 	// unharmed when we ignore or block SIGTTOU; otherwise the group gets one.
2922 	if (session->foreground_group != -1
2923 		&& session->foreground_group != team->group_id
2924 		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN) {
2925 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2926 
2927 		if (!is_team_signal_blocked(team, SIGTTOU)) {
2928 			pid_t groupID = team->group_id;
2929 
2930 			schedulerLocker.Unlock();
2931 			sessionLocker.Unlock();
2932 			teamLocker.Unlock();
2933 
2934 			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
2935 			send_signal_to_process_group(groupID, signal, 0);
2936 			return B_INTERRUPTED;
2937 		}
2938 	}
2939 
2940 	session->foreground_group = processGroupID;
2941 
2942 	return B_OK;
2943 }
2944 
2945 
2946 /*!	Removes the specified team from the global team hash, from its process
2947 	group, and from its parent.
2948 	It also moves all of its children to the kernel team.
2949 
2950 	The caller must hold the following locks:
2951 	- \a team's process group's lock,
2952 	- the kernel team's lock,
2953 	- \a team's parent team's lock (might be the kernel team), and
2954 	- \a team's lock.
2955 */
2956 void
2957 team_remove_team(Team* team, pid_t& _signalGroup)
2958 {
2959 	Team* parent = team->parent;
2960 
2961 	// remember how long this team lasted
2962 	parent->dead_children.kernel_time += team->dead_threads_kernel_time
2963 		+ team->dead_children.kernel_time;
2964 	parent->dead_children.user_time += team->dead_threads_user_time
2965 		+ team->dead_children.user_time;
2966 
2967 	// remove the team from the hash table
2968 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2969 	sTeamHash.Remove(team);
2970 	sUsedTeams--;
2971 	teamsLocker.Unlock();
2972 
2973 	// The team can no longer be accessed by ID. Navigation to it is still
2974 	// possible from its process group and its parent and children, but that
2975 	// will be rectified shortly.
2976 	team->state = TEAM_STATE_DEATH;
2977 
2978 	// If we're a controlling process (i.e. a session leader with controlling
2979 	// terminal), there's a bit of signaling we have to do. We can't do any of
2980 	// the signaling here due to the bunch of locks we're holding, but we need
2981 	// to determine whom to signal.
2982 	_signalGroup = -1;
2983 	bool isSessionLeader = false;
2984 	if (team->session_id == team->id
2985 		&& team->group->Session()->controlling_tty >= 0) {
2986 		isSessionLeader = true;
2987 
2988 		ProcessSession* session = team->group->Session();
2989 
2990 		AutoLocker<ProcessSession> sessionLocker(session);
2991 
2992 		session->controlling_tty = -1;
2993 		_signalGroup = session->foreground_group;
2994 	}
2995 
2996 	// remove us from our process group
2997 	remove_team_from_group(team);
2998 
2999 	// move the team's children to the kernel team
3000 	while (Team* child = team->children) {
3001 		// remove the child from the current team and add it to the kernel team
3002 		TeamLocker childLocker(child);
3003 
3004 		remove_team_from_parent(team, child);
3005 		insert_team_into_parent(sKernelTeam, child);
3006 
3007 		// move job control entries too
3008 		sKernelTeam->stopped_children.entries.MoveFrom(
3009 			&team->stopped_children.entries);
3010 		sKernelTeam->continued_children.entries.MoveFrom(
3011 			&team->continued_children.entries);
3012 
3013 		// If the team was a session leader with controlling terminal,
3014 		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
3015 		// groups with stopped processes. Due to locking complications we can't
3016 		// do that here, so we only check whether we were a reason for the
3017 		// child's process group not being an orphan and, if so, schedule a
3018 		// later check (cf. orphaned_process_group_check()).
3019 		if (isSessionLeader) {
3020 			ProcessGroup* childGroup = child->group;
3021 			if (childGroup->Session()->id == team->session_id
3022 				&& childGroup->id != team->group_id) {
3023 				childGroup->ScheduleOrphanedCheck();
3024 			}
3025 		}
3026 
3027 		// Note, we don't move the dead children entries. Those will be deleted
3028 		// when the team structure is deleted.
3029 	}
3030 
3031 	// remove us from our parent
3032 	remove_team_from_parent(parent, team);
3033 }
3034 
3035 
3036 /*!	Kills all threads but the main thread of the team and shuts down user
3037 	debugging for it.
3038 	To be called on exit of the team's main thread. No locks must be held.
3039 
3040 	\param team The team in question.
3041 	\return The port of the debugger for the team, -1 if none. To be passed to
3042 		team_delete_team().
3043 */
3044 port_id
3045 team_shutdown_team(Team* team)
3046 {
3047 	ASSERT(thread_get_current_thread() == team->main_thread);
3048 
3049 	TeamLocker teamLocker(team);
3050 
3051 	// Make sure debugging changes won't happen anymore.
3052 	port_id debuggerPort = -1;
3053 	while (true) {
3054 		// If a debugger change is in progress for the team, we'll have to
3055 		// wait until it is done.
3056 		ConditionVariableEntry waitForDebuggerEntry;
3057 		bool waitForDebugger = false;
3058 
3059 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
3060 
3061 		if (team->debug_info.debugger_changed_condition != NULL) {
3062 			team->debug_info.debugger_changed_condition->Add(
3063 				&waitForDebuggerEntry);
3064 			waitForDebugger = true;
3065 		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
3066 			// The team is being debugged. That will stop with the termination
3067 			// of the nub thread. Since we set the team state to death, no one
3068 			// can install a debugger anymore. We fetch the debugger's port to
3069 			// send it a message at the bitter end.
3070 			debuggerPort = team->debug_info.debugger_port;
3071 		}
3072 
3073 		debugInfoLocker.Unlock();
3074 
3075 		if (!waitForDebugger)
3076 			break;
3077 
3078 		// wait for the debugger change to be finished
3079 		teamLocker.Unlock();
3080 
3081 		waitForDebuggerEntry.Wait();
3082 
3083 		teamLocker.Lock();
3084 	}
3085 
3086 	// Mark the team as shutting down. That will prevent new threads from being
3087 	// created and debugger changes from taking place.
3088 	team->state = TEAM_STATE_SHUTDOWN;
3089 
3090 	// delete all timers
3091 	team->DeleteUserTimers(false);
3092 
3093 	// deactivate CPU time user timers for the team
3094 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3095 
3096 	if (team->HasActiveCPUTimeUserTimers())
3097 		team->DeactivateCPUTimeUserTimers();
3098 
3099 	schedulerLocker.Unlock();
3100 
3101 	// kill all threads but the main thread
3102 	team_death_entry deathEntry;
3103 	deathEntry.condition.Init(team, "team death");
3104 
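	// Send SIGKILLTHR to all threads but the main thread and wait for them to
	// die. We re-examine the thread list after waiting, since the team lock
	// is released in the meantime.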
3105 	while (true) {
3106 		team->death_entry = &deathEntry;
3107 		deathEntry.remaining_threads = 0;
3108 
3109 		Thread* thread = team->thread_list;
3110 		while (thread != NULL) {
3111 			if (thread != team->main_thread) {
3112 				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
3113 				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
3114 				deathEntry.remaining_threads++;
3115 			}
3116 
3117 			thread = thread->team_next;
3118 		}
3119 
3120 		if (deathEntry.remaining_threads == 0)
3121 			break;
3122 
3123 		// there are threads to wait for
3124 		ConditionVariableEntry entry;
3125 		deathEntry.condition.Add(&entry);
3126 
3127 		teamLocker.Unlock();
3128 
3129 		entry.Wait();
3130 
3131 		teamLocker.Lock();
3132 	}
3133 
3134 	team->death_entry = NULL;
3135 
3136 	return debuggerPort;
3137 }
3138 
3139 
3140 /*!	Called on team exit to notify threads waiting on the team and free most
3141 	resources associated with it.
3142 	The caller shouldn't hold any locks.
3143 */
3144 void
3145 team_delete_team(Team* team, port_id debuggerPort)
3146 {
3147 	// Not quite in our job description, but work that has been left by
3148 	// team_remove_team() and that can be done now that we're not holding any
3149 	// locks.
3150 	orphaned_process_group_check();
3151 
3152 	team_id teamID = team->id;
3153 
3154 	ASSERT(team->num_threads == 0);
3155 
3156 	// If someone is waiting for this team to be loaded, but it dies
3157 	// unexpectedly before being done, we need to notify the waiting
3158 	// thread now.
3159 
3160 	TeamLocker teamLocker(team);
3161 
3162 	if (team->loading_info) {
3163 		// there's indeed someone waiting
3164 		struct team_loading_info* loadingInfo = team->loading_info;
3165 		team->loading_info = NULL;
3166 
3167 		loadingInfo->result = B_ERROR;
3168 		loadingInfo->done = true;
3169 
3170 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3171 
3172 		// wake up the waiting thread
3173 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
3174 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
3175 	}
3176 
3177 	// notify team watchers
3178 
3179 	{
3180 		// we're not reachable from anyone anymore at this point, so we
3181 		// can safely access the list without any locking
3182 		struct team_watcher* watcher;
3183 		while ((watcher = (struct team_watcher*)list_remove_head_item(
3184 				&team->watcher_list)) != NULL) {
3185 			watcher->hook(teamID, watcher->data);
3186 			free(watcher);
3187 		}
3188 	}
3189 
3190 	teamLocker.Unlock();
3191 
3192 	sNotificationService.Notify(TEAM_REMOVED, team);
3193 
3194 	// free team resources
3195 
3196 	delete_realtime_sem_context(team->realtime_sem_context);
3197 	xsi_sem_undo(team);
3198 	remove_images(team);
3199 	team->address_space->RemoveAndPut();
3200 
3201 	team->ReleaseReference();
3202 
3203 	// notify the debugger that the team is gone
3204 	user_debug_team_deleted(teamID, debuggerPort);
3205 }
3206 
3207 
3208 Team*
3209 team_get_kernel_team(void)
3210 {
3211 	return sKernelTeam;
3212 }
3213 
3214 
3215 team_id
3216 team_get_kernel_team_id(void)
3217 {
3218 	if (!sKernelTeam)
3219 		return 0;
3220 
3221 	return sKernelTeam->id;
3222 }
3223 
3224 
3225 team_id
3226 team_get_current_team_id(void)
3227 {
3228 	return thread_get_current_thread()->team->id;
3229 }
3230 
3231 
3232 status_t
3233 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3234 {
3235 	if (id == sKernelTeam->id) {
3236 		// we're the kernel team, so we don't have to go through all
3237 		// the hassle (locking and hash lookup)
3238 		*_addressSpace = VMAddressSpace::GetKernel();
3239 		return B_OK;
3240 	}
3241 
3242 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3243 
3244 	Team* team = team_get_team_struct_locked(id);
3245 	if (team == NULL)
3246 		return B_BAD_VALUE;
3247 
3248 	team->address_space->Get();
3249 	*_addressSpace = team->address_space;
3250 	return B_OK;
3251 }
3252 
3253 
3254 /*!	Sets the team's job control state.
3255 	The caller must hold the parent team's lock. Interrupts are allowed to be
3256 	enabled or disabled. In the latter case the scheduler lock may be held as
3257 	well.
3258 	\param team The team whose job control state shall be set.
3259 	\param newState The new state to be set.
3260 	\param signal The signal the new state was caused by. Can be \c NULL, if
3261 		none. Then the caller is responsible for filling in the following
3262 		fields of the entry before releasing the parent team's lock, unless
3263 		the new state is \c JOB_CONTROL_STATE_NONE:
3264 		- \c signal: The number of the signal causing the state change.
3265 		- \c signaling_user: The real UID of the user sending the signal.
3266 	\param schedulerLocked Indicates whether the scheduler lock is being held.
3267 */
3268 void
3269 team_set_job_control_state(Team* team, job_control_state newState,
3270 	Signal* signal, bool schedulerLocked)
3271 {
3272 	if (team == NULL || team->job_control_entry == NULL)
3273 		return;
3274 
3275 	// don't touch anything if the state stays the same or the team is
3276 	// already dead
3277 	job_control_entry* entry = team->job_control_entry;
3278 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
3279 		return;
3280 
3281 	T(SetJobControlState(team->id, newState, signal));
3282 
3283 	// remove from the old list
3284 	switch (entry->state) {
3285 		case JOB_CONTROL_STATE_NONE:
3286 			// entry is in no list ATM
3287 			break;
3288 		case JOB_CONTROL_STATE_DEAD:
3289 			// can't get here
3290 			break;
3291 		case JOB_CONTROL_STATE_STOPPED:
3292 			team->parent->stopped_children.entries.Remove(entry);
3293 			break;
3294 		case JOB_CONTROL_STATE_CONTINUED:
3295 			team->parent->continued_children.entries.Remove(entry);
3296 			break;
3297 	}
3298 
3299 	entry->state = newState;
3300 
3301 	if (signal != NULL) {
3302 		entry->signal = signal->Number();
3303 		entry->signaling_user = signal->SendingUser();
3304 	}
3305 
3306 	// add to new list
3307 	team_job_control_children* childList = NULL;
3308 	switch (entry->state) {
3309 		case JOB_CONTROL_STATE_NONE:
3310 			// entry doesn't get into any list
3311 			break;
3312 		case JOB_CONTROL_STATE_DEAD:
3313 			childList = &team->parent->dead_children;
3314 			team->parent->dead_children.count++;
3315 			break;
3316 		case JOB_CONTROL_STATE_STOPPED:
3317 			childList = &team->parent->stopped_children;
3318 			break;
3319 		case JOB_CONTROL_STATE_CONTINUED:
3320 			childList = &team->parent->continued_children;
3321 			break;
3322 	}
3323 
3324 	if (childList != NULL) {
3325 		childList->entries.Add(entry);
3326 		team->parent->dead_children.condition_variable.NotifyAll(
3327 			schedulerLocked);
3328 	}
3329 }
3330 
3331 
3332 /*!	Inits the given team's exit information, if not yet initialized, to some
3333 	generic "killed" status.
3334 	The caller must not hold the team's lock. Interrupts must be enabled.
3335 
3336 	\param team The team whose exit info shall be initialized.
3337 */
3338 void
3339 team_init_exit_info_on_error(Team* team)
3340 {
3341 	TeamLocker teamLocker(team);
3342 
3343 	if (!team->exit.initialized) {
3344 		team->exit.reason = CLD_KILLED;
3345 		team->exit.signal = SIGKILL;
3346 		team->exit.signaling_user = geteuid();
3347 		team->exit.status = 0;
3348 		team->exit.initialized = true;
3349 	}
3350 }
3351 
3352 
3353 /*! Adds a hook to the team that is called as soon as this team goes away.
3354 	This call might get public in the future.
3355 */
3356 status_t
3357 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3358 {
3359 	if (hook == NULL || teamID < B_OK)
3360 		return B_BAD_VALUE;
3361 
3362 	// create the watcher object
3363 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3364 	if (watcher == NULL)
3365 		return B_NO_MEMORY;
3366 
3367 	watcher->hook = hook;
3368 	watcher->data = data;
3369 
3370 	// add watcher if the team isn't already dying
3371 	// get the team
3372 	Team* team = Team::GetAndLock(teamID);
3373 	if (team == NULL) {
3374 		free(watcher);
3375 		return B_BAD_TEAM_ID;
3376 	}
3377 
3378 	list_add_item(&team->watcher_list, watcher);
3379 
3380 	team->UnlockAndReleaseReference();
3381 
3382 	return B_OK;
3383 }
3384 
3385 
3386 status_t
3387 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3388 {
3389 	if (hook == NULL || teamID < 0)
3390 		return B_BAD_VALUE;
3391 
3392 	// get team and remove watcher (if present)
3393 	Team* team = Team::GetAndLock(teamID);
3394 	if (team == NULL)
3395 		return B_BAD_TEAM_ID;
3396 
3397 	// search for watcher
3398 	team_watcher* watcher = NULL;
3399 	while ((watcher = (team_watcher*)list_get_next_item(
3400 			&team->watcher_list, watcher)) != NULL) {
3401 		if (watcher->hook == hook && watcher->data == data) {
3402 			// got it!
3403 			list_remove_item(&team->watcher_list, watcher);
3404 			break;
3405 		}
3406 	}
3407 
3408 	team->UnlockAndReleaseReference();
3409 
3410 	if (watcher == NULL)
3411 		return B_ENTRY_NOT_FOUND;
3412 
3413 	free(watcher);
3414 	return B_OK;
3415 }
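

// A hypothetical usage sketch (hook and cookie names invented for
// illustration): a component holding per-team state would pair the two calls
// like this:
//
//	static void
//	my_team_gone(team_id team, void* cookie)
//	{
//		// release the state associated with the team
//	}
//
//	status_t error = start_watching_team(teamID, &my_team_gone, myCookie);
//	...
//	if (error == B_OK)
//		stop_watching_team(teamID, &my_team_gone, myCookie);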
3416 
3417 
3418 /*!	Allocates a user_thread structure from the team.
3419 	The team lock must be held, unless the function is called for the team's
3420 	main thread. Interrupts must be enabled.
3421 */
3422 struct user_thread*
3423 team_allocate_user_thread(Team* team)
3424 {
3425 	if (team->user_data == 0)
3426 		return NULL;
3427 
3428 	// take an entry from the free list, if any
3429 	if (struct free_user_thread* entry = team->free_user_threads) {
3430 		user_thread* thread = entry->thread;
3431 		team->free_user_threads = entry->next;
3432 		free(entry);
3433 		return thread;
3434 	}
3435 
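	// Nothing in the free list -- carve a new slot out of the team's user
	// data area, growing the area by one page if necessary.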
3436 	while (true) {
3437 		// enough space left?
3438 		size_t needed = ROUNDUP(sizeof(user_thread), 128);
3439 		if (team->user_data_size - team->used_user_data < needed) {
3440 			// try to resize the area
3441 			if (resize_area(team->user_data_area,
3442 					team->user_data_size + B_PAGE_SIZE) != B_OK) {
3443 				return NULL;
3444 			}
3445 
3446 			// resized user area successfully -- try to allocate the user_thread
3447 			// again
3448 			team->user_data_size += B_PAGE_SIZE;
3449 			continue;
3450 		}
3451 
3452 		// allocate the user_thread
3453 		user_thread* thread
3454 			= (user_thread*)(team->user_data + team->used_user_data);
3455 		team->used_user_data += needed;
3456 
3457 		return thread;
3458 	}
3459 }
3460 
3461 
3462 /*!	Frees the given user_thread structure.
3463 	The team's lock must not be held. Interrupts must be enabled.
3464 	\param team The team the user thread was allocated from.
3465 	\param userThread The user thread to free.
3466 */
3467 void
3468 team_free_user_thread(Team* team, struct user_thread* userThread)
3469 {
3470 	if (userThread == NULL)
3471 		return;
3472 
3473 	// create a free list entry
3474 	free_user_thread* entry
3475 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3476 	if (entry == NULL) {
3477 		// we have to leak the user thread :-/
3478 		return;
3479 	}
3480 
3481 	// add to free list
3482 	TeamLocker teamLocker(team);
3483 
3484 	entry->thread = userThread;
3485 	entry->next = team->free_user_threads;
3486 	team->free_user_threads = entry;
3487 }
3488 
3489 
3490 //	#pragma mark - Associated data interface
3491 
3492 
3493 AssociatedData::AssociatedData()
3494 	:
3495 	fOwner(NULL)
3496 {
3497 }
3498 
3499 
3500 AssociatedData::~AssociatedData()
3501 {
3502 }
3503 
3504 
3505 void
3506 AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
3507 {
3508 }
3509 
3510 
3511 AssociatedDataOwner::AssociatedDataOwner()
3512 {
3513 	mutex_init(&fLock, "associated data owner");
3514 }
3515 
3516 
3517 AssociatedDataOwner::~AssociatedDataOwner()
3518 {
3519 	mutex_destroy(&fLock);
3520 }
3521 
3522 
3523 bool
3524 AssociatedDataOwner::AddData(AssociatedData* data)
3525 {
3526 	MutexLocker locker(fLock);
3527 
3528 	if (data->Owner() != NULL)
3529 		return false;
3530 
3531 	data->AcquireReference();
3532 	fList.Add(data);
3533 	data->SetOwner(this);
3534 
3535 	return true;
3536 }
3537 
3538 
3539 bool
3540 AssociatedDataOwner::RemoveData(AssociatedData* data)
3541 {
3542 	MutexLocker locker(fLock);
3543 
3544 	if (data->Owner() != this)
3545 		return false;
3546 
3547 	data->SetOwner(NULL);
3548 	fList.Remove(data);
3549 
3550 	locker.Unlock();
3551 
3552 	data->ReleaseReference();
3553 
3554 	return true;
3555 }
3556 
3557 
3558 void
3559 AssociatedDataOwner::PrepareForDeletion()
3560 {
3561 	MutexLocker locker(fLock);
3562 
3563 	// move all data to a temporary list and unset the owner
3564 	DataList list;
3565 	list.MoveFrom(&fList);
3566 
3567 	for (DataList::Iterator it = list.GetIterator();
3568 		AssociatedData* data = it.Next();) {
3569 		data->SetOwner(NULL);
3570 	}
3571 
3572 	locker.Unlock();
3573 
3574 	// call the notification hooks and release our references
3575 	while (AssociatedData* data = list.RemoveHead()) {
3576 		data->OwnerDeleted(this);
3577 		data->ReleaseReference();
3578 	}
3579 }
3580 
3581 
3582 /*!	Associates data with the current team.
3583 	When the team is deleted, the data object is notified.
3584 	The team acquires a reference to the object.
3585 
3586 	\param data The data object.
3587 	\return \c true on success, \c false otherwise. Fails only when the supplied
3588 		data object is already associated with another owner.
3589 */
3590 bool
3591 team_associate_data(AssociatedData* data)
3592 {
3593 	return thread_get_current_thread()->team->AddData(data);
3594 }
3595 
3596 
3597 /*!	Dissociates data from the current team.
3598 	Balances an earlier call to team_associate_data().
3599 
3600 	\param data The data object.
3601 	\return \c true on success, \c false otherwise. Fails only when the data
3602 		object is not associated with the current team.
3603 */
3604 bool
3605 team_dissociate_data(AssociatedData* data)
3606 {
3607 	return thread_get_current_thread()->team->RemoveData(data);
3608 }
3609 
3610 
3611 //	#pragma mark - Public kernel API
3612 
3613 
3614 thread_id
3615 load_image(int32 argCount, const char** args, const char** env)
3616 {
3617 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
3618 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
3619 }
3620 
3621 
3622 thread_id
3623 load_image_etc(int32 argCount, const char* const* args,
3624 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3625 {
3626 	// we need to flatten the args and environment
3627 
3628 	if (args == NULL)
3629 		return B_BAD_VALUE;
3630 
3631 	// determine total needed size
3632 	int32 argSize = 0;
3633 	for (int32 i = 0; i < argCount; i++)
3634 		argSize += strlen(args[i]) + 1;
3635 
3636 	int32 envCount = 0;
3637 	int32 envSize = 0;
3638 	while (env != NULL && env[envCount] != NULL)
3639 		envSize += strlen(env[envCount++]) + 1;
3640 
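	// The flat buffer consists of the argument and environment pointer arrays
	// (each terminated by a NULL entry), followed by the strings they point
	// to.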
3641 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3642 	if (size > MAX_PROCESS_ARGS_SIZE)
3643 		return B_TOO_MANY_ARGS;
3644 
3645 	// allocate space
3646 	char** flatArgs = (char**)malloc(size);
3647 	if (flatArgs == NULL)
3648 		return B_NO_MEMORY;
3649 
3650 	char** slot = flatArgs;
3651 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3652 
3653 	// copy arguments and environment
3654 	for (int32 i = 0; i < argCount; i++) {
3655 		int32 argSize = strlen(args[i]) + 1;
3656 		memcpy(stringSpace, args[i], argSize);
3657 		*slot++ = stringSpace;
3658 		stringSpace += argSize;
3659 	}
3660 
3661 	*slot++ = NULL;
3662 
3663 	for (int32 i = 0; i < envCount; i++) {
3664 		int32 envSize = strlen(env[i]) + 1;
3665 		memcpy(stringSpace, env[i], envSize);
3666 		*slot++ = stringSpace;
3667 		stringSpace += envSize;
3668 	}
3669 
3670 	*slot++ = NULL;
3671 
3672 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3673 		priority, parentID, flags, -1, 0);
3674 
3675 	free(flatArgs);
3676 		// load_image_internal() unset our variable if it took over ownership
3677 
3678 	return thread;
3679 }
3680 
3681 
3682 status_t
3683 wait_for_team(team_id id, status_t* _returnCode)
3684 {
3685 	// check whether the team exists
3686 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3687 
3688 	Team* team = team_get_team_struct_locked(id);
3689 	if (team == NULL)
3690 		return B_BAD_TEAM_ID;
3691 
3692 	id = team->id;
3693 
3694 	teamsLocker.Unlock();
3695 
3696 	// wait for the main thread (it has the same ID as the team)
3697 	return wait_for_thread(id, _returnCode);
3698 }
3699 
3700 
3701 status_t
3702 kill_team(team_id id)
3703 {
3704 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3705 
3706 	Team* team = team_get_team_struct_locked(id);
3707 	if (team == NULL)
3708 		return B_BAD_TEAM_ID;
3709 
3710 	id = team->id;
3711 
3712 	teamsLocker.Unlock();
3713 
3714 	if (team == sKernelTeam)
3715 		return B_NOT_ALLOWED;
3716 
3717 	// Just kill the team's main thread (it has the same ID as the team). The
3718 	// cleanup code there will take care of the team.
3719 	return kill_thread(id);
3720 }
3721 
3722 
3723 status_t
3724 _get_team_info(team_id id, team_info* info, size_t size)
3725 {
3726 	// get the team
3727 	Team* team = Team::Get(id);
3728 	if (team == NULL)
3729 		return B_BAD_TEAM_ID;
3730 	BReference<Team> teamReference(team, true);
3731 
3732 	// fill in the info
3733 	return fill_team_info(team, info, size);
3734 }
3735 
3736 
3737 status_t
3738 _get_next_team_info(int32* cookie, team_info* info, size_t size)
3739 {
3740 	int32 slot = *cookie;
3741 	if (slot < 1)
3742 		slot = 1;
3743 
3744 	InterruptsSpinLocker locker(sTeamHashLock);
3745 
3746 	team_id lastTeamID = peek_next_thread_id();
3747 		// TODO: This is broken, since the id can wrap around!
3748 
3749 	// get next valid team
3750 	Team* team = NULL;
3751 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3752 		slot++;
3753 
3754 	if (team == NULL)
3755 		return B_BAD_TEAM_ID;
3756 
3757 	// get a reference to the team and unlock
3758 	BReference<Team> teamReference(team);
3759 	locker.Unlock();
3760 
3761 	// fill in the info
3762 	*cookie = ++slot;
3763 	return fill_team_info(team, info, size);
3764 }
3765 
3766 
3767 status_t
3768 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3769 {
3770 	if (size != sizeof(team_usage_info))
3771 		return B_BAD_VALUE;
3772 
3773 	return common_get_team_usage_info(id, who, info, 0);
3774 }
3775 
3776 
3777 pid_t
3778 getpid(void)
3779 {
3780 	return thread_get_current_thread()->team->id;
3781 }
3782 
3783 
3784 pid_t
3785 getppid(void)
3786 {
3787 	Team* team = thread_get_current_thread()->team;
3788 
3789 	TeamLocker teamLocker(team);
3790 
3791 	return team->parent->id;
3792 }
3793 
3794 
3795 pid_t
3796 getpgid(pid_t id)
3797 {
3798 	if (id < 0) {
3799 		errno = EINVAL;
3800 		return -1;
3801 	}
3802 
3803 	if (id == 0) {
3804 		// get process group of the calling process
3805 		Team* team = thread_get_current_thread()->team;
3806 		TeamLocker teamLocker(team);
3807 		return team->group_id;
3808 	}
3809 
3810 	// get the team
3811 	Team* team = Team::GetAndLock(id);
3812 	if (team == NULL) {
3813 		errno = ESRCH;
3814 		return -1;
3815 	}
3816 
3817 	// get the team's process group ID
3818 	pid_t groupID = team->group_id;
3819 
3820 	team->UnlockAndReleaseReference();
3821 
3822 	return groupID;
3823 }
3824 
3825 
3826 pid_t
3827 getsid(pid_t id)
3828 {
3829 	if (id < 0) {
3830 		errno = EINVAL;
3831 		return -1;
3832 	}
3833 
3834 	if (id == 0) {
3835 		// get session of the calling process
3836 		Team* team = thread_get_current_thread()->team;
3837 		TeamLocker teamLocker(team);
3838 		return team->session_id;
3839 	}
3840 
3841 	// get the team
3842 	Team* team = Team::GetAndLock(id);
3843 	if (team == NULL) {
3844 		errno = ESRCH;
3845 		return -1;
3846 	}
3847 
3848 	// get the team's session ID
3849 	pid_t sessionID = team->session_id;
3850 
3851 	team->UnlockAndReleaseReference();
3852 
3853 	return sessionID;
3854 }
3855 
3856 
3857 //	#pragma mark - User syscalls
3858 
3859 
3860 status_t
3861 _user_exec(const char* userPath, const char* const* userFlatArgs,
3862 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
3863 {
3864 	// NOTE: Since this function normally doesn't return, don't use automatic
3865 	// variables that need destruction in the function scope.
3866 	char path[B_PATH_NAME_LENGTH];
3867 
3868 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3869 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3870 		return B_BAD_ADDRESS;
3871 
3872 	// copy and relocate the flat arguments
3873 	char** flatArgs;
3874 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3875 		argCount, envCount, flatArgs);
3876 
3877 	if (error == B_OK) {
3878 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3879 			envCount, umask);
3880 			// this one only returns in case of error
3881 	}
3882 
3883 	free(flatArgs);
3884 	return error;
3885 }
3886 
3887 
3888 thread_id
3889 _user_fork(void)
3890 {
3891 	return fork_team();
3892 }
3893 
3894 
3895 pid_t
3896 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo)
3897 {
3898 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
3899 		return B_BAD_ADDRESS;
3900 
3901 	siginfo_t info;
3902 	pid_t foundChild = wait_for_child(child, flags, info);
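	// a negative result may be B_INTERRUPTED, in which case
	// syscall_restart_handle_post() arranges for the syscall to be restarted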
3903 	if (foundChild < 0)
3904 		return syscall_restart_handle_post(foundChild);
3905 
3906 	// copy info back to userland
3907 	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
3908 		return B_BAD_ADDRESS;
3909 
3910 	return foundChild;
3911 }
3912 
3913 
3914 pid_t
3915 _user_process_info(pid_t process, int32 which)
3916 {
3917 	// we only allow returning the parent of the current process
3918 	if (which == PARENT_ID
3919 		&& process != 0 && process != thread_get_current_thread()->team->id)
3920 		return B_BAD_VALUE;
3921 
3922 	pid_t result;
3923 	switch (which) {
3924 		case SESSION_ID:
3925 			result = getsid(process);
3926 			break;
3927 		case GROUP_ID:
3928 			result = getpgid(process);
3929 			break;
3930 		case PARENT_ID:
3931 			result = getppid();
3932 			break;
3933 		default:
3934 			return B_BAD_VALUE;
3935 	}
3936 
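	// on failure the getters above returned -1 and set errno; since Haiku's
	// errno values are negative status codes, errno can be returned directly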
3937 	return result >= 0 ? result : errno;
3938 }
3939 
3940 
3941 pid_t
3942 _user_setpgid(pid_t processID, pid_t groupID)
3943 {
3944 	// setpgid() can be called either by the parent of the target process or
3945 	// by the process itself to do one of two things:
3946 	// * Create a new process group with the target process' ID and the target
3947 	//   process as group leader.
3948 	// * Set the target process' process group to an already existing one in the
3949 	//   same session.
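	//
	// For example (userland usage sketch): setpgid(0, 0) makes the calling
	// process the leader of a new group with its own ID, while
	// setpgid(childPID, existingGroupID) moves a fork()ed child that has not
	// called exec*() yet into an existing group of the same session.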
3950 
3951 	if (groupID < 0)
3952 		return B_BAD_VALUE;
3953 
3954 	Team* currentTeam = thread_get_current_thread()->team;
3955 	if (processID == 0)
3956 		processID = currentTeam->id;
3957 
3958 	// if the group ID is not specified, use the target process' ID
3959 	if (groupID == 0)
3960 		groupID = processID;
3961 
3962 	// We loop when running into the following race condition: We create a new
3963 	// process group, because there isn't one with that ID yet, but later when
3964 	// trying to publish it, we find that someone else created and published
3965 	// a group with that ID in the meantime. In that case we just restart the
3966 	// whole action.
3967 	while (true) {
3968 		// Look up the process group by ID. If it doesn't exist yet and we are
3969 		// allowed to create a new one, do that.
3970 		ProcessGroup* group = ProcessGroup::Get(groupID);
3971 		bool newGroup = false;
3972 		if (group == NULL) {
3973 			if (groupID != processID)
3974 				return B_NOT_ALLOWED;
3975 
3976 			group = new(std::nothrow) ProcessGroup(groupID);
3977 			if (group == NULL)
3978 				return B_NO_MEMORY;
3979 
3980 			newGroup = true;
3981 		}
3982 		BReference<ProcessGroup> groupReference(group, true);
3983 
3984 		// get the target team
3985 		Team* team = Team::Get(processID);
3986 		if (team == NULL)
3987 			return ESRCH;
3988 		BReference<Team> teamReference(team, true);
3989 
3990 		// lock the new process group and the team's current process group
3991 		while (true) {
3992 			// lock the team's current process group
3993 			team->LockProcessGroup();
3994 
3995 			ProcessGroup* oldGroup = team->group;
3996 			if (oldGroup == group) {
3997 				// it's the same as the target group, so just bail out
3998 				oldGroup->Unlock();
3999 				return group->id;
4000 			}
4001 
4002 			oldGroup->AcquireReference();
4003 
4004 			// Lock the target process group, if the locking order allows it:
			// groups must be locked in ascending ID order, and a newly created
			// group is not yet published, so no one else can contend for it.
4005 			if (newGroup || group->id > oldGroup->id) {
4006 				group->Lock();
4007 				break;
4008 			}
4009 
4010 			// otherwise try to lock the target group without blocking
4011 			if (group->TryLock())
4012 				break;
4013 
4014 			// no dice -- unlock the team's current process group and relock in
4015 			// the correct order
4016 			oldGroup->Unlock();
4017 
4018 			group->Lock();
4019 			oldGroup->Lock();
4020 
4021 			// check whether things are still the same
4022 			TeamLocker teamLocker(team);
4023 			if (team->group == oldGroup)
4024 				break;
4025 
4026 			// something changed -- unlock everything and retry
4027 			teamLocker.Unlock();
4028 			oldGroup->Unlock();
4029 			group->Unlock();
4030 			oldGroup->ReleaseReference();
4031 		}
4032 
4033 		// we now hold references and locks on both the old and new groups
4034 		BReference<ProcessGroup> oldGroupReference(team->group, true);
4035 		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
4036 		AutoLocker<ProcessGroup> groupLocker(group, true);
4037 
4038 		// also lock the target team and its parent
4039 		team->LockTeamAndParent(false);
4040 		TeamLocker parentLocker(team->parent, true);
4041 		TeamLocker teamLocker(team, true);
4042 
4043 		// perform the checks
4044 		if (team == currentTeam) {
4045 			// we set our own group
4046 
4047 			// we must not change our process group ID if we're a session leader
4048 			if (is_session_leader(currentTeam))
4049 				return B_NOT_ALLOWED;
4050 		} else {
4051 			// Calling team != target team. The target team must be a child of
4052 			// the calling team and in the same session. (If that's the case it
4053 			// isn't a session leader either.)
4054 			if (team->parent != currentTeam
4055 				|| team->session_id != currentTeam->session_id) {
4056 				return B_NOT_ALLOWED;
4057 			}
4058 
4059 			// The call is also supposed to fail on a child that has already
4060 			// executed exec*() [EACCES].
4061 			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
4062 				return EACCES;
4063 		}
4064 
4065 		// If we created a new process group, publish it now.
4066 		if (newGroup) {
4067 			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
4068 			if (sGroupHash.Lookup(groupID)) {
4069 				// A group with that ID has appeared since we first checked.
4070 				// Back to square one.
4071 				continue;
4072 			}
4073 
4074 			group->PublishLocked(team->group->Session());
4075 		} else if (group->Session()->id != team->session_id) {
4076 			// The existing target process group belongs to a different session.
4077 			// That's not allowed.
4078 			return B_NOT_ALLOWED;
4079 		}
4080 
4081 		// Everything is ready -- set the group.
4082 		remove_team_from_group(team);
4083 		insert_team_into_group(group, team);
4084 
4085 		// Changing the process group might have changed the situation for a
4086 		// parent waiting in wait_for_child(). Hence we notify it.
4087 		team->parent->dead_children.condition_variable.NotifyAll(false);
4088 
4089 		return group->id;
4090 	}
4091 }
4092 
4093 
4094 pid_t
4095 _user_setsid(void)
4096 {
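	// setsid() creates a new session with a new process group, both having
	// the calling team's ID, and makes the calling team their leader. The
	// call fails if the caller already leads a process group.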
4097 	Team* team = thread_get_current_thread()->team;
4098 
4099 	// create a new process group and session
4100 	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
4101 	if (group == NULL)
4102 		return B_NO_MEMORY;
4103 	BReference<ProcessGroup> groupReference(group, true);
4104 	AutoLocker<ProcessGroup> groupLocker(group);
4105 
4106 	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
4107 	if (session == NULL)
4108 		return B_NO_MEMORY;
4109 	BReference<ProcessSession> sessionReference(session, true);
4110 
4111 	// lock the team's current process group, parent, and the team itself
4112 	team->LockTeamParentAndProcessGroup();
4113 	BReference<ProcessGroup> oldGroupReference(team->group);
4114 	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
4115 	TeamLocker parentLocker(team->parent, true);
4116 	TeamLocker teamLocker(team, true);
4117 
4118 	// the team must not already be a process group leader
4119 	if (is_process_group_leader(team))
4120 		return B_NOT_ALLOWED;
4121 
4122 	// remove the team from the old and add it to the new process group
4123 	remove_team_from_group(team);
4124 	group->Publish(session);
4125 	insert_team_into_group(group, team);
4126 
4127 	// Changing the process group might have changed the situation for a
4128 	// parent waiting in wait_for_child(). Hence we notify it.
4129 	team->parent->dead_children.condition_variable.NotifyAll(false);
4130 
4131 	return group->id;
4132 }
4133 
4134 
4135 status_t
4136 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4137 {
4138 	status_t returnCode;
4139 	status_t status;
4140 
4141 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4142 		return B_BAD_ADDRESS;
4143 
4144 	status = wait_for_team(id, &returnCode);
4145 	if (status >= B_OK && _userReturnCode != NULL) {
4146 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4147 				!= B_OK)
4148 			return B_BAD_ADDRESS;
4149 		return B_OK;
4150 	}
4151 
4152 	return syscall_restart_handle_post(status);
4153 }
4154 
4155 
4156 thread_id
4157 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
4158 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
4159 	port_id errorPort, uint32 errorToken)
4160 {
4161 	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));
4162 
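	// at least the program path itself must be given (argv[0])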
4163 	if (argCount < 1)
4164 		return B_BAD_VALUE;
4165 
4166 	// copy and relocate the flat arguments
4167 	char** flatArgs;
4168 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
4169 		argCount, envCount, flatArgs);
4170 	if (error != B_OK)
4171 		return error;
4172 
4173 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
4174 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
4175 		errorToken);
4176 
4177 	free(flatArgs);
4178 		// load_image_internal() has set flatArgs to NULL if it took over
		// ownership, making the above free() a no-op in that case
4179 
4180 	return thread;
4181 }
4182 
4183 
4184 void
4185 _user_exit_team(status_t returnValue)
4186 {
4187 	Thread* thread = thread_get_current_thread();
4188 	Team* team = thread->team;
4189 
4190 	// set this thread's exit status
4191 	thread->exit.status = returnValue;
4192 
4193 	// set the team exit status
4194 	TeamLocker teamLocker(team);
4195 
4196 	if (!team->exit.initialized) {
4197 		team->exit.reason = CLD_EXITED;
4198 		team->exit.signal = 0;
4199 		team->exit.signaling_user = 0;
4200 		team->exit.status = returnValue;
4201 		team->exit.initialized = true;
4202 	}
4203 
4204 	teamLocker.Unlock();
4205 
4206 	// Stop the thread if the team is being debugged and preventing the exit
4207 	// has been requested (B_TEAM_DEBUG_PREVENT_EXIT).
4208 	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
4209 		user_debug_stop_thread();
4210 
4211 	// Send this thread a SIGKILL. This makes sure the thread will not return to
4212 	// userland. The signal handling code forwards the signal to the main
4213 	// thread (if that's not already this one), which will take the team down.
4214 	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
4215 	send_signal_to_thread(thread, signal, 0);
4216 }
4217 
4218 
4219 status_t
4220 _user_kill_team(team_id team)
4221 {
4222 	return kill_team(team);
4223 }
4224 
4225 
4226 status_t
4227 _user_get_team_info(team_id id, team_info* userInfo)
4228 {
4229 	status_t status;
4230 	team_info info;
4231 
4232 	if (!IS_USER_ADDRESS(userInfo))
4233 		return B_BAD_ADDRESS;
4234 
4235 	status = _get_team_info(id, &info, sizeof(team_info));
4236 	if (status == B_OK) {
4237 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4238 			return B_BAD_ADDRESS;
4239 	}
4240 
4241 	return status;
4242 }
4243 
4244 
4245 status_t
4246 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
4247 {
4248 	status_t status;
4249 	team_info info;
4250 	int32 cookie;
4251 
4252 	if (!IS_USER_ADDRESS(userCookie)
4253 		|| !IS_USER_ADDRESS(userInfo)
4254 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4255 		return B_BAD_ADDRESS;
4256 
4257 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
4258 	if (status != B_OK)
4259 		return status;
4260 
4261 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4262 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4263 		return B_BAD_ADDRESS;
4264 
4265 	return status;
4266 }
4267 
4268 
4269 team_id
4270 _user_get_current_team(void)
4271 {
4272 	return team_get_current_team_id();
4273 }
4274 
4275 
4276 status_t
4277 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4278 	size_t size)
4279 {
4280 	if (size != sizeof(team_usage_info))
4281 		return B_BAD_VALUE;
4282 
4283 	team_usage_info info;
4284 	status_t status = common_get_team_usage_info(team, who, &info,
4285 		B_CHECK_PERMISSION);
4286 
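	// copy the data back only on success, so that no uninitialized stack
	// contents can leak to userland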
4287 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4288 		|| (status == B_OK && user_memcpy(userInfo, &info, size) != B_OK)) {
4289 		return B_BAD_ADDRESS;
4290 	}
4291 
4292 	return status;
4293 }
4294 
4295 
4296 status_t
4297 _user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
4298 	size_t size, size_t* _sizeNeeded)
4299 {
4300 	// check parameters
4301 	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
4302 		|| (buffer == NULL && size > 0)
4303 		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
4304 		return B_BAD_ADDRESS;
4305 	}
4306 
4307 	KMessage info;
4308 
4309 	if ((flags & B_TEAM_INFO_BASIC) != 0) {
4310 		// allocate memory for a copy of the needed team data
4311 		struct ExtendedTeamData {
4312 			team_id	id;
4313 			pid_t	group_id;
4314 			pid_t	session_id;
4315 			uid_t	real_uid;
4316 			gid_t	real_gid;
4317 			uid_t	effective_uid;
4318 			gid_t	effective_gid;
4319 			char	name[B_OS_NAME_LENGTH];
4320 		};
4321 
4322 		ExtendedTeamData* teamClone
4323 			= (ExtendedTeamData*)malloc(sizeof(ExtendedTeamData));
4324 			// It would be nicer to use new, but then we'd have to use
4325 			// ObjectDeleter and declare the structure outside of the function
4326 			// due to template parameter restrictions.
4327 		if (teamClone == NULL)
4328 			return B_NO_MEMORY;
4329 		MemoryDeleter teamCloneDeleter(teamClone);
4330 
4331 		io_context* ioContext;
4332 		{
4333 			// get the team structure
4334 			Team* team = Team::GetAndLock(teamID);
4335 			if (team == NULL)
4336 				return B_BAD_TEAM_ID;
4337 			BReference<Team> teamReference(team, true);
4338 			TeamLocker teamLocker(team, true);
4339 
4340 			// copy the data
4341 			teamClone->id = team->id;
4342 			strlcpy(teamClone->name, team->Name(), sizeof(teamClone->name));
4343 			teamClone->group_id = team->group_id;
4344 			teamClone->session_id = team->session_id;
4345 			teamClone->real_uid = team->real_uid;
4346 			teamClone->real_gid = team->real_gid;
4347 			teamClone->effective_uid = team->effective_uid;
4348 			teamClone->effective_gid = team->effective_gid;
4349 
4350 			// also fetch a reference to the I/O context
4351 			ioContext = team->io_context;
4352 			vfs_get_io_context(ioContext);
4353 		}
4354 		CObjectDeleter<io_context> ioContextPutter(ioContext,
4355 			&vfs_put_io_context);
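		// from here on, the I/O context reference is put automatically when
		// this function returns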
4356 
4357 		// add the basic data to the info message
4358 		if (info.AddInt32("id", teamClone->id) != B_OK
4359 			|| info.AddString("name", teamClone->name) != B_OK
4360 			|| info.AddInt32("process group", teamClone->group_id) != B_OK
4361 			|| info.AddInt32("session", teamClone->session_id) != B_OK
4362 			|| info.AddInt32("uid", teamClone->real_uid) != B_OK
4363 			|| info.AddInt32("gid", teamClone->real_gid) != B_OK
4364 			|| info.AddInt32("euid", teamClone->effective_uid) != B_OK
4365 			|| info.AddInt32("egid", teamClone->effective_gid) != B_OK) {
4366 			return B_NO_MEMORY;
4367 		}
4368 
4369 		// get the current working directory from the I/O context
4370 		dev_t cwdDevice;
4371 		ino_t cwdDirectory;
4372 		{
4373 			MutexLocker ioContextLocker(ioContext->io_mutex);
4374 			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
4375 		}
4376 
4377 		if (info.AddInt32("cwd device", cwdDevice) != B_OK
4378 			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
4379 			return B_NO_MEMORY;
4380 		}
4381 	}
4382 
4383 	// TODO: Support the other flags!
4384 
4385 	// copy the needed size and, if it fits, the message back to userland
4386 	size_t sizeNeeded = info.ContentSize();
4387 	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
4388 		return B_BAD_ADDRESS;
4389 
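	// the needed size has already been copied out above, so on
	// B_BUFFER_OVERFLOW the caller can simply retry with a larger buffer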
4390 	if (sizeNeeded > size)
4391 		return B_BUFFER_OVERFLOW;
4392 
4393 	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
4394 		return B_BAD_ADDRESS;
4395 
4396 	return B_OK;
4397 }
4398