/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*!	Team functions */


#include <team.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>

#include <OS.h>

#include <AutoDeleter.h>
#include <FindDirectory.h>

#include <extended_system_info_defs.h>

#include <commpage.h>
#include <boot_device.h>
#include <elf.h>
#include <file_cache.h>
#include <find_directory_private.h>
#include <fs/KPath.h>
#include <heap.h>
#include <int.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <Notifications.h>
#include <port.h>
#include <posix/realtime_sem.h>
#include <posix/xsi_semaphore.h>
#include <sem.h>
#include <syscall_process_info.h>
#include <syscall_restart.h>
#include <syscalls.h>
#include <tls.h>
#include <tracing.h>
#include <user_runtime.h>
#include <user_thread.h>
#include <usergroup.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <util/AutoLock.h>

#include "TeamThreadTables.h"


//#define TRACE_TEAM
#ifdef TRACE_TEAM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


struct team_key {
	team_id id;
};

struct team_arg {
	char	*path;
	char	**flat_args;
	size_t	flat_args_size;
	uint32	arg_count;
	uint32	env_count;
	mode_t	umask;
	port_id	error_port;
	uint32	error_token;
};


namespace {


class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};


// #pragma mark - TeamTable


typedef BKernel::TeamThreadTable<Team> TeamTable;


// #pragma mark - ProcessGroupHashDefinition


struct ProcessGroupHashDefinition {
	typedef pid_t			KeyType;
	typedef	ProcessGroup	ValueType;

	size_t HashKey(pid_t key) const
	{
		return key;
	}

	size_t Hash(ProcessGroup* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(pid_t key, ProcessGroup* value) const
	{
		return value->id == key;
	}

	ProcessGroup*& GetLink(ProcessGroup* value) const
	{
		return value->next;
	}
};

typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;


}	// unnamed namespace


// #pragma mark -


// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static spinlock sTeamHashLock = B_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

static Team* sKernelTeam = NULL;

// A list of process groups of children of dying session leaders that need to
// be signalled if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

static TeamNotificationService sNotificationService;

static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;


// #pragma mark - TeamListIterator


TeamListIterator::TeamListIterator()
{
	// queue the entry
	InterruptsSpinLocker locker(sTeamHashLock);
	sTeamHash.InsertIteratorEntry(&fEntry);
}


TeamListIterator::~TeamListIterator()
{
	// remove the entry
	InterruptsSpinLocker locker(sTeamHashLock);
	sTeamHash.RemoveIteratorEntry(&fEntry);
}


Team*
TeamListIterator::Next()
{
	// get the next team -- if there is one, get reference for it
	InterruptsSpinLocker locker(sTeamHashLock);
	Team* team = sTeamHash.NextElement(&fEntry);
	if (team != NULL)
		team->AcquireReference();

	return team;
}

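// Illustrative only: a minimal sketch of iterating all teams with the
// iterator above. Next() acquires a reference to the returned team, so the
// caller must release it:
//
//	TeamListIterator iterator;
//	while (Team* team = iterator.Next()) {
//		// ... inspect the team ...
//		team->ReleaseReference();
//	}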

// #pragma mark - Tracing


#if TEAM_TRACING
namespace TeamTracing {

class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %ld", fForkedThread);
	}

private:
	thread_id			fForkedThread;
};


class ExecTeam : public AbstractTraceEntry {
public:
	ExecTeam(const char* path, int32 argCount, const char* const* args,
			int32 envCount, const char* const* env)
		:
		fArgCount(argCount),
		fArgs(NULL)
	{
		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
			false);

		// determine the buffer size we need for the args
		size_t argBufferSize = 0;
		for (int32 i = 0; i < argCount; i++)
			argBufferSize += strlen(args[i]) + 1;

		// allocate a buffer
		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
		if (fArgs) {
			char* buffer = fArgs;
			for (int32 i = 0; i < argCount; i++) {
				size_t argSize = strlen(args[i]) + 1;
				memcpy(buffer, args[i], argSize);
				buffer += argSize;
			}
		}

		// ignore env for the time being
		(void)envCount;
		(void)env;

		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team exec, \"%s\", args:", fPath);

		if (fArgs != NULL) {
			char* args = fArgs;
			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
				out.Print(" \"%s\"", args);
				args += strlen(args) + 1;
			}
		} else
			out.Print(" <too long>");
	}

private:
	char*	fPath;
	int32	fArgCount;
	char*	fArgs;
};


static const char*
job_control_state_name(job_control_state state)
{
	switch (state) {
		case JOB_CONTROL_STATE_NONE:
			return "none";
		case JOB_CONTROL_STATE_STOPPED:
			return "stopped";
		case JOB_CONTROL_STATE_CONTINUED:
			return "continued";
		case JOB_CONTROL_STATE_DEAD:
			return "dead";
		default:
			return "invalid";
	}
}


class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %ld, "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;
};


class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %ld, "
			"flags: 0x%lx", fChild, fFlags);
	}

private:
	pid_t	fChild;
	uint32	fFlags;
};


class WaitForChildDone : public AbstractTraceEntry {
public:
	WaitForChildDone(const job_control_entry& entry)
		:
		fState(entry.state),
		fTeam(entry.thread),
		fStatus(entry.status),
		fReason(entry.reason),
		fSignal(entry.signal)
	{
		Initialized();
	}

	WaitForChildDone(status_t error)
		:
		fTeam(error)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		if (fTeam >= 0) {
			out.Print("team wait for child done, team: %ld, "
				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
				fTeam, job_control_state_name(fState), fStatus, fReason,
				fSignal);
		} else {
			out.Print("team wait for child failed, error: "
				"0x%lx, ", fTeam);
		}
	}

private:
	job_control_state	fState;
	team_id				fTeam;
	status_t			fStatus;
	uint16				fReason;
	uint16				fSignal;
};

}	// namespace TeamTracing

#	define T(x) new(std::nothrow) TeamTracing::x;
#else
#	define T(x) ;
#endif


//	#pragma mark - TeamNotificationService


TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}


void
TeamNotificationService::Notify(uint32 eventCode, Team* team)
{
	char eventBuffer[128];
	KMessage event;
	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
	event.AddInt32("event", eventCode);
	event.AddInt32("team", team->id);
	event.AddPointer("teamStruct", team);

	DefaultNotificationService::Notify(event, eventCode);
}

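// Illustrative only: a subscriber to the "teams" notification service
// receives the KMessage built above. A minimal sketch of unpacking it in a
// listener ("MyListener" is hypothetical; the registration call itself is
// outside this file):
//
//	void MyListener::EventOccurred(NotificationService& service,
//		const KMessage* event)
//	{
//		int32 eventCode;
//		int32 teamID;
//		if (event->FindInt32("event", &eventCode) == B_OK
//			&& event->FindInt32("team", &teamID) == B_OK) {
//			// react to TEAM_ADDED, TEAM_REMOVED, TEAM_EXEC, ...
//		}
//	}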

//	#pragma mark - Team


Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;
	serial_number = -1;

	// init mutex
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	hash_next = siblings_next = children = parent = NULL;
	fName[0] = '\0';
	fArgs[0] = '\0';
	num_threads = 0;
	io_context = NULL;
	address_space = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	thread_list = NULL;
	main_thread = NULL;
	loading_info = NULL;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	death_entry = NULL;
	user_data_area = -1;
	user_data = 0;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	commpage_address = NULL;

	supplementary_groups = NULL;
	supplementary_group_count = 0;

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;

	// dead threads
	list_init(&dead_threads);
	dead_threads_count = 0;

	// dead children
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// job control entry
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	list_init(&sem_list);
	list_init(&port_list);
	list_init(&image_list);
	list_init(&watcher_list);

	clear_team_debug_info(&debug_info, true);

	// init dead/stopped/continued children condition vars
	dead_children.condition_variable.Init(&dead_children, "team children");

	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));

	fUserDefinedTimerCount = 0;
}


Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	if (io_context != NULL)
		vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	malloc_referenced_release(supplementary_groups);

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}


/*static*/ Team*
Team::Create(team_id id, const char* name, bool kernel)
{
	// create the team object
	Team* team = new(std::nothrow) Team(id, kernel);
	if (team == NULL)
		return NULL;
	ObjectDeleter<Team> teamDeleter(team);

	if (name != NULL)
		team->SetName(name);

	// check initialization
	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
		return NULL;

	// finish initialization (arch specifics)
	if (arch_team_init_team_struct(team, kernel) != B_OK)
		return NULL;

	if (!kernel) {
		status_t error = user_timer_create_team_timers(team);
		if (error != B_OK)
			return NULL;
	}

	// everything went fine
	return teamDeleter.Detach();
}


/*!	\brief Returns the team with the given ID.
	Returns a reference to the team.
	Team and thread spinlock must not be held.
*/
/*static*/ Team*
Team::Get(team_id id)
{
	if (id == B_CURRENT_TEAM) {
		Team* team = thread_get_current_thread()->team;
		team->AcquireReference();
		return team;
	}

	InterruptsSpinLocker locker(sTeamHashLock);
	Team* team = sTeamHash.Lookup(id);
	if (team != NULL)
		team->AcquireReference();
	return team;
}

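// Illustrative only: the reference returned by Get() can be handed straight
// to a BReference, so it is released automatically:
//
//	Team* team = Team::Get(id);
//	if (team == NULL)
//		return B_BAD_TEAM_ID;
//	BReference<Team> reference(team, true);
//		// "true": adopts the reference Get() already acquired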

/*!	\brief Returns the team with the given ID in a locked state.
	Returns a reference to the team.
	Team and thread spinlock must not be held.
*/
/*static*/ Team*
Team::GetAndLock(team_id id)
{
	// get the team
	Team* team = Get(id);
	if (team == NULL)
		return NULL;

	// lock it
	team->Lock();

	// only return the team when it isn't already dying
	if (team->state >= TEAM_STATE_SHUTDOWN) {
		team->Unlock();
		team->ReleaseReference();
		return NULL;
	}

	return team;
}

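// Illustrative only: the matching cleanup for GetAndLock() is to unlock and
// then release the acquired reference:
//
//	Team* team = Team::GetAndLock(id);
//	if (team == NULL)
//		return B_BAD_TEAM_ID;
//	// ... manipulate the team ...
//	team->Unlock();
//	team->ReleaseReference();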

/*!	Locks the team and its parent team (if any).
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked. If the
	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
	only the team itself is locked.

	\param dontLockParentIfKernel If \c true, the team's parent team is only
		locked if it is not the kernel team.
*/
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent. This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);

		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}

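// Illustrative only: a typical caller holds a reference to the team, locks
// team and parent together, and undoes both locks with the matching method:
//
//	BReference<Team> reference(team);
//	team->LockTeamAndParent(false);
//	// ... e.g. reparenting or job control work ...
//	team->UnlockTeamAndParent();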

/*!	Unlocks the team and its parent team (if any).
*/
void
Team::UnlockTeamAndParent()
{
	if (parent != NULL)
		parent->Unlock();

	Unlock();
}


/*!	Locks the team, its parent team (if any), and the team's process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked.
*/
void
Team::LockTeamParentAndProcessGroup()
{
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's lock.
	// If we have a parent, try to lock it.
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest of
	// the job.
	Unlock();
	LockTeamAndParent(false);
}


/*!	Unlocks the team, its parent team (if any), and the team's process group.
*/
void
Team::UnlockTeamParentAndProcessGroup()
{
	group->Unlock();

	if (parent != NULL)
		parent->Unlock();

	Unlock();
}


void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);

		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}


void
Team::UnlockTeamAndProcessGroup()
{
	group->Unlock();
	Unlock();
}


void
Team::SetName(const char* name)
{
	if (const char* lastSlash = strrchr(name, '/'))
		name = lastSlash + 1;

	strlcpy(fName, name, B_OS_NAME_LENGTH);
}


void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}


void
Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
{
	fArgs[0] = '\0';
	strlcpy(fArgs, path, sizeof(fArgs));
	for (int i = 0; i < otherArgCount; i++) {
		strlcat(fArgs, " ", sizeof(fArgs));
		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
	}
}

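// Illustrative only: given path "/bin/sh" and otherArgs { "-c", "ls" }, this
// overload yields the flat string "/bin/sh -c ls" (silently truncated to
// sizeof(fArgs) if necessary).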

void
Team::ResetSignalsOnExec()
{
	// We are supposed to keep pending signals. Signal actions shall be reset
	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
	// (for SIGCHLD it's implementation-defined). Others shall be reset to
	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
	// flags, but since there aren't any handlers, they make little sense, so
	// we clear them.

	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
		struct sigaction& action = SignalActionFor(i);
		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
			action.sa_handler = SIG_DFL;

		action.sa_mask = 0;
		action.sa_flags = 0;
		action.sa_userdata = NULL;
	}
}


void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}


/*!	Adds the given user timer to the team and, if user-defined, assigns it an
	ID.

	The caller must hold the team's lock.

	\param timer The timer to be added. If it doesn't have an ID yet, it is
		considered user-defined and will be assigned an ID.
	\return \c B_OK, if the timer was added successfully, another error code
		otherwise.
*/
status_t
Team::AddUserTimer(UserTimer* timer)
{
	// don't allow addition of timers when already shutting the team down
	if (state >= TEAM_STATE_SHUTDOWN)
		return B_BAD_TEAM_ID;

	// If the timer is user-defined, check timer limit and increment
	// user-defined count.
	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
		return EAGAIN;

	fUserTimers.AddTimer(timer);

	return B_OK;
}


/*!	Removes the given user timer from the team.

	The caller must hold the team's lock.

	\param timer The timer to be removed.
*/
void
Team::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		UserDefinedTimersRemoved(1);
}


/*!	Deletes all (or all user-defined) user timers of the team.

	Timers belonging to the team's threads are not affected.
	The caller must hold the team's lock.

	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
		otherwise all timers are deleted.
*/
void
Team::DeleteUserTimers(bool userDefinedOnly)
{
	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
	UserDefinedTimersRemoved(count);
}


/*!	If not at the limit yet, increments the team's user-defined timer count.
	\return \c true, if the limit wasn't reached yet, \c false otherwise.
*/
bool
Team::CheckAddUserDefinedTimer()
{
	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
		atomic_add(&fUserDefinedTimerCount, -1);
		return false;
	}

	return true;
}

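// Illustrative note: the optimistic increment-then-check above avoids taking
// a lock. Racing threads may transiently push the counter past
// MAX_USER_TIMERS_PER_TEAM, but every loser rolls its increment back and is
// refused, so the number of actually added timers never exceeds the limit.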

/*!	Subtracts the given count from the team's user-defined timer count.
	\param count The count to subtract.
*/
void
Team::UserDefinedTimersRemoved(int32 count)
{
	atomic_add(&fUserDefinedTimerCount, -count);
}


void
Team::DeactivateCPUTimeUserTimers()
{
	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();

	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
		timer->Deactivate();
}


/*!	Returns the team's current total CPU time (kernel + user + offset).

	The caller must hold the scheduler lock.

	\param ignoreCurrentRun If \c true and the current thread is one of the
		team's threads, don't add the time since the last time \c last_time was
		updated. Should be used in "thread unscheduled" scheduler callbacks,
		since although the thread is still running at that time, its time has
		already been stopped.
	\return The team's current total CPU time.
*/
bigtime_t
Team::CPUTime(bool ignoreCurrentRun) const
{
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->kernel_time + thread->user_time;

		if (thread->IsRunning()) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}
	}

	return time;
}


/*!	Returns the team's current user CPU time.

	The caller must hold the scheduler lock.

	\return The team's current user CPU time.
*/
bigtime_t
Team::UserCPUTime() const
{
	bigtime_t time = dead_threads_user_time;

	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->user_time;

		if (thread->IsRunning() && !thread->in_kernel)
			time += now - thread->last_time;
	}

	return time;
}

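// Illustrative only: both time getters above must be called with the
// scheduler lock held, e.g.:
//
//	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
//	bigtime_t cpuTime = team->CPUTime(false);
//	schedulerLocker.Unlock();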

//	#pragma mark - ProcessGroup


ProcessGroup::ProcessGroup(pid_t id)
	:
	id(id),
	teams(NULL),
	fSession(NULL),
	fInOrphanedCheckList(false)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}


ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}


/*static*/ ProcessGroup*
ProcessGroup::Get(pid_t id)
{
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	ProcessGroup* group = sGroupHash.Lookup(id);
	if (group != NULL)
		group->AcquireReference();
	return group;
}


/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must not hold the process group hash lock.
*/
void
ProcessGroup::Publish(ProcessSession* session)
{
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	PublishLocked(session);
}


/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must hold the process group hash lock.
*/
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	fSession = session;
	fSession->AcquireReference();

	sGroupHash.InsertUnchecked(this);
}


/*!	Checks whether the process group is orphaned.
	The caller must hold the group's lock.
	\return \c true, if the group is orphaned, \c false otherwise.
*/
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}


void
ProcessGroup::ScheduleOrphanedCheck()
{
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (!fInOrphanedCheckList) {
		sOrphanedCheckProcessGroups.Add(this);
		fInOrphanedCheckList = true;
	}
}


void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}


//	#pragma mark - ProcessSession


ProcessSession::ProcessSession(pid_t id)
	:
	id(id),
	controlling_tty(-1),
	foreground_group(-1)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}


ProcessSession::~ProcessSession()
{
	mutex_destroy(&fLock);
}


//	#pragma mark - KDL functions


static void
_dump_team_info(Team* team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
		team->id);
	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
	kprintf("name:             '%s'\n", team->Name());
	kprintf("args:             '%s'\n", team->Args());
	kprintf("hash_next:        %p\n", team->hash_next);
	kprintf("parent:           %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:         %p\n", team->children);
	kprintf("num_threads:      %d\n", team->num_threads);
	kprintf("state:            %d\n", team->state);
	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
	kprintf("io_context:       %p\n", team->io_context);
	if (team->address_space)
		kprintf("address_space:    %p\n", team->address_space);
	kprintf("user data:        %p (area %" B_PRId32 ")\n",
		(void*)team->user_data, team->user_data_area);
	kprintf("free user thread: %p\n", team->free_user_threads);
	kprintf("main_thread:      %p\n", team->main_thread);
	kprintf("thread_list:      %p\n", team->thread_list);
	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
}


static int
dump_team_info(int argc, char** argv)
{
	ulong arg;
	bool found = false;

	if (argc < 2) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	arg = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(arg)) {
		// semi-hack
		_dump_team_info((Team*)arg);
		return 0;
	}

	// walk through the team list, trying to match name or id
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == (team_id)arg) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found) {
		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1],
			(team_id)arg);
	}
	return 0;
}

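// Illustrative only: assuming this function is registered as the KDL "team"
// command (the registration is not part of this excerpt), it accepts no
// argument (dumps the current team), a team address, an ID, or a name:
//
//	kdebug> team
//	kdebug> team 0x80123456
//	kdebug> team app_server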

static int
dump_teams(int argc, char** argv)
{
	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
		B_PRINTF_POINTER_WIDTH, "parent");

	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent,
			team->Name());
	}

	return 0;
}


//	#pragma mark - Private functions


/*!	Inserts team \a team into the child list of team \a parent.

	The caller must hold the lock of both \a parent and \a team.

	\param parent The parent team.
	\param team The team to be inserted into \a parent's child list.
*/
static void
insert_team_into_parent(Team* parent, Team* team)
{
	ASSERT(parent != NULL);

	team->siblings_next = parent->children;
	parent->children = team;
	team->parent = parent;
}


/*!	Removes team \a team from the child list of team \a parent.

	The caller must hold the lock of both \a parent and \a team.

	\param parent The parent team.
	\param team The team to be removed from \a parent's child list.
*/
static void
remove_team_from_parent(Team* parent, Team* team)
{
	Team* child;
	Team* last = NULL;

	for (child = parent->children; child != NULL;
			child = child->siblings_next) {
		if (child == team) {
			if (last == NULL)
				parent->children = child->siblings_next;
			else
				last->siblings_next = child->siblings_next;

			team->parent = NULL;
			break;
		}
		last = child;
	}
}


/*!	Returns whether the given team is a session leader.
	The caller must hold the team's lock or its process group's lock.
*/
static bool
is_session_leader(Team* team)
{
	return team->session_id == team->id;
}


/*!	Returns whether the given team is a process group leader.
	The caller must hold the team's lock or its process group's lock.
*/
static bool
is_process_group_leader(Team* team)
{
	return team->group_id == team->id;
}


/*!	Inserts the given team into the given process group.
	The caller must hold the process group's lock, the team's lock, and the
	team's parent's lock.
*/
static void
insert_team_into_group(ProcessGroup* group, Team* team)
{
	team->group = group;
	team->group_id = group->id;
	team->session_id = group->Session()->id;

	team->group_next = group->teams;
	group->teams = team;
	group->AcquireReference();
}


/*!	Removes the given team from its process group.

	The caller must hold the process group's lock, the team's lock, and the
	team's parent's lock. Interrupts must be enabled.

	\param team The team that'll be removed from its process group.
*/
static void
remove_team_from_group(Team* team)
{
	ProcessGroup* group = team->group;
	Team* current;
	Team* last = NULL;

	// the team must be in a process group to let this function have any effect
	if (group == NULL)
		return;

	for (current = group->teams; current != NULL;
			current = current->group_next) {
		if (current == team) {
			if (last == NULL)
				group->teams = current->group_next;
			else
				last->group_next = current->group_next;

			team->group = NULL;
			break;
		}
		last = current;
	}

	team->group = NULL;
	team->group_next = NULL;

	group->ReleaseReference();
}


static status_t
create_team_user_data(Team* team, void* exactAddress = NULL)
{
	void* address;
	uint32 addressSpec;

	if (exactAddress != NULL) {
		address = exactAddress;
		addressSpec = B_EXACT_ADDRESS;
	} else {
		address = (void*)KERNEL_USER_DATA_BASE;
		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
	}

	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);

	virtual_address_restrictions virtualRestrictions = {};
	if (result == B_OK || exactAddress != NULL) {
		if (exactAddress != NULL)
			virtualRestrictions.address = exactAddress;
		else
			virtualRestrictions.address = address;
		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	} else {
		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
	}

	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area",
		kTeamUserDataInitialSize, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0,
		&virtualRestrictions, &physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = kTeamUserDataInitialSize;
	team->free_user_threads = NULL;

	return B_OK;
}


static void
delete_team_user_data(Team* team)
{
	if (team->user_data_area >= 0) {
		vm_delete_area(team->id, team->user_data_area, true);
		vm_unreserve_address_range(team->id, (void*)team->user_data,
			kTeamUserDataReservedSize);

		team->user_data = 0;
		team->used_user_data = 0;
		team->user_data_size = 0;
		team->user_data_area = -1;
		while (free_user_thread* entry = team->free_user_threads) {
			team->free_user_threads = entry->next;
			free(entry);
		}
	}
}


static status_t
copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, char**& _flatArgs)
{
	if (argCount < 0 || envCount < 0)
		return B_BAD_VALUE;

	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
		return B_TOO_MANY_ARGS;
	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
		return B_BAD_VALUE;

	if (!IS_USER_ADDRESS(userFlatArgs))
		return B_BAD_ADDRESS;

	// allocate kernel memory
	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
	if (flatArgs == NULL)
		return B_NO_MEMORY;

	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
		free(flatArgs);
		return B_BAD_ADDRESS;
	}

	// check and relocate the array
	status_t error = B_OK;
	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
	const char* stringEnd = (char*)flatArgs + flatArgsSize;
	for (int32 i = 0; i < argCount + envCount + 2; i++) {
		if (i == argCount || i == argCount + envCount + 1) {
			// check array null termination
			if (flatArgs[i] != NULL) {
				error = B_BAD_VALUE;
				break;
			}
		} else {
			// check string
			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
			size_t maxLen = stringEnd - arg;
			if (arg < stringBase || arg >= stringEnd
					|| strnlen(arg, maxLen) == maxLen) {
				error = B_BAD_VALUE;
				break;
			}

			flatArgs[i] = arg;
		}
	}

	if (error == B_OK)
		_flatArgs = flatArgs;
	else
		free(flatArgs);

	return error;
}

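// The flat buffer validated above is laid out as follows (sketch):
//
//	flatArgs[0 .. argCount - 1]           argument string pointers
//	flatArgs[argCount]                    NULL
//	flatArgs[argCount + 1 .. + envCount]  environment string pointers
//	flatArgs[argCount + envCount + 1]     NULL
//	stringBase ...                        the strings themselves, to which
//	                                      the pointers are relocated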

static void
free_team_arg(struct team_arg* teamArg)
{
	if (teamArg != NULL) {
		free(teamArg->flat_args);
		free(teamArg->path);
		free(teamArg);
	}
}


static status_t
create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
	port_id port, uint32 token)
{
	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
	if (teamArg == NULL)
		return B_NO_MEMORY;

	teamArg->path = strdup(path);
	if (teamArg->path == NULL) {
		free(teamArg);
		return B_NO_MEMORY;
	}

	// copy the args over

	teamArg->flat_args = flatArgs;
	teamArg->flat_args_size = flatArgsSize;
	teamArg->arg_count = argCount;
	teamArg->env_count = envCount;
	teamArg->umask = umask;
	teamArg->error_port = port;
	teamArg->error_token = token;

	*_teamArg = teamArg;
	return B_OK;
}


static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
		thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size								| usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
	// TLS_SIZE							| TLS data
	// sizeof(user_space_program_args)	| argument structure for the runtime
	//									| loader
	// flat arguments size				| flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// set team args and update state
	team->Lock();
	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	// Clone commpage area
	area_id commPageArea = clone_commpage_area(team->id,
		&team->commpage_address);
	if (commPageArea < B_OK) {
		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
			strerror(commPageArea)));
		return commPageArea;
	}

	// Register commpage image
	image_id commPageImage = get_commpage_image();
	image_info imageInfo;
	err = get_image_info(commPageImage, &imageInfo);
	if (err != B_OK) {
		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
			strerror(err)));
		return err;
	}
	imageInfo.text = team->commpage_address;
	image_id image = register_image(team, &imageInfo, sizeof(image_info));
	if (image < 0) {
		TRACE(("team_create_thread_start: register_image() failed: %s\n",
			strerror(image)));
		return image;
	}

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is,
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	// enter userspace -- returns only in case of error
	return thread_enter_userspace_new_team(thread, (addr_t)entry,
		programArgs, team->commpage_address);
}


static status_t
team_create_thread_start(void* args)
{
	team_create_thread_start_internal(args);
	team_init_exit_info_on_error(thread_get_current_thread()->team);
	thread_exit();
		// does not return
	return B_OK;
}


static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	thread_id thread;
	status_t status;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	io_context* parentIOContext = NULL;
	team_id teamID;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
		"\n", path, flatArgs, argCount));

	// cut the path from the main thread name
	const char* threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the main thread object
	Thread* mainThread;
	status = Thread::Create(threadName, mainThread);
	if (status != B_OK)
		return status;
	BReference<Thread> mainThreadReference(mainThread, true);

	// create team object
	Team* team = Team::Create(mainThread->id, path, false);
	if (team == NULL)
		return B_NO_MEMORY;
	BReference<Team> teamReference(team, true);

	if (flags & B_WAIT_TILL_LOADED) {
		loadingInfo.thread = thread_get_current_thread();
		loadingInfo.result = B_ERROR;
		loadingInfo.done = false;
		team->loading_info = &loadingInfo;
	}

	// get the parent team
	Team* parent = Team::Get(parentID);
	if (parent == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> parentReference(parent, true);

	parent->LockTeamAndProcessGroup();
	team->Lock();

	// inherit the parent's user/group
	inherit_parent_user_and_group(team, parent);

	InterruptsSpinLocker teamsLocker(sTeamHashLock);

	sTeamHash.Insert(team);
	bool teamLimitReached = sUsedTeams >= sMaxTeams;
	if (!teamLimitReached)
		sUsedTeams++;

	teamsLocker.Unlock();

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err1;
	}

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err2;

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err5;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		InterruptsSpinLocker schedulerLocker(gSchedulerLock);

		// resume the team's main thread
		if (mainThread != NULL && mainThread->state == B_THREAD_SUSPENDED)
			scheduler_enqueue_in_run_queue(mainThread);

		// Now suspend ourselves until loading is finished. We will be woken
		// either by the thread, when it finished or aborted loading, or when
		// the team is going to die (e.g. is killed). In either case the one
		// setting `loadingInfo.done' is responsible for removing the info from
		// the team structure.
		while (!loadingInfo.done) {
			thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
			scheduler_reschedule();
		}

		schedulerLocker.Unlock();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(teamID);

	return thread;

err5:
	delete_team_user_data(team);
err4:
	team->address_space->Put();
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	teamsLocker.Lock();
	sTeamHash.Remove(team);
	if (!teamLimitReached)
		sUsedTeams--;
	teamsLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	return status;
}


/*!	Almost shuts down the current team and loads a new image into it.
	If successful, this function does not return and will take over ownership of
	the arguments provided.
	This function may only be called in a userland team (caused by one of the
	exec*() syscalls).
*/
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
		team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		// TODO: We should better keep the old user area in the first place.
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
			// never returns
	} else
		free_team_arg(teamArgs);

	// Sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that).
	exit_thread(status);

	// We return a status here since the signal that is sent by the
	// call above is not immediately handled.
	return B_ERROR;
}


static thread_id
fork_team(void)
{
	Thread* parentThread = thread_get_current_thread();
	Team* parentTeam = parentThread->team;
	Team* team;
	arch_fork_arg* forkArgs;
	struct area_info info;
	thread_id threadID;
	status_t status;
	ssize_t areaCookie;
	int32 imageCookie;

	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));

	if (parentTeam == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// create a new team
	// TODO: this is very similar to load_image_internal() - maybe we can do
	// something about it :)

	// create the main thread object
	Thread* thread;
	status = Thread::Create(parentThread->name, thread);
	if (status != B_OK)
		return status;
	BReference<Thread> threadReference(thread, true);

	// create the team object
	team = Team::Create(thread->id, NULL, false);
	if (team == NULL)
		return B_NO_MEMORY;

	parentTeam->LockTeamAndProcessGroup();
	team->Lock();

	team->SetName(parentTeam->Name());
	team->SetArgs(parentTeam->Args());

	team->commpage_address = parentTeam->commpage_address;

	// Inherit the parent's user/group.
	inherit_parent_user_and_group(team, parentTeam);

	// inherit signal handlers
	team->InheritSignalActions(parentTeam);

	InterruptsSpinLocker teamsLocker(sTeamHashLock);

	sTeamHash.Insert(team);
	bool teamLimitReached = sUsedTeams >= sMaxTeams;
	if (!teamLimitReached)
		sUsedTeams++;

	teamsLocker.Unlock();

	insert_team_into_parent(parentTeam, team);
	insert_team_into_group(parentTeam->group, team);

	team->Unlock();
	parentTeam->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// inherit some team debug flags
	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
		& B_TEAM_DEBUG_INHERITED_FLAGS;

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err1;
	}

	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
	if (forkArgs == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// duplicate the realtime sem context
	if (parentTeam->realtime_sem_context) {
		team->realtime_sem_context = clone_realtime_sem_context(
			parentTeam->realtime_sem_context);
		if (team->realtime_sem_context == NULL) {
			status = B_NO_MEMORY;
			goto err2;
		}
	}

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status < B_OK)
		goto err3;

	// copy all areas of the team
	// TODO: should be able to handle stack areas differently (i.e. don't have
2106 	// them copy-on-write)
2107 
2108 	areaCookie = 0;
2109 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2110 		if (info.area == parentTeam->user_data_area) {
2111 			// don't clone the user area; just create a new one
2112 			status = create_team_user_data(team, info.address);
2113 			if (status != B_OK)
2114 				break;
2115 
2116 			thread->user_thread = team_allocate_user_thread(team);
2117 		} else {
2118 			void* address;
2119 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2120 				&address, B_CLONE_ADDRESS, info.protection, info.area);
2121 			if (area < B_OK) {
2122 				status = area;
2123 				break;
2124 			}
2125 
2126 			if (info.area == parentThread->user_stack_area)
2127 				thread->user_stack_area = area;
2128 		}
2129 	}
2130 
2131 	if (status < B_OK)
2132 		goto err4;
2133 
2134 	if (thread->user_thread == NULL) {
2135 #if KDEBUG
2136 		panic("user data area not found, parent area is %" B_PRId32,
2137 			parentTeam->user_data_area);
2138 #endif
2139 		status = B_ERROR;
2140 		goto err4;
2141 	}
2142 
2143 	thread->user_stack_base = parentThread->user_stack_base;
2144 	thread->user_stack_size = parentThread->user_stack_size;
2145 	thread->user_local_storage = parentThread->user_local_storage;
2146 	thread->sig_block_mask = parentThread->sig_block_mask;
2147 	thread->signal_stack_base = parentThread->signal_stack_base;
2148 	thread->signal_stack_size = parentThread->signal_stack_size;
2149 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2150 
2151 	arch_store_fork_frame(forkArgs);
2152 
2153 	// copy image list
2154 	image_info imageInfo;
2155 	imageCookie = 0;
2156 	while (get_next_image_info(parentTeam->id, &imageCookie, &imageInfo)
2157 			== B_OK) {
2158 		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
2159 		if (image < 0)
2160 			goto err5;
2161 	}
2162 
2163 	// create the main thread
2164 	{
2165 		ThreadCreationAttributes threadCreationAttributes(NULL,
2166 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2167 		threadCreationAttributes.forkArgs = forkArgs;
2168 		threadID = thread_create_thread(threadCreationAttributes, false);
2169 		if (threadID < 0) {
2170 			status = threadID;
2171 			goto err5;
2172 		}
2173 	}
2174 
2175 	// notify the debugger
2176 	user_debug_team_created(team->id);
2177 
2178 	T(TeamForked(threadID));
2179 
2180 	resume_thread(threadID);
2181 	return threadID;
2182 
2183 err5:
2184 	remove_images(team);
2185 err4:
2186 	team->address_space->RemoveAndPut();
2187 err3:
2188 	delete_realtime_sem_context(team->realtime_sem_context);
2189 err2:
2190 	free(forkArgs);
2191 err1:
2192 	// Remove the team structure from the process group, the parent team, and
2193 	// the team hash table and delete the team structure.
2194 	parentTeam->LockTeamAndProcessGroup();
2195 	team->Lock();
2196 
2197 	remove_team_from_group(team);
2198 	remove_team_from_parent(team->parent, team);
2199 
2200 	team->Unlock();
2201 	parentTeam->UnlockTeamAndProcessGroup();
2202 
2203 	teamsLocker.Lock();
2204 	sTeamHash.Remove(team);
2205 	if (!teamLimitReached)
2206 		sUsedTeams--;
2207 	teamsLocker.Unlock();
2208 
2209 	sNotificationService.Notify(TEAM_REMOVED, team);
2210 
2211 	team->ReleaseReference();
2212 
2213 	return status;
2214 }
2215 
2216 
/*!	Returns whether the specified team \a parent has any children belonging
	to the process group with the specified ID \a groupID.
2219 	The caller must hold \a parent's lock.
2220 */
2221 static bool
2222 has_children_in_group(Team* parent, pid_t groupID)
2223 {
2224 	for (Team* child = parent->children; child != NULL;
2225 			child = child->siblings_next) {
2226 		TeamLocker childLocker(child);
2227 		if (child->group_id == groupID)
2228 			return true;
2229 	}
2230 
2231 	return false;
2232 }
2233 
2234 
/*!	Returns the first job control entry from \a children that matches \a id.
2236 	\a id can be:
2237 	- \code > 0 \endcode: Matching an entry with that team ID.
2238 	- \code == -1 \endcode: Matching any entry.
2239 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2240 	\c 0 is an invalid value for \a id.
2241 
2242 	The caller must hold the lock of the team that \a children belongs to.
2243 
2244 	\param children The job control entry list to check.
2245 	\param id The match criterion.
2246 	\return The first matching entry or \c NULL, if none matches.
2247 */
2248 static job_control_entry*
2249 get_job_control_entry(team_job_control_children& children, pid_t id)
2250 {
2251 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2252 		 job_control_entry* entry = it.Next();) {
2253 
2254 		if (id > 0) {
2255 			if (entry->thread == id)
2256 				return entry;
2257 		} else if (id == -1) {
2258 			return entry;
2259 		} else {
2260 			pid_t processGroup
2261 				= (entry->team ? entry->team->group_id : entry->group_id);
2262 			if (processGroup == -id)
2263 				return entry;
2264 		}
2265 	}
2266 
2267 	return NULL;
2268 }
2269 
2270 
/*!	Returns the first job control entry from one of \a team's dead, continued,
	or stopped child lists that matches \a id.
2273 	\a id can be:
2274 	- \code > 0 \endcode: Matching an entry with that team ID.
2275 	- \code == -1 \endcode: Matching any entry.
2276 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2277 	\c 0 is an invalid value for \a id.
2278 
2279 	The caller must hold \a team's lock.
2280 
2281 	\param team The team whose dead, stopped, and continued child lists shall be
2282 		checked.
2283 	\param id The match criterion.
2284 	\param flags Specifies which children shall be considered. Dead children
		always are. Stopped children are considered only when \a flags
		contains \c WUNTRACED, continued children only when it contains
		\c WCONTINUED.
2288 	\return The first matching entry or \c NULL, if none matches.
2289 */
2290 static job_control_entry*
2291 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2292 {
2293 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
2294 
2295 	if (entry == NULL && (flags & WCONTINUED) != 0)
2296 		entry = get_job_control_entry(team->continued_children, id);
2297 
2298 	if (entry == NULL && (flags & WUNTRACED) != 0)
2299 		entry = get_job_control_entry(team->stopped_children, id);
2300 
2301 	return entry;
2302 }
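

/*	Example (illustrative only, not part of the original source): the \a id
	encoding mirrors the waitpid() convention. With the owning team's lock
	held, a caller could ask for:

		get_job_control_entry(team, 123, flags);	// child team 123 only
		get_job_control_entry(team, -1, flags);		// any child
		get_job_control_entry(team, -42, flags);	// any child in group 42

	Dead children always match; stopped resp. continued children only do so
	when \c WUNTRACED resp. \c WCONTINUED is set in \a flags.
*/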
2303 
2304 
2305 job_control_entry::job_control_entry()
2306 	:
2307 	has_group_ref(false)
2308 {
2309 }
2310 
2311 
2312 job_control_entry::~job_control_entry()
2313 {
2314 	if (has_group_ref) {
2315 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2316 
2317 		ProcessGroup* group = sGroupHash.Lookup(group_id);
2318 		if (group == NULL) {
2319 			panic("job_control_entry::~job_control_entry(): unknown group "
2320 				"ID: %" B_PRId32, group_id);
2321 			return;
2322 		}
2323 
2324 		groupHashLocker.Unlock();
2325 
2326 		group->ReleaseReference();
2327 	}
2328 }
2329 
2330 
2331 /*!	Invoked when the owning team is dying, initializing the entry according to
2332 	the dead state.
2333 
2334 	The caller must hold the owning team's lock and the scheduler lock.
2335 */
2336 void
2337 job_control_entry::InitDeadState()
2338 {
2339 	if (team != NULL) {
2340 		ASSERT(team->exit.initialized);
2341 
2342 		group_id = team->group_id;
2343 		team->group->AcquireReference();
2344 		has_group_ref = true;
2345 
2346 		thread = team->id;
2347 		status = team->exit.status;
2348 		reason = team->exit.reason;
2349 		signal = team->exit.signal;
2350 		signaling_user = team->exit.signaling_user;
2351 
2352 		team = NULL;
2353 	}
2354 }
2355 
2356 
2357 job_control_entry&
2358 job_control_entry::operator=(const job_control_entry& other)
2359 {
2360 	state = other.state;
2361 	thread = other.thread;
2362 	signal = other.signal;
2363 	has_group_ref = false;
2364 	signaling_user = other.signaling_user;
2365 	team = other.team;
2366 	group_id = other.group_id;
2367 	status = other.status;
2368 	reason = other.reason;
2369 
2370 	return *this;
2371 }
2372 
2373 
2374 /*! This is the kernel backend for waitid().
2375 */
2376 static thread_id
2377 wait_for_child(pid_t child, uint32 flags, siginfo_t& _info)
2378 {
2379 	Thread* thread = thread_get_current_thread();
2380 	Team* team = thread->team;
2381 	struct job_control_entry foundEntry;
2382 	struct job_control_entry* freeDeathEntry = NULL;
2383 	status_t status = B_OK;
2384 
2385 	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
2386 		child, flags));
2387 
2388 	T(WaitForChild(child, flags));
2389 
2390 	pid_t originalChild = child;
2391 
2392 	bool ignoreFoundEntries = false;
2393 	bool ignoreFoundEntriesChecked = false;
2394 
2395 	while (true) {
2396 		// lock the team
2397 		TeamLocker teamLocker(team);
2398 
2399 		// A 0 child argument means to wait for all children in the process
2400 		// group of the calling team.
2401 		child = originalChild == 0 ? -team->group_id : originalChild;
2402 
2403 		// check whether any condition holds
2404 		job_control_entry* entry = get_job_control_entry(team, child, flags);
2405 
		// If we don't have an entry yet, check whether there are any children
		// matching the process group specification at all.
		if (entry == NULL) {
			bool childrenExist = false;
2412 			if (child == -1) {
2413 				childrenExist = team->children != NULL;
2414 			} else if (child < -1) {
2415 				childrenExist = has_children_in_group(team, -child);
2416 			} else {
2417 				if (Team* childTeam = Team::Get(child)) {
2418 					BReference<Team> childTeamReference(childTeam, true);
2419 					TeamLocker childTeamLocker(childTeam);
2420 					childrenExist = childTeam->parent == team;
2421 				}
2422 			}
2423 
2424 			if (!childrenExist) {
2425 				// there is no child we could wait for
2426 				status = ECHILD;
2427 			} else {
2428 				// the children we're waiting for are still running
2429 				status = B_WOULD_BLOCK;
2430 			}
2431 		} else {
2432 			// got something
2433 			foundEntry = *entry;
2434 
2435 			// unless WNOWAIT has been specified, "consume" the wait state
2436 			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
2437 				if (entry->state == JOB_CONTROL_STATE_DEAD) {
2438 					// The child is dead. Reap its death entry.
2439 					freeDeathEntry = entry;
2440 					team->dead_children.entries.Remove(entry);
2441 					team->dead_children.count--;
2442 				} else {
2443 					// The child is well. Reset its job control state.
2444 					team_set_job_control_state(entry->team,
2445 						JOB_CONTROL_STATE_NONE, NULL, false);
2446 				}
2447 			}
2448 		}
2449 
		// If we haven't got anything yet, prepare to wait on the condition
		// variable.
2452 		ConditionVariableEntry deadWaitEntry;
2453 
2454 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
2455 			team->dead_children.condition_variable.Add(&deadWaitEntry);
2456 
2457 		teamLocker.Unlock();
2458 
2459 		// we got our entry and can return to our caller
2460 		if (status == B_OK) {
2461 			if (ignoreFoundEntries) {
2462 				// ... unless we shall ignore found entries
2463 				delete freeDeathEntry;
2464 				freeDeathEntry = NULL;
2465 				continue;
2466 			}
2467 
2468 			break;
2469 		}
2470 
2471 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
2472 			T(WaitForChildDone(status));
2473 			return status;
2474 		}
2475 
2476 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
2477 		if (status == B_INTERRUPTED) {
2478 			T(WaitForChildDone(status));
2479 			return status;
2480 		}
2481 
2482 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
2483 		// all our children are dead and fail with ECHILD. We check the
2484 		// condition at this point.
2485 		if (!ignoreFoundEntriesChecked) {
2486 			teamLocker.Lock();
2487 
2488 			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
2489 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
2490 				|| handler.sa_handler == SIG_IGN) {
2491 				ignoreFoundEntries = true;
2492 			}
2493 
2494 			teamLocker.Unlock();
2495 
2496 			ignoreFoundEntriesChecked = true;
2497 		}
2498 	}
2499 
2500 	delete freeDeathEntry;
2501 
	// When we get here, we have a valid death entry that has already been
	// unregistered from the team or group. Fill in the returned info.
2504 	memset(&_info, 0, sizeof(_info));
2505 	_info.si_signo = SIGCHLD;
2506 	_info.si_pid = foundEntry.thread;
2507 	_info.si_uid = foundEntry.signaling_user;
2508 	// TODO: Fill in si_errno?
2509 
2510 	switch (foundEntry.state) {
2511 		case JOB_CONTROL_STATE_DEAD:
2512 			_info.si_code = foundEntry.reason;
2513 			_info.si_status = foundEntry.reason == CLD_EXITED
2514 				? foundEntry.status : foundEntry.signal;
2515 			break;
2516 		case JOB_CONTROL_STATE_STOPPED:
2517 			_info.si_code = CLD_STOPPED;
2518 			_info.si_status = foundEntry.signal;
2519 			break;
2520 		case JOB_CONTROL_STATE_CONTINUED:
2521 			_info.si_code = CLD_CONTINUED;
2522 			_info.si_status = 0;
2523 			break;
2524 		case JOB_CONTROL_STATE_NONE:
2525 			// can't happen
2526 			break;
2527 	}
2528 
	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs if no other
	// child status is available.
2531 	TeamLocker teamLocker(team);
2532 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2533 
2534 	if (is_team_signal_blocked(team, SIGCHLD)) {
2535 		if (get_job_control_entry(team, child, flags) == NULL)
2536 			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
2537 	}
2538 
2539 	schedulerLocker.Unlock();
2540 	teamLocker.Unlock();
2541 
2542 	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller, we wait
	// until the thread is really gone.
2545 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
2546 		wait_for_thread(foundEntry.thread, NULL);
2547 
2548 	T(WaitForChildDone(foundEntry));
2549 
2550 	return foundEntry.thread;
2551 }
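

/*	Example (hypothetical caller, for illustration): a userland
	waitpid(-1, &status, WUNTRACED) request reaches this backend roughly as

		siginfo_t info;
		pid_t child = wait_for_child(-1, WUNTRACED, info);

	On success the returned ID is the child's team/main thread ID, and
	info.si_code (CLD_EXITED, CLD_STOPPED, ...) together with info.si_status
	describe what happened. Without \c WNOHANG the call blocks on the
	dead_children condition variable until a matching state change occurs.
*/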
2552 
2553 
2554 /*! Fills the team_info structure with information from the specified team.
2555 	Interrupts must be enabled. The team must not be locked.
2556 */
2557 static status_t
2558 fill_team_info(Team* team, team_info* info, size_t size)
2559 {
2560 	if (size != sizeof(team_info))
2561 		return B_BAD_VALUE;
2562 
	// TODO: Set more information for team_info
2564 	memset(info, 0, size);
2565 
2566 	info->team = team->id;
2567 		// immutable
2568 	info->image_count = count_images(team);
2569 		// protected by sImageMutex
2570 
2571 	TeamLocker teamLocker(team);
2572 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2573 
2574 	info->thread_count = team->num_threads;
2575 	//info->area_count =
2576 	info->debugger_nub_thread = team->debug_info.nub_thread;
2577 	info->debugger_nub_port = team->debug_info.nub_port;
2578 	info->uid = team->effective_uid;
2579 	info->gid = team->effective_gid;
2580 
2581 	strlcpy(info->args, team->Args(), sizeof(info->args));
2582 	info->argc = 1;
2583 
2584 	return B_OK;
2585 }
2586 
2587 
2588 /*!	Returns whether the process group contains stopped processes.
2589 	The caller must hold the process group's lock.
2590 */
2591 static bool
2592 process_group_has_stopped_processes(ProcessGroup* group)
2593 {
2594 	Team* team = group->teams;
2595 	while (team != NULL) {
2596 		// the parent team's lock guards the job control entry -- acquire it
2597 		team->LockTeamAndParent(false);
2598 
2599 		if (team->job_control_entry != NULL
2600 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2601 			team->UnlockTeamAndParent();
2602 			return true;
2603 		}
2604 
2605 		team->UnlockTeamAndParent();
2606 
2607 		team = team->group_next;
2608 	}
2609 
2610 	return false;
2611 }
2612 
2613 
2614 /*!	Iterates through all process groups queued in team_remove_team() and signals
2615 	those that are orphaned and have stopped processes.
2616 	The caller must not hold any team or process group locks.
2617 */
2618 static void
2619 orphaned_process_group_check()
2620 {
2621 	// process as long as there are groups in the list
2622 	while (true) {
2623 		// remove the head from the list
2624 		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
2625 
2626 		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
2627 		if (group == NULL)
2628 			return;
2629 
2630 		group->UnsetOrphanedCheck();
2631 		BReference<ProcessGroup> groupReference(group);
2632 
2633 		orphanedCheckLocker.Unlock();
2634 
2635 		AutoLocker<ProcessGroup> groupLocker(group);
2636 
2637 		// If the group is orphaned and contains stopped processes, we're
2638 		// supposed to send SIGHUP + SIGCONT.
2639 		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
2640 			Thread* currentThread = thread_get_current_thread();
2641 
2642 			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
2643 			send_signal_to_process_group_locked(group, signal, 0);
2644 
2645 			signal.SetNumber(SIGCONT);
2646 			send_signal_to_process_group_locked(group, signal, 0);
2647 		}
2648 	}
2649 }
2650 
2651 
2652 static status_t
2653 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2654 	uint32 flags)
2655 {
2656 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2657 		return B_BAD_VALUE;
2658 
2659 	// get the team
2660 	Team* team = Team::GetAndLock(id);
2661 	if (team == NULL)
2662 		return B_BAD_TEAM_ID;
2663 	BReference<Team> teamReference(team, true);
2664 	TeamLocker teamLocker(team, true);
2665 
2666 	if ((flags & B_CHECK_PERMISSION) != 0) {
2667 		uid_t uid = geteuid();
2668 		if (uid != 0 && uid != team->effective_uid)
2669 			return B_NOT_ALLOWED;
2670 	}
2671 
2672 	bigtime_t kernelTime = 0;
2673 	bigtime_t userTime = 0;
2674 
2675 	switch (who) {
2676 		case B_TEAM_USAGE_SELF:
2677 		{
2678 			Thread* thread = team->thread_list;
2679 
2680 			for (; thread != NULL; thread = thread->team_next) {
2681 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2682 				kernelTime += thread->kernel_time;
2683 				userTime += thread->user_time;
2684 			}
2685 
2686 			kernelTime += team->dead_threads_kernel_time;
2687 			userTime += team->dead_threads_user_time;
2688 			break;
2689 		}
2690 
2691 		case B_TEAM_USAGE_CHILDREN:
2692 		{
2693 			Team* child = team->children;
2694 			for (; child != NULL; child = child->siblings_next) {
2695 				TeamLocker childLocker(child);
2696 
				Thread* thread = child->thread_list;
2698 
2699 				for (; thread != NULL; thread = thread->team_next) {
2700 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2701 					kernelTime += thread->kernel_time;
2702 					userTime += thread->user_time;
2703 				}
2704 
2705 				kernelTime += child->dead_threads_kernel_time;
2706 				userTime += child->dead_threads_user_time;
2707 			}
2708 
2709 			kernelTime += team->dead_children.kernel_time;
2710 			userTime += team->dead_children.user_time;
2711 			break;
2712 		}
2713 	}
2714 
2715 	info->kernel_time = kernelTime;
2716 	info->user_time = userTime;
2717 
2718 	return B_OK;
2719 }
2720 
2721 
2722 //	#pragma mark - Private kernel API
2723 
2724 
2725 status_t
2726 team_init(kernel_args* args)
2727 {
2728 	// create the team hash table
2729 	new(&sTeamHash) TeamTable;
2730 	if (sTeamHash.Init(64) != B_OK)
2731 		panic("Failed to init team hash table!");
2732 
2733 	new(&sGroupHash) ProcessGroupHashTable;
2734 	if (sGroupHash.Init() != B_OK)
2735 		panic("Failed to init process group hash table!");
2736 
2737 	// create initial session and process groups
2738 
2739 	ProcessSession* session = new(std::nothrow) ProcessSession(1);
2740 	if (session == NULL)
2741 		panic("Could not create initial session.\n");
2742 	BReference<ProcessSession> sessionReference(session, true);
2743 
2744 	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
2745 	if (group == NULL)
2746 		panic("Could not create initial process group.\n");
2747 	BReference<ProcessGroup> groupReference(group, true);
2748 
2749 	group->Publish(session);
2750 
2751 	// create the kernel team
2752 	sKernelTeam = Team::Create(1, "kernel_team", true);
2753 	if (sKernelTeam == NULL)
2754 		panic("could not create kernel team!\n");
2755 	sKernelTeam->SetArgs(sKernelTeam->Name());
2756 	sKernelTeam->state = TEAM_STATE_NORMAL;
2757 
2758 	sKernelTeam->saved_set_uid = 0;
2759 	sKernelTeam->real_uid = 0;
2760 	sKernelTeam->effective_uid = 0;
2761 	sKernelTeam->saved_set_gid = 0;
2762 	sKernelTeam->real_gid = 0;
2763 	sKernelTeam->effective_gid = 0;
2764 	sKernelTeam->supplementary_groups = NULL;
2765 	sKernelTeam->supplementary_group_count = 0;
2766 
2767 	insert_team_into_group(group, sKernelTeam);
2768 
2769 	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
2770 	if (sKernelTeam->io_context == NULL)
2771 		panic("could not create io_context for kernel team!\n");
2772 
2773 	// stick it in the team hash
2774 	sTeamHash.Insert(sKernelTeam);
2775 
2776 	add_debugger_command_etc("team", &dump_team_info,
2777 		"Dump info about a particular team",
2778 		"[ <id> | <address> | <name> ]\n"
2779 		"Prints information about the specified team. If no argument is given\n"
2780 		"the current team is selected.\n"
2781 		"  <id>       - The ID of the team.\n"
2782 		"  <address>  - The address of the team structure.\n"
2783 		"  <name>     - The team's name.\n", 0);
2784 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2785 		"\n"
2786 		"Prints a list of all existing teams.\n", 0);
2787 
2788 	new(&sNotificationService) TeamNotificationService();
2789 
2790 	sNotificationService.Register();
2791 
2792 	return B_OK;
2793 }
2794 
2795 
2796 int32
2797 team_max_teams(void)
2798 {
2799 	return sMaxTeams;
2800 }
2801 
2802 
2803 int32
2804 team_used_teams(void)
2805 {
2806 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2807 	return sUsedTeams;
2808 }
2809 
2810 
2811 /*! Returns a death entry of a child team specified by ID (if any).
2812 	The caller must hold the team's lock.
2813 
2814 	\param team The team whose dead children list to check.
	\param child The ID of the child whose death entry to look up. Must be > 0.
2816 	\param _deleteEntry Return variable, indicating whether the caller needs to
2817 		delete the returned entry.
2818 	\return The death entry of the matching team, or \c NULL, if no death entry
2819 		for the team was found.
2820 */
2821 job_control_entry*
2822 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2823 {
2824 	if (child <= 0)
2825 		return NULL;
2826 
2827 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2828 		child);
2829 	if (entry) {
		// remove the entry only if the caller is the parent of the found team
2831 		if (team_get_current_team_id() == entry->thread) {
2832 			team->dead_children.entries.Remove(entry);
2833 			team->dead_children.count--;
2834 			*_deleteEntry = true;
2835 		} else {
2836 			*_deleteEntry = false;
2837 		}
2838 	}
2839 
2840 	return entry;
2841 }
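

/*	Example (illustrative caller pattern; \c childID is a placeholder):

		TeamLocker teamLocker(team);
		bool deleteEntry;
		job_control_entry* entry
			= team_get_death_entry(team, childID, &deleteEntry);
		if (entry != NULL) {
			// evaluate entry->status, entry->reason, ...
			if (deleteEntry)
				delete entry;
		}
*/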
2842 
2843 
2844 /*! Quick check to see if we have a valid team ID. */
2845 bool
2846 team_is_valid(team_id id)
2847 {
2848 	if (id <= 0)
2849 		return false;
2850 
2851 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2852 
2853 	return team_get_team_struct_locked(id) != NULL;
2854 }
2855 
2856 
2857 Team*
2858 team_get_team_struct_locked(team_id id)
2859 {
2860 	return sTeamHash.Lookup(id);
2861 }
2862 
2863 
2864 void
2865 team_set_controlling_tty(int32 ttyIndex)
2866 {
2867 	// lock the team, so its session won't change while we're playing with it
2868 	Team* team = thread_get_current_thread()->team;
2869 	TeamLocker teamLocker(team);
2870 
2871 	// get and lock the session
2872 	ProcessSession* session = team->group->Session();
2873 	AutoLocker<ProcessSession> sessionLocker(session);
2874 
2875 	// set the session's fields
2876 	session->controlling_tty = ttyIndex;
2877 	session->foreground_group = -1;
2878 }
2879 
2880 
2881 int32
2882 team_get_controlling_tty()
2883 {
2884 	// lock the team, so its session won't change while we're playing with it
2885 	Team* team = thread_get_current_thread()->team;
2886 	TeamLocker teamLocker(team);
2887 
2888 	// get and lock the session
2889 	ProcessSession* session = team->group->Session();
2890 	AutoLocker<ProcessSession> sessionLocker(session);
2891 
2892 	// get the session's field
2893 	return session->controlling_tty;
2894 }
2895 
2896 
2897 status_t
2898 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2899 {
2900 	// lock the team, so its session won't change while we're playing with it
2901 	Thread* thread = thread_get_current_thread();
2902 	Team* team = thread->team;
2903 	TeamLocker teamLocker(team);
2904 
2905 	// get and lock the session
2906 	ProcessSession* session = team->group->Session();
2907 	AutoLocker<ProcessSession> sessionLocker(session);
2908 
2909 	// check given TTY -- must be the controlling tty of the calling process
2910 	if (session->controlling_tty != ttyIndex)
2911 		return ENOTTY;
2912 
2913 	// check given process group -- must belong to our session
2914 	{
2915 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2916 		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
2917 		if (group == NULL || group->Session() != session)
2918 			return B_BAD_VALUE;
2919 	}
2920 
2921 	// If we are a background group, we can do that unharmed only when we
2922 	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
2923 	if (session->foreground_group != -1
2924 		&& session->foreground_group != team->group_id
2925 		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN) {
2926 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2927 
2928 		if (!is_team_signal_blocked(team, SIGTTOU)) {
2929 			pid_t groupID = team->group_id;
2930 
2931 			schedulerLocker.Unlock();
2932 			sessionLocker.Unlock();
2933 			teamLocker.Unlock();
2934 
2935 			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
2936 			send_signal_to_process_group(groupID, signal, 0);
2937 			return B_INTERRUPTED;
2938 		}
2939 	}
2940 
2941 	session->foreground_group = processGroupID;
2942 
2943 	return B_OK;
2944 }
2945 
2946 
2947 /*!	Removes the specified team from the global team hash, from its process
2948 	group, and from its parent.
2949 	It also moves all of its children to the kernel team.
2950 
2951 	The caller must hold the following locks:
2952 	- \a team's process group's lock,
2953 	- the kernel team's lock,
2954 	- \a team's parent team's lock (might be the kernel team), and
2955 	- \a team's lock.
2956 */
2957 void
2958 team_remove_team(Team* team, pid_t& _signalGroup)
2959 {
2960 	Team* parent = team->parent;
2961 
2962 	// remember how long this team lasted
2963 	parent->dead_children.kernel_time += team->dead_threads_kernel_time
2964 		+ team->dead_children.kernel_time;
2965 	parent->dead_children.user_time += team->dead_threads_user_time
2966 		+ team->dead_children.user_time;
2967 
2968 	// remove the team from the hash table
2969 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2970 	sTeamHash.Remove(team);
2971 	sUsedTeams--;
2972 	teamsLocker.Unlock();
2973 
2974 	// The team can no longer be accessed by ID. Navigation to it is still
2975 	// possible from its process group and its parent and children, but that
2976 	// will be rectified shortly.
2977 	team->state = TEAM_STATE_DEATH;
2978 
	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signaling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine whom to signal.
2983 	_signalGroup = -1;
2984 	bool isSessionLeader = false;
2985 	if (team->session_id == team->id
2986 		&& team->group->Session()->controlling_tty >= 0) {
2987 		isSessionLeader = true;
2988 
2989 		ProcessSession* session = team->group->Session();
2990 
2991 		AutoLocker<ProcessSession> sessionLocker(session);
2992 
2993 		session->controlling_tty = -1;
2994 		_signalGroup = session->foreground_group;
2995 	}
2996 
2997 	// remove us from our process group
2998 	remove_team_from_group(team);
2999 
3000 	// move the team's children to the kernel team
3001 	while (Team* child = team->children) {
3002 		// remove the child from the current team and add it to the kernel team
3003 		TeamLocker childLocker(child);
3004 
3005 		remove_team_from_parent(team, child);
3006 		insert_team_into_parent(sKernelTeam, child);
3007 
3008 		// move job control entries too
3009 		sKernelTeam->stopped_children.entries.MoveFrom(
3010 			&team->stopped_children.entries);
3011 		sKernelTeam->continued_children.entries.MoveFrom(
3012 			&team->continued_children.entries);
3013 
3014 		// If the team was a session leader with controlling terminal,
3015 		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
3016 		// groups with stopped processes. Due to locking complications we can't
3017 		// do that here, so we only check whether we were a reason for the
3018 		// child's process group not being an orphan and, if so, schedule a
3019 		// later check (cf. orphaned_process_group_check()).
3020 		if (isSessionLeader) {
3021 			ProcessGroup* childGroup = child->group;
3022 			if (childGroup->Session()->id == team->session_id
3023 				&& childGroup->id != team->group_id) {
3024 				childGroup->ScheduleOrphanedCheck();
3025 			}
3026 		}
3027 
3028 		// Note, we don't move the dead children entries. Those will be deleted
3029 		// when the team structure is deleted.
3030 	}
3031 
3032 	// remove us from our parent
3033 	remove_team_from_parent(parent, team);
3034 }
3035 
3036 
3037 /*!	Kills all threads but the main thread of the team and shuts down user
3038 	debugging for it.
3039 	To be called on exit of the team's main thread. No locks must be held.
3040 
3041 	\param team The team in question.
3042 	\return The port of the debugger for the team, -1 if none. To be passed to
3043 		team_delete_team().
3044 */
3045 port_id
3046 team_shutdown_team(Team* team)
3047 {
3048 	ASSERT(thread_get_current_thread() == team->main_thread);
3049 
3050 	TeamLocker teamLocker(team);
3051 
3052 	// Make sure debugging changes won't happen anymore.
3053 	port_id debuggerPort = -1;
3054 	while (true) {
3055 		// If a debugger change is in progress for the team, we'll have to
3056 		// wait until it is done.
3057 		ConditionVariableEntry waitForDebuggerEntry;
3058 		bool waitForDebugger = false;
3059 
3060 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
3061 
3062 		if (team->debug_info.debugger_changed_condition != NULL) {
3063 			team->debug_info.debugger_changed_condition->Add(
3064 				&waitForDebuggerEntry);
3065 			waitForDebugger = true;
3066 		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
3067 			// The team is being debugged. That will stop with the termination
3068 			// of the nub thread. Since we set the team state to death, no one
3069 			// can install a debugger anymore. We fetch the debugger's port to
3070 			// send it a message at the bitter end.
3071 			debuggerPort = team->debug_info.debugger_port;
3072 		}
3073 
3074 		debugInfoLocker.Unlock();
3075 
3076 		if (!waitForDebugger)
3077 			break;
3078 
3079 		// wait for the debugger change to be finished
3080 		teamLocker.Unlock();
3081 
3082 		waitForDebuggerEntry.Wait();
3083 
3084 		teamLocker.Lock();
3085 	}
3086 
3087 	// Mark the team as shutting down. That will prevent new threads from being
3088 	// created and debugger changes from taking place.
3089 	team->state = TEAM_STATE_SHUTDOWN;
3090 
3091 	// delete all timers
3092 	team->DeleteUserTimers(false);
3093 
3094 	// deactivate CPU time user timers for the team
3095 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3096 
3097 	if (team->HasActiveCPUTimeUserTimers())
3098 		team->DeactivateCPUTimeUserTimers();
3099 
3100 	schedulerLocker.Unlock();
3101 
3102 	// kill all threads but the main thread
3103 	team_death_entry deathEntry;
3104 	deathEntry.condition.Init(team, "team death");
3105 
3106 	while (true) {
3107 		team->death_entry = &deathEntry;
3108 		deathEntry.remaining_threads = 0;
3109 
3110 		Thread* thread = team->thread_list;
3111 		while (thread != NULL) {
3112 			if (thread != team->main_thread) {
3113 				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
3114 				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
3115 				deathEntry.remaining_threads++;
3116 			}
3117 
3118 			thread = thread->team_next;
3119 		}
3120 
3121 		if (deathEntry.remaining_threads == 0)
3122 			break;
3123 
3124 		// there are threads to wait for
3125 		ConditionVariableEntry entry;
3126 		deathEntry.condition.Add(&entry);
3127 
3128 		teamLocker.Unlock();
3129 
3130 		entry.Wait();
3131 
3132 		teamLocker.Lock();
3133 	}
3134 
3135 	team->death_entry = NULL;
3136 
3137 	return debuggerPort;
3138 }
3139 
3140 
3141 /*!	Called on team exit to notify threads waiting on the team and free most
3142 	resources associated with it.
3143 	The caller shouldn't hold any locks.
3144 */
3145 void
3146 team_delete_team(Team* team, port_id debuggerPort)
3147 {
3148 	// Not quite in our job description, but work that has been left by
3149 	// team_remove_team() and that can be done now that we're not holding any
3150 	// locks.
3151 	orphaned_process_group_check();
3152 
3153 	team_id teamID = team->id;
3154 
3155 	ASSERT(team->num_threads == 0);
3156 
3157 	// If someone is waiting for this team to be loaded, but it dies
3158 	// unexpectedly before being done, we need to notify the waiting
3159 	// thread now.
3160 
3161 	TeamLocker teamLocker(team);
3162 
3163 	if (team->loading_info) {
3164 		// there's indeed someone waiting
3165 		struct team_loading_info* loadingInfo = team->loading_info;
3166 		team->loading_info = NULL;
3167 
3168 		loadingInfo->result = B_ERROR;
3169 		loadingInfo->done = true;
3170 
3171 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3172 
3173 		// wake up the waiting thread
3174 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
3175 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
3176 	}
3177 
3178 	// notify team watchers
3179 
3180 	{
3181 		// we're not reachable from anyone anymore at this point, so we
3182 		// can safely access the list without any locking
3183 		struct team_watcher* watcher;
3184 		while ((watcher = (struct team_watcher*)list_remove_head_item(
3185 				&team->watcher_list)) != NULL) {
3186 			watcher->hook(teamID, watcher->data);
3187 			free(watcher);
3188 		}
3189 	}
3190 
3191 	teamLocker.Unlock();
3192 
3193 	sNotificationService.Notify(TEAM_REMOVED, team);
3194 
3195 	// free team resources
3196 
3197 	delete_realtime_sem_context(team->realtime_sem_context);
3198 	xsi_sem_undo(team);
3199 	remove_images(team);
3200 	team->address_space->RemoveAndPut();
3201 
3202 	team->ReleaseReference();
3203 
	// notify the debugger that the team is gone
3205 	user_debug_team_deleted(teamID, debuggerPort);
3206 }
3207 
3208 
3209 Team*
3210 team_get_kernel_team(void)
3211 {
3212 	return sKernelTeam;
3213 }
3214 
3215 
3216 team_id
3217 team_get_kernel_team_id(void)
3218 {
3219 	if (!sKernelTeam)
3220 		return 0;
3221 
3222 	return sKernelTeam->id;
3223 }
3224 
3225 
3226 team_id
3227 team_get_current_team_id(void)
3228 {
3229 	return thread_get_current_thread()->team->id;
3230 }
3231 
3232 
3233 status_t
3234 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3235 {
3236 	if (id == sKernelTeam->id) {
3237 		// we're the kernel team, so we don't have to go through all
3238 		// the hassle (locking and hash lookup)
3239 		*_addressSpace = VMAddressSpace::GetKernel();
3240 		return B_OK;
3241 	}
3242 
3243 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3244 
3245 	Team* team = team_get_team_struct_locked(id);
3246 	if (team == NULL)
3247 		return B_BAD_VALUE;
3248 
3249 	team->address_space->Get();
3250 	*_addressSpace = team->address_space;
3251 	return B_OK;
3252 }
3253 
3254 
3255 /*!	Sets the team's job control state.
3256 	The caller must hold the parent team's lock. Interrupts are allowed to be
3257 	enabled or disabled. In the latter case the scheduler lock may be held as
3258 	well.
3259 	\a team The team whose job control state shall be set.
3260 	\a newState The new state to be set.
	\a signal The signal the new state was caused by. Can be \c NULL, if
		none. Then the caller is responsible for filling in the following
		fields of the entry before releasing the parent team's lock, unless
		the new state is
3264 		\c JOB_CONTROL_STATE_NONE:
3265 		- \c signal: The number of the signal causing the state change.
3266 		- \c signaling_user: The real UID of the user sending the signal.
3267 	\a schedulerLocked indicates whether the scheduler lock is being held, too.
3268 */
3269 void
3270 team_set_job_control_state(Team* team, job_control_state newState,
3271 	Signal* signal, bool schedulerLocked)
3272 {
3273 	if (team == NULL || team->job_control_entry == NULL)
3274 		return;
3275 
3276 	// don't touch anything, if the state stays the same or the team is already
3277 	// dead
3278 	job_control_entry* entry = team->job_control_entry;
3279 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
3280 		return;
3281 
3282 	T(SetJobControlState(team->id, newState, signal));
3283 
3284 	// remove from the old list
3285 	switch (entry->state) {
3286 		case JOB_CONTROL_STATE_NONE:
3287 			// entry is in no list ATM
3288 			break;
3289 		case JOB_CONTROL_STATE_DEAD:
3290 			// can't get here
3291 			break;
3292 		case JOB_CONTROL_STATE_STOPPED:
3293 			team->parent->stopped_children.entries.Remove(entry);
3294 			break;
3295 		case JOB_CONTROL_STATE_CONTINUED:
3296 			team->parent->continued_children.entries.Remove(entry);
3297 			break;
3298 	}
3299 
3300 	entry->state = newState;
3301 
3302 	if (signal != NULL) {
3303 		entry->signal = signal->Number();
3304 		entry->signaling_user = signal->SendingUser();
3305 	}
3306 
3307 	// add to new list
3308 	team_job_control_children* childList = NULL;
3309 	switch (entry->state) {
3310 		case JOB_CONTROL_STATE_NONE:
3311 			// entry doesn't get into any list
3312 			break;
3313 		case JOB_CONTROL_STATE_DEAD:
3314 			childList = &team->parent->dead_children;
3315 			team->parent->dead_children.count++;
3316 			break;
3317 		case JOB_CONTROL_STATE_STOPPED:
3318 			childList = &team->parent->stopped_children;
3319 			break;
3320 		case JOB_CONTROL_STATE_CONTINUED:
3321 			childList = &team->parent->continued_children;
3322 			break;
3323 	}
3324 
3325 	if (childList != NULL) {
3326 		childList->entries.Add(entry);
3327 		team->parent->dead_children.condition_variable.NotifyAll(
3328 			schedulerLocked);
3329 	}
3330 }
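

/*	Example (illustrative sketch): delivering a stop signal would, with the
	parent team's lock held, record the state change roughly like this:

		Signal signal(SIGSTOP, SI_USER, B_OK, signalSenderID);
			// signalSenderID is a placeholder
		team_set_job_control_state(team, JOB_CONTROL_STATE_STOPPED, &signal,
			false);

	This moves the team's job control entry into the parent's
	stopped_children list and notifies waiters in wait_for_child().
*/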
3331 
3332 
3333 /*!	Inits the given team's exit information, if not yet initialized, to some
3334 	generic "killed" status.
3335 	The caller must not hold the team's lock. Interrupts must be enabled.
3336 
3337 	\param team The team whose exit info shall be initialized.
3338 */
3339 void
3340 team_init_exit_info_on_error(Team* team)
3341 {
3342 	TeamLocker teamLocker(team);
3343 
3344 	if (!team->exit.initialized) {
3345 		team->exit.reason = CLD_KILLED;
3346 		team->exit.signal = SIGKILL;
3347 		team->exit.signaling_user = geteuid();
3348 		team->exit.status = 0;
3349 		team->exit.initialized = true;
3350 	}
3351 }
3352 
3353 
3354 /*! Adds a hook to the team that is called as soon as this team goes away.
	This call might be made public in the future.
3356 */
3357 status_t
3358 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3359 {
	if (hook == NULL || teamID < 0)
3361 		return B_BAD_VALUE;
3362 
3363 	// create the watcher object
3364 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3365 	if (watcher == NULL)
3366 		return B_NO_MEMORY;
3367 
3368 	watcher->hook = hook;
3369 	watcher->data = data;
3370 
3371 	// add watcher, if the team isn't already dying
3372 	// get the team
3373 	Team* team = Team::GetAndLock(teamID);
3374 	if (team == NULL) {
3375 		free(watcher);
3376 		return B_BAD_TEAM_ID;
3377 	}
3378 
3379 	list_add_item(&team->watcher_list, watcher);
3380 
3381 	team->UnlockAndReleaseReference();
3382 
3383 	return B_OK;
3384 }
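

/*	Example (hypothetical hook, for illustration only):

		static void
		my_team_gone_hook(team_id id, void* data)
		{
			dprintf("team %" B_PRId32 " is gone\n", id);
		}

		status_t error = start_watching_team(teamID, &my_team_gone_hook, NULL);

	The same (hook, data) pair that was registered must later be passed to
	stop_watching_team() to unregister again.
*/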
3385 
3386 
3387 status_t
3388 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3389 {
3390 	if (hook == NULL || teamID < 0)
3391 		return B_BAD_VALUE;
3392 
3393 	// get team and remove watcher (if present)
3394 	Team* team = Team::GetAndLock(teamID);
3395 	if (team == NULL)
3396 		return B_BAD_TEAM_ID;
3397 
3398 	// search for watcher
3399 	team_watcher* watcher = NULL;
3400 	while ((watcher = (team_watcher*)list_get_next_item(
3401 			&team->watcher_list, watcher)) != NULL) {
3402 		if (watcher->hook == hook && watcher->data == data) {
3403 			// got it!
3404 			list_remove_item(&team->watcher_list, watcher);
3405 			break;
3406 		}
3407 	}
3408 
3409 	team->UnlockAndReleaseReference();
3410 
3411 	if (watcher == NULL)
3412 		return B_ENTRY_NOT_FOUND;
3413 
3414 	free(watcher);
3415 	return B_OK;
3416 }
3417 
3418 
3419 /*!	Allocates a user_thread structure from the team.
3420 	The team lock must be held, unless the function is called for the team's
3421 	main thread. Interrupts must be enabled.
3422 */
3423 struct user_thread*
3424 team_allocate_user_thread(Team* team)
3425 {
3426 	if (team->user_data == 0)
3427 		return NULL;
3428 
3429 	// take an entry from the free list, if any
3430 	if (struct free_user_thread* entry = team->free_user_threads) {
3431 		user_thread* thread = entry->thread;
3432 		team->free_user_threads = entry->next;
3433 		free(entry);
3434 		return thread;
3435 	}
3436 
3437 	while (true) {
3438 		// enough space left?
3439 		size_t needed = ROUNDUP(sizeof(user_thread), 128);
3440 		if (team->user_data_size - team->used_user_data < needed) {
3441 			// try to resize the area
3442 			if (resize_area(team->user_data_area,
3443 					team->user_data_size + B_PAGE_SIZE) != B_OK) {
3444 				return NULL;
3445 			}
3446 
3447 			// resized user area successfully -- try to allocate the user_thread
3448 			// again
3449 			team->user_data_size += B_PAGE_SIZE;
3450 			continue;
3451 		}
3452 
3453 		// allocate the user_thread
3454 		user_thread* thread
3455 			= (user_thread*)(team->user_data + team->used_user_data);
3456 		team->used_user_data += needed;
3457 
3458 		return thread;
3459 	}
3460 }
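

/*	Example (illustrative pairing of allocation and release; locking rules as
	documented on the two functions):

		// team lock held (or allocating for the team's main thread):
		user_thread* userThread = team_allocate_user_thread(team);
		...
		// team lock not held:
		team_free_user_thread(team, userThread);

	Released structures end up on the team's free list and are reused by the
	next allocation, so the user data area never shrinks.
*/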
3461 
3462 
3463 /*!	Frees the given user_thread structure.
3464 	The team's lock must not be held. Interrupts must be enabled.
3465 	\param team The team the user thread was allocated from.
3466 	\param userThread The user thread to free.
3467 */
3468 void
3469 team_free_user_thread(Team* team, struct user_thread* userThread)
3470 {
3471 	if (userThread == NULL)
3472 		return;
3473 
3474 	// create a free list entry
3475 	free_user_thread* entry
3476 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3477 	if (entry == NULL) {
3478 		// we have to leak the user thread :-/
3479 		return;
3480 	}
3481 
3482 	// add to free list
3483 	TeamLocker teamLocker(team);
3484 
3485 	entry->thread = userThread;
3486 	entry->next = team->free_user_threads;
3487 	team->free_user_threads = entry;
3488 }
3489 
3490 
3491 //	#pragma mark - Associated data interface
3492 
3493 
3494 AssociatedData::AssociatedData()
3495 	:
3496 	fOwner(NULL)
3497 {
3498 }
3499 
3500 
3501 AssociatedData::~AssociatedData()
3502 {
3503 }
3504 
3505 
3506 void
3507 AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
3508 {
3509 }
3510 
3511 
3512 AssociatedDataOwner::AssociatedDataOwner()
3513 {
3514 	mutex_init(&fLock, "associated data owner");
3515 }
3516 
3517 
3518 AssociatedDataOwner::~AssociatedDataOwner()
3519 {
3520 	mutex_destroy(&fLock);
3521 }
3522 
3523 
3524 bool
3525 AssociatedDataOwner::AddData(AssociatedData* data)
3526 {
3527 	MutexLocker locker(fLock);
3528 
3529 	if (data->Owner() != NULL)
3530 		return false;
3531 
3532 	data->AcquireReference();
3533 	fList.Add(data);
3534 	data->SetOwner(this);
3535 
3536 	return true;
3537 }
3538 
3539 
3540 bool
3541 AssociatedDataOwner::RemoveData(AssociatedData* data)
3542 {
3543 	MutexLocker locker(fLock);
3544 
3545 	if (data->Owner() != this)
3546 		return false;
3547 
3548 	data->SetOwner(NULL);
3549 	fList.Remove(data);
3550 
3551 	locker.Unlock();
3552 
3553 	data->ReleaseReference();
3554 
3555 	return true;
3556 }
3557 
3558 
3559 void
3560 AssociatedDataOwner::PrepareForDeletion()
3561 {
3562 	MutexLocker locker(fLock);
3563 
3564 	// move all data to a temporary list and unset the owner
3565 	DataList list;
3566 	list.MoveFrom(&fList);
3567 
3568 	for (DataList::Iterator it = list.GetIterator();
3569 		AssociatedData* data = it.Next();) {
3570 		data->SetOwner(NULL);
3571 	}
3572 
3573 	locker.Unlock();
3574 
3575 	// call the notification hooks and release our references
3576 	while (AssociatedData* data = list.RemoveHead()) {
3577 		data->OwnerDeleted(this);
3578 		data->ReleaseReference();
3579 	}
3580 }
3581 
3582 
3583 /*!	Associates data with the current team.
3584 	When the team is deleted, the data object is notified.
3585 	The team acquires a reference to the object.
3586 
3587 	\param data The data object.
3588 	\return \c true on success, \c false otherwise. Fails only when the supplied
3589 		data object is already associated with another owner.
3590 */
3591 bool
3592 team_associate_data(AssociatedData* data)
3593 {
3594 	return thread_get_current_thread()->team->AddData(data);
3595 }
3596 
3597 
3598 /*!	Dissociates data from the current team.
3599 	Balances an earlier call to team_associate_data().
3600 
3601 	\param data The data object.
3602 	\return \c true on success, \c false otherwise. Fails only when the data
3603 		object is not associated with the current team.
3604 */
3605 bool
3606 team_dissociate_data(AssociatedData* data)
3607 {
3608 	return thread_get_current_thread()->team->RemoveData(data);
3609 }
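

/*	Example (hypothetical subclass, for illustration only):

		struct MyData : AssociatedData {
			virtual void OwnerDeleted(AssociatedDataOwner* owner)
			{
				// the owning team is going away
			}
		};

		MyData* data = new(std::nothrow) MyData;
		if (data != NULL && !team_associate_data(data))
			data->ReleaseReference();

	The team holds a reference while the data is associated and invokes
	OwnerDeleted() from PrepareForDeletion() when it dies.
*/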
3610 
3611 
3612 //	#pragma mark - Public kernel API
3613 
3614 
3615 thread_id
3616 load_image(int32 argCount, const char** args, const char** env)
3617 {
3618 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
3619 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
3620 }
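

/*	Example (hypothetical kernel-side usage):

		const char* args[] = { "/bin/sh", "-c", "sync", NULL };
		thread_id thread = load_image(3, args, NULL);
		if (thread >= 0) {
			resume_thread(thread);
			status_t returnCode;
			wait_for_team(thread, &returnCode);
		}

	Since a team's ID equals its main thread's ID, the value returned by
	load_image() can be passed to wait_for_team() directly.
*/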
3621 
3622 
3623 thread_id
3624 load_image_etc(int32 argCount, const char* const* args,
3625 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3626 {
3627 	// we need to flatten the args and environment
3628 
3629 	if (args == NULL)
3630 		return B_BAD_VALUE;
3631 
3632 	// determine total needed size
3633 	int32 argSize = 0;
3634 	for (int32 i = 0; i < argCount; i++)
3635 		argSize += strlen(args[i]) + 1;
3636 
3637 	int32 envCount = 0;
3638 	int32 envSize = 0;
3639 	while (env != NULL && env[envCount] != NULL)
3640 		envSize += strlen(env[envCount++]) + 1;
3641 
3642 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3643 	if (size > MAX_PROCESS_ARGS_SIZE)
3644 		return B_TOO_MANY_ARGS;
3645 
3646 	// allocate space
3647 	char** flatArgs = (char**)malloc(size);
3648 	if (flatArgs == NULL)
3649 		return B_NO_MEMORY;
3650 
3651 	char** slot = flatArgs;
3652 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3653 
3654 	// copy arguments and environment
3655 	for (int32 i = 0; i < argCount; i++) {
3656 		int32 argSize = strlen(args[i]) + 1;
3657 		memcpy(stringSpace, args[i], argSize);
3658 		*slot++ = stringSpace;
3659 		stringSpace += argSize;
3660 	}
3661 
3662 	*slot++ = NULL;
3663 
3664 	for (int32 i = 0; i < envCount; i++) {
3665 		int32 envSize = strlen(env[i]) + 1;
3666 		memcpy(stringSpace, env[i], envSize);
3667 		*slot++ = stringSpace;
3668 		stringSpace += envSize;
3669 	}
3670 
3671 	*slot++ = NULL;
3672 
	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
		priority, parentID, flags, -1, 0);
3675 
3676 	free(flatArgs);
3677 		// load_image_internal() unset our variable if it took over ownership
3678 
3679 	return thread;
3680 }
3681 
3682 
3683 status_t
3684 wait_for_team(team_id id, status_t* _returnCode)
3685 {
3686 	// check whether the team exists
3687 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3688 
3689 	Team* team = team_get_team_struct_locked(id);
3690 	if (team == NULL)
3691 		return B_BAD_TEAM_ID;
3692 
3693 	id = team->id;
3694 
3695 	teamsLocker.Unlock();
3696 
3697 	// wait for the main thread (it has the same ID as the team)
3698 	return wait_for_thread(id, _returnCode);
3699 }
3700 
3701 
3702 status_t
3703 kill_team(team_id id)
3704 {
3705 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3706 
3707 	Team* team = team_get_team_struct_locked(id);
3708 	if (team == NULL)
3709 		return B_BAD_TEAM_ID;
3710 
3711 	id = team->id;
3712 
3713 	teamsLocker.Unlock();
3714 
3715 	if (team == sKernelTeam)
3716 		return B_NOT_ALLOWED;
3717 
	// Just kill the team's main thread (it has the same ID as the team). The
3719 	// cleanup code there will take care of the team.
3720 	return kill_thread(id);
3721 }
3722 
3723 
3724 status_t
3725 _get_team_info(team_id id, team_info* info, size_t size)
3726 {
3727 	// get the team
3728 	Team* team = Team::Get(id);
3729 	if (team == NULL)
3730 		return B_BAD_TEAM_ID;
3731 	BReference<Team> teamReference(team, true);
3732 
3733 	// fill in the info
3734 	return fill_team_info(team, info, size);
3735 }
3736 
3737 
3738 status_t
3739 _get_next_team_info(int32* cookie, team_info* info, size_t size)
3740 {
3741 	int32 slot = *cookie;
3742 	if (slot < 1)
3743 		slot = 1;
3744 
3745 	InterruptsSpinLocker locker(sTeamHashLock);
3746 
3747 	team_id lastTeamID = peek_next_thread_id();
3748 		// TODO: This is broken, since the id can wrap around!
3749 
3750 	// get next valid team
3751 	Team* team = NULL;
3752 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3753 		slot++;
3754 
3755 	if (team == NULL)
3756 		return B_BAD_TEAM_ID;
3757 
3758 	// get a reference to the team and unlock
3759 	BReference<Team> teamReference(team);
3760 	locker.Unlock();
3761 
3762 	// fill in the info
3763 	*cookie = ++slot;
3764 	return fill_team_info(team, info, size);
3765 }
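

/*	Example (illustrative iteration over all teams via the cookie):

		int32 cookie = 0;
		team_info info;
		while (_get_next_team_info(&cookie, &info, sizeof(info)) == B_OK)
			dprintf("team %" B_PRId32 ": %s\n", info.team, info.args);
*/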
3766 
3767 
3768 status_t
3769 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3770 {
3771 	if (size != sizeof(team_usage_info))
3772 		return B_BAD_VALUE;
3773 
3774 	return common_get_team_usage_info(id, who, info, 0);
3775 }
3776 
3777 
3778 pid_t
3779 getpid(void)
3780 {
3781 	return thread_get_current_thread()->team->id;
3782 }
3783 
3784 
3785 pid_t
3786 getppid(void)
3787 {
3788 	Team* team = thread_get_current_thread()->team;
3789 
3790 	TeamLocker teamLocker(team);
3791 
3792 	return team->parent->id;
3793 }
3794 
3795 
3796 pid_t
3797 getpgid(pid_t id)
3798 {
3799 	if (id < 0) {
3800 		errno = EINVAL;
3801 		return -1;
3802 	}
3803 
3804 	if (id == 0) {
3805 		// get process group of the calling process
3806 		Team* team = thread_get_current_thread()->team;
3807 		TeamLocker teamLocker(team);
3808 		return team->group_id;
3809 	}
3810 
3811 	// get the team
3812 	Team* team = Team::GetAndLock(id);
3813 	if (team == NULL) {
3814 		errno = ESRCH;
3815 		return -1;
3816 	}
3817 
3818 	// get the team's process group ID
3819 	pid_t groupID = team->group_id;
3820 
3821 	team->UnlockAndReleaseReference();
3822 
3823 	return groupID;
3824 }
3825 
3826 
3827 pid_t
3828 getsid(pid_t id)
3829 {
3830 	if (id < 0) {
3831 		errno = EINVAL;
3832 		return -1;
3833 	}
3834 
3835 	if (id == 0) {
3836 		// get session of the calling process
3837 		Team* team = thread_get_current_thread()->team;
3838 		TeamLocker teamLocker(team);
3839 		return team->session_id;
3840 	}
3841 
3842 	// get the team
3843 	Team* team = Team::GetAndLock(id);
3844 	if (team == NULL) {
3845 		errno = ESRCH;
3846 		return -1;
3847 	}
3848 
3849 	// get the team's session ID
3850 	pid_t sessionID = team->session_id;
3851 
3852 	team->UnlockAndReleaseReference();
3853 
3854 	return sessionID;
3855 }
3856 
3857 
3858 //	#pragma mark - User syscalls
3859 
3860 
3861 status_t
3862 _user_exec(const char* userPath, const char* const* userFlatArgs,
3863 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
3864 {
3865 	// NOTE: Since this function normally doesn't return, don't use automatic
3866 	// variables that need destruction in the function scope.
3867 	char path[B_PATH_NAME_LENGTH];
3868 
3869 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3870 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3871 		return B_BAD_ADDRESS;
3872 
3873 	// copy and relocate the flat arguments
3874 	char** flatArgs;
3875 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3876 		argCount, envCount, flatArgs);
3877 
3878 	if (error == B_OK) {
3879 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3880 			envCount, umask);
3881 			// this one only returns in case of error
3882 	}
3883 
3884 	free(flatArgs);
3885 	return error;
3886 }
3887 
3888 
3889 thread_id
3890 _user_fork(void)
3891 {
3892 	return fork_team();
3893 }
3894 
3895 
3896 pid_t
3897 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo)
3898 {
3899 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
3900 		return B_BAD_ADDRESS;
3901 
3902 	siginfo_t info;
3903 	pid_t foundChild = wait_for_child(child, flags, info);
3904 	if (foundChild < 0)
3905 		return syscall_restart_handle_post(foundChild);
3906 
3907 	// copy info back to userland
3908 	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
3909 		return B_BAD_ADDRESS;
3910 
3911 	return foundChild;
3912 }
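
// Illustrative only: this syscall backs the POSIX wait functions; a typical
// caller looks like
//
//     int status;
//     pid_t pid = waitpid(-1, &status, WUNTRACED);
//
// with libroot mapping the siginfo_t delivered here onto the encoded status.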
3913 
3914 
3915 pid_t
3916 _user_process_info(pid_t process, int32 which)
3917 {
3918 	// we only allow returning the parent of the current process
3919 	if (which == PARENT_ID
3920 		&& process != 0 && process != thread_get_current_thread()->team->id)
3921 		return B_BAD_VALUE;
3922 
3923 	pid_t result;
3924 	switch (which) {
3925 		case SESSION_ID:
3926 			result = getsid(process);
3927 			break;
3928 		case GROUP_ID:
3929 			result = getpgid(process);
3930 			break;
3931 		case PARENT_ID:
3932 			result = getppid();
3933 			break;
3934 		default:
3935 			return B_BAD_VALUE;
3936 	}
3937 
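	// on failure the calls above set errno, which on Haiku is itself a
	// negative error code and can therefore be returned directly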
3938 	return result >= 0 ? result : errno;
3939 }
3940 
3941 
3942 pid_t
3943 _user_setpgid(pid_t processID, pid_t groupID)
3944 {
3945 	// setpgid() can be called either by the parent of the target process or
3946 	// by the process itself to do one of two things:
3947 	// * Create a new process group with the target process' ID and the target
3948 	//   process as group leader.
3949 	// * Set the target process' process group to an already existing one in the
3950 	//   same session.
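	//
	// Illustrative only -- the classic pattern this supports is a shell
	// moving a freshly forked child into its own group, from both sides to
	// close the race:
	//
	//     pid_t child = fork();
	//     if (child == 0)
	//         setpgid(0, 0);          // child: new group, itself as leader
	//     else if (child > 0)
	//         setpgid(child, child);  // parent: same effect, whoever runs first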
3951 
3952 	if (groupID < 0)
3953 		return B_BAD_VALUE;
3954 
3955 	Team* currentTeam = thread_get_current_thread()->team;
3956 	if (processID == 0)
3957 		processID = currentTeam->id;
3958 
3959 	// if the group ID is not specified, use the target process' ID
3960 	if (groupID == 0)
3961 		groupID = processID;
3962 
3963 	// We loop when running into the following race condition: We create a new
3964 	// process group, because there isn't one with that ID yet, but later when
3965 	// trying to publish it, we find that someone else created and published
3966 	// a group with that ID in the meantime. In that case we just restart the
3967 	// whole action.
3968 	while (true) {
3969 		// Look up the process group by ID. If it doesn't exist yet and we are
3970 		// allowed to create a new one, do that.
3971 		ProcessGroup* group = ProcessGroup::Get(groupID);
3972 		bool newGroup = false;
3973 		if (group == NULL) {
3974 			if (groupID != processID)
3975 				return B_NOT_ALLOWED;
3976 
3977 			group = new(std::nothrow) ProcessGroup(groupID);
3978 			if (group == NULL)
3979 				return B_NO_MEMORY;
3980 
3981 			newGroup = true;
3982 		}
3983 		BReference<ProcessGroup> groupReference(group, true);
3984 
3985 		// get the target team
3986 		Team* team = Team::Get(processID);
3987 		if (team == NULL)
3988 			return ESRCH;
3989 		BReference<Team> teamReference(team, true);
3990 
3991 		// lock the new process group and the team's current process group
3992 		while (true) {
3993 			// lock the team's current process group
3994 			team->LockProcessGroup();
3995 
3996 			ProcessGroup* oldGroup = team->group;
3997 			if (oldGroup == group) {
3998 				// it's the same as the target group, so just bail out
3999 				oldGroup->Unlock();
4000 				return group->id;
4001 			}
4002 
4003 			oldGroup->AcquireReference();
4004 
4005 			// lock the target process group, if locking order allows it
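			// (the convention here: groups are locked in ascending ID order;
			// a newly created group is not published yet, so no one else can
			// contend for its lock)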
4006 			if (newGroup || group->id > oldGroup->id) {
4007 				group->Lock();
4008 				break;
4009 			}
4010 
4011 			// try to lock
4012 			if (group->TryLock())
4013 				break;
4014 
4015 			// no dice -- unlock the team's current process group and relock in
4016 			// the correct order
4017 			oldGroup->Unlock();
4018 
4019 			group->Lock();
4020 			oldGroup->Lock();
4021 
4022 			// check whether things are still the same
4023 			TeamLocker teamLocker(team);
4024 			if (team->group == oldGroup)
4025 				break;
4026 
4027 			// something changed -- unlock everything and retry
4028 			teamLocker.Unlock();
4029 			oldGroup->Unlock();
4030 			group->Unlock();
4031 			oldGroup->ReleaseReference();
4032 		}
4033 
4034 		// we now hold references and locks for both the new and the old process group
4035 		BReference<ProcessGroup> oldGroupReference(team->group, true);
4036 		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
4037 		AutoLocker<ProcessGroup> groupLocker(group, true);
4038 
4039 		// also lock the target team and its parent
4040 		team->LockTeamAndParent(false);
4041 		TeamLocker parentLocker(team->parent, true);
4042 		TeamLocker teamLocker(team, true);
4043 
4044 		// perform the checks
4045 		if (team == currentTeam) {
4046 			// we set our own group
4047 
4048 			// we must not change our process group ID if we're a session leader
4049 			if (is_session_leader(currentTeam))
4050 				return B_NOT_ALLOWED;
4051 		} else {
4052 			// Calling team != target team. The target team must be a child of
4053 			// the calling team and in the same session. (If that's the case it
4054 			// isn't a session leader either.)
4055 			if (team->parent != currentTeam
4056 				|| team->session_id != currentTeam->session_id) {
4057 				return B_NOT_ALLOWED;
4058 			}
4059 
4060 			// POSIX also requires the call to fail if the child has already
4061 			// executed one of the exec*() functions [EACCES].
4062 			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
4063 				return EACCES;
4064 		}
4065 
4066 		// If we created a new process group, publish it now.
4067 		if (newGroup) {
4068 			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
4069 			if (sGroupHash.Lookup(groupID)) {
4070 				// A group with the group ID appeared since we first checked.
4071 				// Back to square one.
4072 				continue;
4073 			}
4074 
4075 			group->PublishLocked(team->group->Session());
4076 		} else if (group->Session()->id != team->session_id) {
4077 			// The existing target process group belongs to a different session.
4078 			// That's not allowed.
4079 			return B_NOT_ALLOWED;
4080 		}
4081 
4082 		// Everything is ready -- set the group.
4083 		remove_team_from_group(team);
4084 		insert_team_into_group(group, team);
4085 
4086 		// Changing the process group might have changed the situation for a
4087 		// parent waiting in wait_for_child(). Hence we notify it.
4088 		team->parent->dead_children.condition_variable.NotifyAll(false);
4089 
4090 		return group->id;
4091 	}
4092 }
4093 
4094 
4095 pid_t
4096 _user_setsid(void)
4097 {
4098 	Team* team = thread_get_current_thread()->team;
4099 
4100 	// create a new process group and session
4101 	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
4102 	if (group == NULL)
4103 		return B_NO_MEMORY;
4104 	BReference<ProcessGroup> groupReference(group, true);
4105 	AutoLocker<ProcessGroup> groupLocker(group);
4106 
4107 	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
4108 	if (session == NULL)
4109 		return B_NO_MEMORY;
4110 	BReference<ProcessSession> sessionReference(session, true);
4111 
4112 	// lock the team's current process group, parent, and the team itself
4113 	team->LockTeamParentAndProcessGroup();
4114 	BReference<ProcessGroup> oldGroupReference(team->group);
4115 	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
4116 	TeamLocker parentLocker(team->parent, true);
4117 	TeamLocker teamLocker(team, true);
4118 
4119 	// the team must not already be a process group leader
4120 	if (is_process_group_leader(team))
4121 		return B_NOT_ALLOWED;
4122 
4123 	// remove the team from the old and add it to the new process group
4124 	remove_team_from_group(team);
4125 	group->Publish(session);
4126 	insert_team_into_group(group, team);
4127 
4128 	// Changing the process group might have changed the situation for a
4129 	// parent waiting in wait_for_child(). Hence we notify it.
4130 	team->parent->dead_children.condition_variable.NotifyAll(false);
4131 
4132 	return group->id;
4133 }
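
// Illustrative only: setsid() is the standard first step of daemonizing --
//
//     if (fork() > 0)
//         exit(0);    // parent exits, so the child is not a group leader ...
//     setsid();       // ... and may start a new session, detached from its
//                     // controlling terminal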
4134 
4135 
4136 status_t
4137 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4138 {
4139 	status_t returnCode;
4140 	status_t status;
4141 
4142 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4143 		return B_BAD_ADDRESS;
4144 
4145 	status = wait_for_team(id, &returnCode);
4146 	if (status >= B_OK && _userReturnCode != NULL) {
4147 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4148 				!= B_OK)
4149 			return B_BAD_ADDRESS;
4150 		return B_OK;
4151 	}
4152 
4153 	return syscall_restart_handle_post(status);
4154 }
4155 
4156 
4157 thread_id
4158 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
4159 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
4160 	port_id errorPort, uint32 errorToken)
4161 {
4162 	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));
4163 
4164 	if (argCount < 1)
4165 		return B_BAD_VALUE;
4166 
4167 	// copy and relocate the flat arguments
4168 	char** flatArgs;
4169 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
4170 		argCount, envCount, flatArgs);
4171 	if (error != B_OK)
4172 		return error;
4173 
4174 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
4175 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
4176 		errorToken);
4177 
4178 	free(flatArgs);
4179 		// load_image_internal() has set flatArgs to NULL if it took over ownership
4180 
4181 	return thread;
4182 }
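
// Illustrative only: the public counterpart is load_image() (OS.h), e.g.
//
//     extern char** environ;
//     const char* args[] = { "/bin/ls", "-l", "/boot" };
//     thread_id team = load_image(3, args, (const char**)environ);
//     if (team >= 0)
//         resume_thread(team);   // the new team starts suspended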
4183 
4184 
4185 void
4186 _user_exit_team(status_t returnValue)
4187 {
4188 	Thread* thread = thread_get_current_thread();
4189 	Team* team = thread->team;
4190 
4191 	// set this thread's exit status
4192 	thread->exit.status = returnValue;
4193 
4194 	// set the team exit status
4195 	TeamLocker teamLocker(team);
4196 
4197 	if (!team->exit.initialized) {
4198 		team->exit.reason = CLD_EXITED;
4199 		team->exit.signal = 0;
4200 		team->exit.signaling_user = 0;
4201 		team->exit.status = returnValue;
4202 		team->exit.initialized = true;
4203 	}
4204 
4205 	teamLocker.Unlock();
4206 
4207 	// Stop the thread, if the team is being debugged and that has been
4208 	// requested.
4209 	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
4210 		user_debug_stop_thread();
4211 
4212 	// Send this thread a SIGKILL. This makes sure the thread will not return to
4213 	// userland. The signal handling code forwards the signal to the main
4214 	// thread (if that's not already this one), which will take the team down.
4215 	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
4216 	send_signal_to_thread(thread, signal, 0);
4217 }
4218 
4219 
4220 status_t
4221 _user_kill_team(team_id team)
4222 {
4223 	return kill_team(team);
4224 }
4225 
4226 
4227 status_t
4228 _user_get_team_info(team_id id, team_info* userInfo)
4229 {
4230 	status_t status;
4231 	team_info info;
4232 
4233 	if (!IS_USER_ADDRESS(userInfo))
4234 		return B_BAD_ADDRESS;
4235 
4236 	status = _get_team_info(id, &info, sizeof(team_info));
4237 	if (status == B_OK) {
4238 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4239 			return B_BAD_ADDRESS;
4240 	}
4241 
4242 	return status;
4243 }
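
// Illustrative only: reached from userland via get_team_info() (OS.h), e.g.
//
//     team_info info;
//     if (get_team_info(B_CURRENT_TEAM, &info) == B_OK)
//         printf("running as: %s\n", info.args);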
4244 
4245 
4246 status_t
4247 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
4248 {
4249 	status_t status;
4250 	team_info info;
4251 	int32 cookie;
4252 
4253 	if (!IS_USER_ADDRESS(userCookie)
4254 		|| !IS_USER_ADDRESS(userInfo)
4255 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4256 		return B_BAD_ADDRESS;
4257 
4258 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
4259 	if (status != B_OK)
4260 		return status;
4261 
4262 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4263 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4264 		return B_BAD_ADDRESS;
4265 
4266 	return status;
4267 }
4268 
4269 
4270 team_id
4271 _user_get_current_team(void)
4272 {
4273 	return team_get_current_team_id();
4274 }
4275 
4276 
4277 status_t
4278 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4279 	size_t size)
4280 {
4281 	if (size != sizeof(team_usage_info))
4282 		return B_BAD_VALUE;
4283 
4284 	team_usage_info info;
4285 	status_t status = common_get_team_usage_info(team, who, &info,
4286 		B_CHECK_PERMISSION);
4287 
4288 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4289 		|| user_memcpy(userInfo, &info, size) != B_OK) {
4290 		return B_BAD_ADDRESS;
4291 	}
4292 
4293 	return status;
4294 }
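
// Illustrative only: the public wrapper is get_team_usage_info() (OS.h), e.g.
//
//     team_usage_info usage;
//     if (get_team_usage_info(B_CURRENT_TEAM, B_TEAM_USAGE_SELF, &usage)
//             == B_OK)
//         printf("user time: %" B_PRId64 " us\n", usage.user_time);
//
// Note that the syscall above copies info back to userland even when the
// usage query failed, so callers must check the returned status before
// trusting the data.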
4295 
4296 
4297 status_t
4298 _user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
4299 	size_t size, size_t* _sizeNeeded)
4300 {
4301 	// check parameters
4302 	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
4303 		|| (buffer == NULL && size > 0)
4304 		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
4305 		return B_BAD_ADDRESS;
4306 	}
4307 
4308 	KMessage info;
4309 
4310 	if ((flags & B_TEAM_INFO_BASIC) != 0) {
4311 		// allocate memory for a copy of the needed team data
4312 		struct ExtendedTeamData {
4313 			team_id	id;
4314 			pid_t	group_id;
4315 			pid_t	session_id;
4316 			uid_t	real_uid;
4317 			gid_t	real_gid;
4318 			uid_t	effective_uid;
4319 			gid_t	effective_gid;
4320 			char	name[B_OS_NAME_LENGTH];
4321 		};
4322 
4323 		ExtendedTeamData* teamClone
4324 			= (ExtendedTeamData*)malloc(sizeof(ExtendedTeamData));
4325 			// It would be nicer to use new, but then we'd have to use
4326 			// ObjectDeleter and declare the structure outside of the function
4327 			// due to template parameter restrictions.
4328 		if (teamClone == NULL)
4329 			return B_NO_MEMORY;
4330 		MemoryDeleter teamCloneDeleter(teamClone);
4331 
4332 		io_context* ioContext;
4333 		{
4334 			// get the team structure
4335 			Team* team = Team::GetAndLock(teamID);
4336 			if (team == NULL)
4337 				return B_BAD_TEAM_ID;
4338 			BReference<Team> teamReference(team, true);
4339 			TeamLocker teamLocker(team, true);
4340 
4341 			// copy the data
4342 			teamClone->id = team->id;
4343 			strlcpy(teamClone->name, team->Name(), sizeof(teamClone->name));
4344 			teamClone->group_id = team->group_id;
4345 			teamClone->session_id = team->session_id;
4346 			teamClone->real_uid = team->real_uid;
4347 			teamClone->real_gid = team->real_gid;
4348 			teamClone->effective_uid = team->effective_uid;
4349 			teamClone->effective_gid = team->effective_gid;
4350 
4351 			// also fetch a reference to the I/O context
4352 			ioContext = team->io_context;
4353 			vfs_get_io_context(ioContext);
4354 		}
4355 		CObjectDeleter<io_context> ioContextPutter(ioContext,
4356 			&vfs_put_io_context);
4357 
4358 		// add the basic data to the info message
4359 		if (info.AddInt32("id", teamClone->id) != B_OK
4360 			|| info.AddString("name", teamClone->name) != B_OK
4361 			|| info.AddInt32("process group", teamClone->group_id) != B_OK
4362 			|| info.AddInt32("session", teamClone->session_id) != B_OK
4363 			|| info.AddInt32("uid", teamClone->real_uid) != B_OK
4364 			|| info.AddInt32("gid", teamClone->real_gid) != B_OK
4365 			|| info.AddInt32("euid", teamClone->effective_uid) != B_OK
4366 			|| info.AddInt32("egid", teamClone->effective_gid) != B_OK) {
4367 			return B_NO_MEMORY;
4368 		}
4369 
4370 		// get the current working directory from the I/O context
4371 		dev_t cwdDevice;
4372 		ino_t cwdDirectory;
4373 		{
4374 			MutexLocker ioContextLocker(ioContext->io_mutex);
4375 			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
4376 		}
4377 
4378 		if (info.AddInt32("cwd device", cwdDevice) != B_OK
4379 			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
4380 			return B_NO_MEMORY;
4381 		}
4382 	}
4383 
4384 	// TODO: Support the other flags!
4385 
4386 	// copy the needed size and, if it fits, the message back to userland
4387 	size_t sizeNeeded = info.ContentSize();
4388 	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
4389 		return B_BAD_ADDRESS;
4390 
4391 	if (sizeNeeded > size)
4392 		return B_BUFFER_OVERFLOW;
4393 
4394 	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
4395 		return B_BAD_ADDRESS;
4396 
4397 	return B_OK;
4398 }
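
// Illustrative only: the reply is a flattened KMessage, and the size-probe
// protocol lets a caller retry with a correctly sized buffer. Sketch --
// get_extended_team_info() stands in for the private userland wrapper of
// this syscall, its exact signature being an assumption here:
//
//     size_t sizeNeeded = 0;
//     status_t status = get_extended_team_info(teamID, B_TEAM_INFO_BASIC,
//         NULL, 0, &sizeNeeded);        // probe; expect B_BUFFER_OVERFLOW
//     if (status == B_BUFFER_OVERFLOW) {
//         void* buffer = malloc(sizeNeeded);
//         if (buffer != NULL) {
//             status = get_extended_team_info(teamID, B_TEAM_INFO_BASIC,
//                 buffer, sizeNeeded, &sizeNeeded);
//         }
//     }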
4399