xref: /haiku/src/system/kernel/team.cpp (revision e5d65858f2361fe0552495b61620c84dcee6bc00)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*!	Team functions */
12 
13 
14 #include <team.h>
15 
16 #include <errno.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include <string.h>
20 #include <sys/wait.h>
21 
22 #include <OS.h>
23 
24 #include <AutoDeleter.h>
25 #include <FindDirectory.h>
26 
27 #include <extended_system_info_defs.h>
28 
29 #include <commpage.h>
30 #include <boot_device.h>
31 #include <elf.h>
32 #include <file_cache.h>
33 #include <fs/KPath.h>
34 #include <heap.h>
35 #include <int.h>
36 #include <kernel.h>
37 #include <kimage.h>
38 #include <kscheduler.h>
39 #include <ksignal.h>
40 #include <Notifications.h>
41 #include <port.h>
42 #include <posix/realtime_sem.h>
43 #include <posix/xsi_semaphore.h>
44 #include <sem.h>
45 #include <syscall_process_info.h>
46 #include <syscall_restart.h>
47 #include <syscalls.h>
48 #include <tls.h>
49 #include <tracing.h>
50 #include <user_runtime.h>
51 #include <user_thread.h>
52 #include <usergroup.h>
53 #include <vfs.h>
54 #include <vm/vm.h>
55 #include <vm/VMAddressSpace.h>
56 #include <util/AutoLock.h>
57 
58 #include "TeamThreadTables.h"
59 
60 
61 //#define TRACE_TEAM
62 #ifdef TRACE_TEAM
63 #	define TRACE(x) dprintf x
64 #else
65 #	define TRACE(x) ;
66 #endif
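
// TRACE(x) expands to "dprintf x", so callers must wrap the argument list in
// an extra pair of parentheses, as in the calls throughout this file:
//
//	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
//		thread->id));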
67 
68 
69 struct team_key {
70 	team_id id;
71 };
72 
73 struct team_arg {
74 	char	*path;
75 	char	**flat_args;
76 	size_t	flat_args_size;
77 	uint32	arg_count;
78 	uint32	env_count;
79 	mode_t	umask;
80 	port_id	error_port;
81 	uint32	error_token;
82 };
83 
84 
85 namespace {
86 
87 
88 class TeamNotificationService : public DefaultNotificationService {
89 public:
90 							TeamNotificationService();
91 
92 			void			Notify(uint32 eventCode, Team* team);
93 };
94 
95 
96 // #pragma mark - TeamTable
97 
98 
99 typedef BKernel::TeamThreadTable<Team> TeamTable;
100 
101 
102 // #pragma mark - ProcessGroupHashDefinition
103 
104 
105 struct ProcessGroupHashDefinition {
106 	typedef pid_t			KeyType;
107 	typedef	ProcessGroup	ValueType;
108 
109 	size_t HashKey(pid_t key) const
110 	{
111 		return key;
112 	}
113 
114 	size_t Hash(ProcessGroup* value) const
115 	{
116 		return HashKey(value->id);
117 	}
118 
119 	bool Compare(pid_t key, ProcessGroup* value) const
120 	{
121 		return value->id == key;
122 	}
123 
124 	ProcessGroup*& GetLink(ProcessGroup* value) const
125 	{
126 		return value->next;
127 	}
128 };
129 
130 typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
131 
132 
133 }	// unnamed namespace
134 
135 
136 // #pragma mark -
137 
138 
139 // the team_id -> Team hash table and the lock protecting it
140 static TeamTable sTeamHash;
141 static spinlock sTeamHashLock = B_SPINLOCK_INITIALIZER;
142 
143 // the pid_t -> ProcessGroup hash table and the lock protecting it
144 static ProcessGroupHashTable sGroupHash;
145 static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;
146 
147 static Team* sKernelTeam = NULL;
148 
149 // A list of process groups of children of dying session leaders that need to
150 // be signalled if they have become orphaned and contain stopped processes.
151 static ProcessGroupList sOrphanedCheckProcessGroups;
152 static mutex sOrphanedCheckLock
153 	= MUTEX_INITIALIZER("orphaned process group check");
154 
155 // some arbitrarily chosen limits -- should probably depend on the available
156 // memory (the limit is not yet enforced)
157 static int32 sMaxTeams = 2048;
158 static int32 sUsedTeams = 1;
159 
160 static TeamNotificationService sNotificationService;
161 
162 static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
163 static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
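
// With the usual 4 KiB B_PAGE_SIZE this reserves 512 KiB of address space per
// team, of which create_team_user_data() below initially backs only the first
// 16 KiB with an actual area.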
164 
165 
166 // #pragma mark - TeamListIterator
167 
168 
169 TeamListIterator::TeamListIterator()
170 {
171 	// queue the entry
172 	InterruptsSpinLocker locker(sTeamHashLock);
173 	sTeamHash.InsertIteratorEntry(&fEntry);
174 }
175 
176 
177 TeamListIterator::~TeamListIterator()
178 {
179 	// remove the entry
180 	InterruptsSpinLocker locker(sTeamHashLock);
181 	sTeamHash.RemoveIteratorEntry(&fEntry);
182 }
183 
184 
185 Team*
186 TeamListIterator::Next()
187 {
188 	// get the next team -- if there is one, acquire a reference to it
189 	InterruptsSpinLocker locker(sTeamHashLock);
190 	Team* team = sTeamHash.NextElement(&fEntry);
191 	if (team != NULL)
192 		team->AcquireReference();
193 
194 	return team;
195 }
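
// Typical usage, as a minimal sketch: Next() returns each team with a
// reference already acquired, so the caller must release it.
//
//	TeamListIterator iterator;
//	while (Team* team = iterator.Next()) {
//		// ... inspect the team ...
//		team->ReleaseReference();
//	}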
196 
197 
198 // #pragma mark - Tracing
199 
200 
201 #if TEAM_TRACING
202 namespace TeamTracing {
203 
204 class TeamForked : public AbstractTraceEntry {
205 public:
206 	TeamForked(thread_id forkedThread)
207 		:
208 		fForkedThread(forkedThread)
209 	{
210 		Initialized();
211 	}
212 
213 	virtual void AddDump(TraceOutput& out)
214 	{
215 		out.Print("team forked, new thread %ld", fForkedThread);
216 	}
217 
218 private:
219 	thread_id			fForkedThread;
220 };
221 
222 
223 class ExecTeam : public AbstractTraceEntry {
224 public:
225 	ExecTeam(const char* path, int32 argCount, const char* const* args,
226 			int32 envCount, const char* const* env)
227 		:
228 		fArgCount(argCount),
229 		fArgs(NULL)
230 	{
231 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
232 			false);
233 
234 		// determine the buffer size we need for the args
235 		size_t argBufferSize = 0;
236 		for (int32 i = 0; i < argCount; i++)
237 			argBufferSize += strlen(args[i]) + 1;
238 
239 		// allocate a buffer
240 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
241 		if (fArgs) {
242 			char* buffer = fArgs;
243 			for (int32 i = 0; i < argCount; i++) {
244 				size_t argSize = strlen(args[i]) + 1;
245 				memcpy(buffer, args[i], argSize);
246 				buffer += argSize;
247 			}
248 		}
249 
250 		// ignore env for the time being
251 		(void)envCount;
252 		(void)env;
253 
254 		Initialized();
255 	}
256 
257 	virtual void AddDump(TraceOutput& out)
258 	{
259 		out.Print("team exec, \"%p\", args:", fPath);
260 
261 		if (fArgs != NULL) {
262 			char* args = fArgs;
263 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
264 				out.Print(" \"%s\"", args);
265 				args += strlen(args) + 1;
266 			}
267 		} else
268 			out.Print(" <too long>");
269 	}
270 
271 private:
272 	char*	fPath;
273 	int32	fArgCount;
274 	char*	fArgs;
275 };
276 
277 
278 static const char*
279 job_control_state_name(job_control_state state)
280 {
281 	switch (state) {
282 		case JOB_CONTROL_STATE_NONE:
283 			return "none";
284 		case JOB_CONTROL_STATE_STOPPED:
285 			return "stopped";
286 		case JOB_CONTROL_STATE_CONTINUED:
287 			return "continued";
288 		case JOB_CONTROL_STATE_DEAD:
289 			return "dead";
290 		default:
291 			return "invalid";
292 	}
293 }
294 
295 
296 class SetJobControlState : public AbstractTraceEntry {
297 public:
298 	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
299 		:
300 		fTeam(team),
301 		fNewState(newState),
302 		fSignal(signal != NULL ? signal->Number() : 0)
303 	{
304 		Initialized();
305 	}
306 
307 	virtual void AddDump(TraceOutput& out)
308 	{
309 		out.Print("team set job control state, team %ld, "
310 			"new state: %s, signal: %d",
311 			fTeam, job_control_state_name(fNewState), fSignal);
312 	}
313 
314 private:
315 	team_id				fTeam;
316 	job_control_state	fNewState;
317 	int					fSignal;
318 };
319 
320 
321 class WaitForChild : public AbstractTraceEntry {
322 public:
323 	WaitForChild(pid_t child, uint32 flags)
324 		:
325 		fChild(child),
326 		fFlags(flags)
327 	{
328 		Initialized();
329 	}
330 
331 	virtual void AddDump(TraceOutput& out)
332 	{
333 		out.Print("team wait for child, child: %ld, "
334 			"flags: 0x%lx", fChild, fFlags);
335 	}
336 
337 private:
338 	pid_t	fChild;
339 	uint32	fFlags;
340 };
341 
342 
343 class WaitForChildDone : public AbstractTraceEntry {
344 public:
345 	WaitForChildDone(const job_control_entry& entry)
346 		:
347 		fState(entry.state),
348 		fTeam(entry.thread),
349 		fStatus(entry.status),
350 		fReason(entry.reason),
351 		fSignal(entry.signal)
352 	{
353 		Initialized();
354 	}
355 
356 	WaitForChildDone(status_t error)
357 		:
358 		fTeam(error)
359 	{
360 		Initialized();
361 	}
362 
363 	virtual void AddDump(TraceOutput& out)
364 	{
365 		if (fTeam >= 0) {
366 			out.Print("team wait for child done, team: %ld, "
367 				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
368 				fTeam, job_control_state_name(fState), fStatus, fReason,
369 				fSignal);
370 		} else {
371 			out.Print("team wait for child failed, error: "
372 				"0x%lx, ", fTeam);
373 		}
374 	}
375 
376 private:
377 	job_control_state	fState;
378 	team_id				fTeam;
379 	status_t			fStatus;
380 	uint16				fReason;
381 	uint16				fSignal;
382 };
383 
384 }	// namespace TeamTracing
385 
386 #	define T(x) new(std::nothrow) TeamTracing::x;
387 #else
388 #	define T(x) ;
389 #endif
390 
391 
392 //	#pragma mark - TeamNotificationService
393 
394 
395 TeamNotificationService::TeamNotificationService()
396 	: DefaultNotificationService("teams")
397 {
398 }
399 
400 
401 void
402 TeamNotificationService::Notify(uint32 eventCode, Team* team)
403 {
404 	char eventBuffer[128];
405 	KMessage event;
406 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
407 	event.AddInt32("event", eventCode);
408 	event.AddInt32("team", team->id);
409 	event.AddPointer("teamStruct", team);
410 
411 	DefaultNotificationService::Notify(event, eventCode);
412 }
413 
414 
415 //	#pragma mark - Team
416 
417 
418 Team::Team(team_id id, bool kernel)
419 {
420 	// set the ID
421 	this->id = id;
422 	visible = true;
423 	serial_number = -1;
424 
425 	// init mutex
426 	if (kernel) {
427 		mutex_init(&fLock, "Team:kernel");
428 	} else {
429 		char lockName[16];
430 		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
431 		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
432 	}
433 
434 	hash_next = siblings_next = children = parent = NULL;
435 	fName[0] = '\0';
436 	fArgs[0] = '\0';
437 	num_threads = 0;
438 	io_context = NULL;
439 	address_space = NULL;
440 	realtime_sem_context = NULL;
441 	xsi_sem_context = NULL;
442 	thread_list = NULL;
443 	main_thread = NULL;
444 	loading_info = NULL;
445 	state = TEAM_STATE_BIRTH;
446 	flags = 0;
447 	death_entry = NULL;
448 	user_data_area = -1;
449 	user_data = 0;
450 	used_user_data = 0;
451 	user_data_size = 0;
452 	free_user_threads = NULL;
453 
454 	commpage_address = NULL;
455 
456 	supplementary_groups = NULL;
457 	supplementary_group_count = 0;
458 
459 	dead_threads_kernel_time = 0;
460 	dead_threads_user_time = 0;
461 	cpu_clock_offset = 0;
462 
463 	// dead threads
464 	list_init(&dead_threads);
465 	dead_threads_count = 0;
466 
467 	// dead children
468 	dead_children.count = 0;
469 	dead_children.kernel_time = 0;
470 	dead_children.user_time = 0;
471 
472 	// job control entry
473 	job_control_entry = new(nothrow) ::job_control_entry;
474 	if (job_control_entry != NULL) {
475 		job_control_entry->state = JOB_CONTROL_STATE_NONE;
476 		job_control_entry->thread = id;
477 		job_control_entry->team = this;
478 	}
479 
480 	// exit status -- setting initialized to false suffices
481 	exit.initialized = false;
482 
483 	list_init(&sem_list);
484 	list_init(&port_list);
485 	list_init(&image_list);
486 	list_init(&watcher_list);
487 
488 	clear_team_debug_info(&debug_info, true);
489 
490 	// init dead/stopped/continued children condition vars
491 	dead_children.condition_variable.Init(&dead_children, "team children");
492 
493 	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
494 		kernel ? -1 : MAX_QUEUED_SIGNALS);
495 	memset(fSignalActions, 0, sizeof(fSignalActions));
496 
497 	fUserDefinedTimerCount = 0;
498 }
499 
500 
501 Team::~Team()
502 {
503 	// get rid of all associated data
504 	PrepareForDeletion();
505 
506 	vfs_put_io_context(io_context);
507 	delete_owned_ports(this);
508 	sem_delete_owned_sems(this);
509 
510 	DeleteUserTimers(false);
511 
512 	fPendingSignals.Clear();
513 
514 	if (fQueuedSignalsCounter != NULL)
515 		fQueuedSignalsCounter->ReleaseReference();
516 
517 	while (thread_death_entry* threadDeathEntry
518 			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
519 		free(threadDeathEntry);
520 	}
521 
522 	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
523 		delete entry;
524 
525 	while (free_user_thread* entry = free_user_threads) {
526 		free_user_threads = entry->next;
527 		free(entry);
528 	}
529 
530 	malloc_referenced_release(supplementary_groups);
531 
532 	delete job_control_entry;
533 		// usually already NULL and transferred to the parent
534 
535 	mutex_destroy(&fLock);
536 }
537 
538 
539 /*static*/ Team*
540 Team::Create(team_id id, const char* name, bool kernel)
541 {
542 	// create the team object
543 	Team* team = new(std::nothrow) Team(id, kernel);
544 	if (team == NULL)
545 		return NULL;
546 	ObjectDeleter<Team> teamDeleter(team);
547 
548 	if (name != NULL)
549 		team->SetName(name);
550 
551 	// check initialization
552 	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
553 		return NULL;
554 
555 	// finish initialization (arch specifics)
556 	if (arch_team_init_team_struct(team, kernel) != B_OK)
557 		return NULL;
558 
559 	if (!kernel) {
560 		status_t error = user_timer_create_team_timers(team);
561 		if (error != B_OK)
562 			return NULL;
563 	}
564 
565 	// everything went fine
566 	return teamDeleter.Detach();
567 }
568 
569 
570 /*!	\brief Returns the team with the given ID.
571 	Returns a reference to the team.
572 	Team and thread spinlock must not be held.
573 */
574 /*static*/ Team*
575 Team::Get(team_id id)
576 {
577 	if (id == B_CURRENT_TEAM) {
578 		Team* team = thread_get_current_thread()->team;
579 		team->AcquireReference();
580 		return team;
581 	}
582 
583 	InterruptsSpinLocker locker(sTeamHashLock);
584 	Team* team = sTeamHash.Lookup(id);
585 	if (team != NULL)
586 		team->AcquireReference();
587 	return team;
588 }
589 
590 
591 /*!	\brief Returns the team with the given ID in a locked state.
592 	Returns a reference to the team.
593 	Team and thread spinlock must not be held.
594 */
595 /*static*/ Team*
596 Team::GetAndLock(team_id id)
597 {
598 	// get the team
599 	Team* team = Get(id);
600 	if (team == NULL)
601 		return NULL;
602 
603 	// lock it
604 	team->Lock();
605 
606 	// only return the team when it isn't already dying
607 	if (team->state >= TEAM_STATE_SHUTDOWN) {
608 		team->Unlock();
609 		team->ReleaseReference();
610 		return NULL;
611 	}
612 
613 	return team;
614 }
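
// Typical usage, as a minimal sketch: GetAndLock() returns the team locked
// and with a reference acquired, so the caller must undo both.
//
//	Team* team = Team::GetAndLock(id);
//	if (team == NULL)
//		return B_BAD_TEAM_ID;
//	// ... work with the locked team ...
//	team->Unlock();
//	team->ReleaseReference();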
615 
616 
617 /*!	Locks the team and its parent team (if any).
618 	The caller must hold a reference to the team or otherwise make sure that
619 	it won't be deleted.
620 	If the team doesn't have a parent, only the team itself is locked. If the
621 	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
622 	only the team itself is locked.
623 
624 	\param dontLockParentIfKernel If \c true, the team's parent team is only
625 		locked if it is not the kernel team.
626 */
627 void
628 Team::LockTeamAndParent(bool dontLockParentIfKernel)
629 {
630 	// The locking order is parent -> child. Since the parent can change as long
631 	// as we don't lock the team, we need to do a trial and error loop.
632 	Lock();
633 
634 	while (true) {
635 		// If the team doesn't have a parent, we're done. Otherwise try to lock
636 		// the parent. This will succeed in most cases, simplifying things.
637 		Team* parent = this->parent;
638 		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
639 			|| parent->TryLock()) {
640 			return;
641 		}
642 
643 		// get a temporary reference to the parent, unlock this team, lock the
644 		// parent, and re-lock this team
645 		BReference<Team> parentReference(parent);
646 
647 		Unlock();
648 		parent->Lock();
649 		Lock();
650 
651 		// If the parent hasn't changed in the meantime, we're done.
652 		if (this->parent == parent)
653 			return;
654 
655 		// The parent has changed -- unlock and retry.
656 		parent->Unlock();
657 	}
658 }
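
// Caller pattern for the lock-order dance above, as a minimal sketch; the
// matching UnlockTeamAndParent() below releases both locks.
//
//	team->LockTeamAndParent(false);
//	// ... team and its parent (if any) are now both locked ...
//	team->UnlockTeamAndParent();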
659 
660 
661 /*!	Unlocks the team and its parent team (if any).
662 */
663 void
664 Team::UnlockTeamAndParent()
665 {
666 	if (parent != NULL)
667 		parent->Unlock();
668 
669 	Unlock();
670 }
671 
672 
673 /*!	Locks the team, its parent team (if any), and the team's process group.
674 	The caller must hold a reference to the team or otherwise make sure that
675 	it won't be deleted.
676 	If the team doesn't have a parent, only the team itself is locked.
677 */
678 void
679 Team::LockTeamParentAndProcessGroup()
680 {
681 	LockTeamAndProcessGroup();
682 
683 	// We hold the group's and the team's lock, but not the parent team's lock.
684 	// If we have a parent, try to lock it.
685 	if (this->parent == NULL || this->parent->TryLock())
686 		return;
687 
688 	// No success -- unlock the team and let LockTeamAndParent() do the rest of
689 	// the job.
690 	Unlock();
691 	LockTeamAndParent(false);
692 }
693 
694 
695 /*!	Unlocks the team, its parent team (if any), and the team's process group.
696 */
697 void
698 Team::UnlockTeamParentAndProcessGroup()
699 {
700 	group->Unlock();
701 
702 	if (parent != NULL)
703 		parent->Unlock();
704 
705 	Unlock();
706 }
707 
708 
709 void
710 Team::LockTeamAndProcessGroup()
711 {
712 	// The locking order is process group -> child. Since the process group can
713 	// change as long as we don't lock the team, we need to do a trial and error
714 	// loop.
715 	Lock();
716 
717 	while (true) {
718 		// Try to lock the group. This will succeed in most cases, simplifying
719 		// things.
720 		ProcessGroup* group = this->group;
721 		if (group->TryLock())
722 			return;
723 
724 		// get a temporary reference to the group, unlock this team, lock the
725 		// group, and re-lock this team
726 		BReference<ProcessGroup> groupReference(group);
727 
728 		Unlock();
729 		group->Lock();
730 		Lock();
731 
732 		// If the group hasn't changed in the meantime, we're done.
733 		if (this->group == group)
734 			return;
735 
736 		// The group has changed -- unlock and retry.
737 		group->Unlock();
738 	}
739 }
740 
741 
742 void
743 Team::UnlockTeamAndProcessGroup()
744 {
745 	group->Unlock();
746 	Unlock();
747 }
748 
749 
750 void
751 Team::SetName(const char* name)
752 {
753 	if (const char* lastSlash = strrchr(name, '/'))
754 		name = lastSlash + 1;
755 
756 	strlcpy(fName, name, B_OS_NAME_LENGTH);
757 }
758 
759 
760 void
761 Team::SetArgs(const char* args)
762 {
763 	strlcpy(fArgs, args, sizeof(fArgs));
764 }
765 
766 
767 void
768 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
769 {
770 	fArgs[0] = '\0';
771 	strlcpy(fArgs, path, sizeof(fArgs));
772 	for (int i = 0; i < otherArgCount; i++) {
773 		strlcat(fArgs, " ", sizeof(fArgs));
774 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
775 	}
776 }
777 
778 
779 void
780 Team::ResetSignalsOnExec()
781 {
782 	// We are supposed to keep pending signals. Signal actions shall be reset
783 	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
784 	// (for SIGCHLD it's implementation-defined). Others shall be reset to
785 	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
786 	// flags, but since there aren't any handlers, they make little sense, so
787 	// we clear them.
788 
789 	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
790 		struct sigaction& action = SignalActionFor(i);
791 		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
792 			action.sa_handler = SIG_DFL;
793 
794 		action.sa_mask = 0;
795 		action.sa_flags = 0;
796 		action.sa_userdata = NULL;
797 	}
798 }
799 
800 
801 void
802 Team::InheritSignalActions(Team* parent)
803 {
804 	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
805 }
806 
807 
808 /*!	Adds the given user timer to the team and, if user-defined, assigns it an
809 	ID.
810 
811 	The caller must hold the team's lock.
812 
813 	\param timer The timer to be added. If it doesn't have an ID yet, it is
814 		considered user-defined and will be assigned an ID.
815 	\return \c B_OK, if the timer was added successfully, another error code
816 		otherwise.
817 */
818 status_t
819 Team::AddUserTimer(UserTimer* timer)
820 {
821 	// don't allow addition of timers when already shutting the team down
822 	if (state >= TEAM_STATE_SHUTDOWN)
823 		return B_BAD_TEAM_ID;
824 
825 	// If the timer is user-defined, check timer limit and increment
826 	// user-defined count.
827 	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
828 		return EAGAIN;
829 
830 	fUserTimers.AddTimer(timer);
831 
832 	return B_OK;
833 }
834 
835 
836 /*!	Removes the given user timer from the team.
837 
838 	The caller must hold the team's lock.
839 
840 	\param timer The timer to be removed.
842 */
843 void
844 Team::RemoveUserTimer(UserTimer* timer)
845 {
846 	fUserTimers.RemoveTimer(timer);
847 
848 	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
849 		UserDefinedTimersRemoved(1);
850 }
851 
852 
853 /*!	Deletes all (or all user-defined) user timers of the team.
854 
855 	Timers belonging to the team's threads are not affected.
856 	The caller must hold the team's lock.
857 
858 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
859 		otherwise all timers are deleted.
860 */
861 void
862 Team::DeleteUserTimers(bool userDefinedOnly)
863 {
864 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
865 	UserDefinedTimersRemoved(count);
866 }
867 
868 
869 /*!	If not at the limit yet, increments the team's user-defined timer count.
870 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
871 */
872 bool
873 Team::CheckAddUserDefinedTimer()
874 {
875 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
876 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
877 		atomic_add(&fUserDefinedTimerCount, -1);
878 		return false;
879 	}
880 
881 	return true;
882 }
883 
884 
885 /*!	Subtracts the given count from the team's user-defined timer count.
886 	\param count The count to subtract.
887 */
888 void
889 Team::UserDefinedTimersRemoved(int32 count)
890 {
891 	atomic_add(&fUserDefinedTimerCount, -count);
892 }
893 
894 
895 void
896 Team::DeactivateCPUTimeUserTimers()
897 {
898 	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
899 		timer->Deactivate();
900 
901 	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
902 		timer->Deactivate();
903 }
904 
905 
906 /*!	Returns the team's current total CPU time (kernel + user + offset).
907 
908 	The caller must hold the scheduler lock.
909 
910 	\param ignoreCurrentRun If \c true and the current thread is one of the
911 		team's threads, don't add the time since the last time \c last_time was
912 		updated. Should be used in "thread unscheduled" scheduler callbacks,
913 		since although the thread is still running at that time, its time has
914 		already been stopped.
915 	\return The team's current total CPU time.
916 */
917 bigtime_t
918 Team::CPUTime(bool ignoreCurrentRun) const
919 {
920 	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
921 		+ dead_threads_user_time;
922 
923 	Thread* currentThread = thread_get_current_thread();
924 	bigtime_t now = system_time();
925 
926 	for (Thread* thread = thread_list; thread != NULL;
927 			thread = thread->team_next) {
928 		SpinLocker threadTimeLocker(thread->time_lock);
929 		time += thread->kernel_time + thread->user_time;
930 
931 		if (thread->IsRunning()) {
932 			if (!ignoreCurrentRun || thread != currentThread)
933 				time += now - thread->last_time;
934 		}
935 	}
936 
937 	return time;
938 }
939 
940 
941 /*!	Returns the team's current user CPU time.
942 
943 	The caller must hold the scheduler lock.
944 
945 	\return The team's current user CPU time.
946 */
947 bigtime_t
948 Team::UserCPUTime() const
949 {
950 	bigtime_t time = dead_threads_user_time;
951 
952 	bigtime_t now = system_time();
953 
954 	for (Thread* thread = thread_list; thread != NULL;
955 			thread = thread->team_next) {
956 		SpinLocker threadTimeLocker(thread->time_lock);
957 		time += thread->user_time;
958 
959 		if (thread->IsRunning() && !thread->in_kernel)
960 			time += now - thread->last_time;
961 	}
962 
963 	return time;
964 }
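
// Sketch of a caller honoring the locking contract documented above
// (gSchedulerLock is also acquired for this purpose in load_image_internal()
// below):
//
//	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
//	bigtime_t totalTime = team->CPUTime(false);
//	bigtime_t userTime = team->UserCPUTime();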
965 
966 
967 //	#pragma mark - ProcessGroup
968 
969 
970 ProcessGroup::ProcessGroup(pid_t id)
971 	:
972 	id(id),
973 	teams(NULL),
974 	fSession(NULL),
975 	fInOrphanedCheckList(false)
976 {
977 	char lockName[32];
978 	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
979 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
980 }
981 
982 
983 ProcessGroup::~ProcessGroup()
984 {
985 	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));
986 
987 	// If the group is in the orphaned check list, remove it.
988 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
989 
990 	if (fInOrphanedCheckList)
991 		sOrphanedCheckProcessGroups.Remove(this);
992 
993 	orphanedCheckLocker.Unlock();
994 
995 	// remove group from the hash table and from the session
996 	if (fSession != NULL) {
997 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
998 		sGroupHash.RemoveUnchecked(this);
999 		groupHashLocker.Unlock();
1000 
1001 		fSession->ReleaseReference();
1002 	}
1003 
1004 	mutex_destroy(&fLock);
1005 }
1006 
1007 
1008 /*static*/ ProcessGroup*
1009 ProcessGroup::Get(pid_t id)
1010 {
1011 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1012 	ProcessGroup* group = sGroupHash.Lookup(id);
1013 	if (group != NULL)
1014 		group->AcquireReference();
1015 	return group;
1016 }
1017 
1018 
1019 /*!	Adds the group to the given session and makes it publicly accessible.
1020 	The caller must not hold the process group hash lock.
1021 */
1022 void
1023 ProcessGroup::Publish(ProcessSession* session)
1024 {
1025 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1026 	PublishLocked(session);
1027 }
1028 
1029 
1030 /*!	Adds the group to the given session and makes it publicly accessible.
1031 	The caller must hold the process group hash lock.
1032 */
1033 void
1034 ProcessGroup::PublishLocked(ProcessSession* session)
1035 {
1036 	ASSERT(sGroupHash.Lookup(this->id) == NULL);
1037 
1038 	fSession = session;
1039 	fSession->AcquireReference();
1040 
1041 	sGroupHash.InsertUnchecked(this);
1042 }
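
// A ProcessGroup is constructed unpublished (fSession == NULL) and only
// becomes visible via sGroupHash once attached to a session; a minimal
// sketch:
//
//	ProcessGroup* group = new(std::nothrow) ProcessGroup(groupID);
//	if (group != NULL)
//		group->Publish(session);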
1043 
1044 
1045 /*!	Checks whether the process group is orphaned.
1046 	The caller must hold the group's lock.
1047 	\return \c true, if the group is orphaned, \c false otherwise.
1048 */
1049 bool
1050 ProcessGroup::IsOrphaned() const
1051 {
1052 	// Orphaned Process Group: "A process group in which the parent of every
1053 	// member is either itself a member of the group or is not a member of the
1054 	// group's session." (Open Group Base Specs Issue 7)
1055 	bool orphaned = true;
1056 
1057 	Team* team = teams;
1058 	while (orphaned && team != NULL) {
1059 		team->LockTeamAndParent(false);
1060 
1061 		Team* parent = team->parent;
1062 		if (parent != NULL && parent->group_id != id
1063 			&& parent->session_id == fSession->id) {
1064 			orphaned = false;
1065 		}
1066 
1067 		team->UnlockTeamAndParent();
1068 
1069 		team = team->group_next;
1070 	}
1071 
1072 	return orphaned;
1073 }
1074 
1075 
1076 void
1077 ProcessGroup::ScheduleOrphanedCheck()
1078 {
1079 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1080 
1081 	if (!fInOrphanedCheckList) {
1082 		sOrphanedCheckProcessGroups.Add(this);
1083 		fInOrphanedCheckList = true;
1084 	}
1085 }
1086 
1087 
1088 void
1089 ProcessGroup::UnsetOrphanedCheck()
1090 {
1091 	fInOrphanedCheckList = false;
1092 }
1093 
1094 
1095 //	#pragma mark - ProcessSession
1096 
1097 
1098 ProcessSession::ProcessSession(pid_t id)
1099 	:
1100 	id(id),
1101 	controlling_tty(-1),
1102 	foreground_group(-1)
1103 {
1104 	char lockName[32];
1105 	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
1106 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
1107 }
1108 
1109 
1110 ProcessSession::~ProcessSession()
1111 {
1112 	mutex_destroy(&fLock);
1113 }
1114 
1115 
1116 //	#pragma mark - KDL functions
1117 
1118 
1119 static void
1120 _dump_team_info(Team* team)
1121 {
1122 	kprintf("TEAM: %p\n", team);
1123 	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
1124 		team->id);
1125 	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
1126 	kprintf("name:             '%s'\n", team->Name());
1127 	kprintf("args:             '%s'\n", team->Args());
1128 	kprintf("hash_next:        %p\n", team->hash_next);
1129 	kprintf("parent:           %p", team->parent);
1130 	if (team->parent != NULL) {
1131 		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
1132 	} else
1133 		kprintf("\n");
1134 
1135 	kprintf("children:         %p\n", team->children);
1136 	kprintf("num_threads:      %d\n", team->num_threads);
1137 	kprintf("state:            %d\n", team->state);
1138 	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
1139 	kprintf("io_context:       %p\n", team->io_context);
1140 	if (team->address_space)
1141 		kprintf("address_space:    %p\n", team->address_space);
1142 	kprintf("user data:        %p (area %" B_PRId32 ")\n",
1143 		(void*)team->user_data, team->user_data_area);
1144 	kprintf("free user thread: %p\n", team->free_user_threads);
1145 	kprintf("main_thread:      %p\n", team->main_thread);
1146 	kprintf("thread_list:      %p\n", team->thread_list);
1147 	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
1148 	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
1149 }
1150 
1151 
1152 static int
1153 dump_team_info(int argc, char** argv)
1154 {
1155 	ulong arg;
1156 	bool found = false;
1157 
1158 	if (argc < 2) {
1159 		Thread* thread = thread_get_current_thread();
1160 		if (thread != NULL && thread->team != NULL)
1161 			_dump_team_info(thread->team);
1162 		else
1163 			kprintf("No current team!\n");
1164 		return 0;
1165 	}
1166 
1167 	arg = strtoul(argv[1], NULL, 0);
1168 	if (IS_KERNEL_ADDRESS(arg)) {
1169 		// semi-hack
1170 		_dump_team_info((Team*)arg);
1171 		return 0;
1172 	}
1173 
1174 	// walk through the team list, trying to match name or id
1175 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1176 		Team* team = it.Next();) {
1177 		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
1178 			|| team->id == (team_id)arg) {
1179 			_dump_team_info(team);
1180 			found = true;
1181 			break;
1182 		}
1183 	}
1184 
1185 	if (!found)
1186 		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
1187 	return 0;
1188 }
1189 
1190 
1191 static int
1192 dump_teams(int argc, char** argv)
1193 {
1194 	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
1195 		B_PRINTF_POINTER_WIDTH, "parent");
1196 
1197 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1198 		Team* team = it.Next();) {
1199 		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
1200 	}
1201 
1202 	return 0;
1203 }
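
// The two functions above back kernel debugger (KDL) commands, presumably
// registered via add_debugger_command() during team initialization:
//
//	kdebug> teams
//	kdebug> team 0x12345678
//	kdebug> team app_server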
1204 
1205 
1206 //	#pragma mark - Private functions
1207 
1208 
1209 /*!	Inserts team \a team into the child list of team \a parent.
1210 
1211 	The caller must hold the lock of both \a parent and \a team.
1212 
1213 	\param parent The parent team.
1214 	\param team The team to be inserted into \a parent's child list.
1215 */
1216 static void
1217 insert_team_into_parent(Team* parent, Team* team)
1218 {
1219 	ASSERT(parent != NULL);
1220 
1221 	team->siblings_next = parent->children;
1222 	parent->children = team;
1223 	team->parent = parent;
1224 }
1225 
1226 
1227 /*!	Removes team \a team from the child list of team \a parent.
1228 
1229 	The caller must hold the lock of both \a parent and \a team.
1230 
1231 	\param parent The parent team.
1232 	\param team The team to be removed from \a parent's child list.
1233 */
1234 static void
1235 remove_team_from_parent(Team* parent, Team* team)
1236 {
1237 	Team* child;
1238 	Team* last = NULL;
1239 
1240 	for (child = parent->children; child != NULL;
1241 			child = child->siblings_next) {
1242 		if (child == team) {
1243 			if (last == NULL)
1244 				parent->children = child->siblings_next;
1245 			else
1246 				last->siblings_next = child->siblings_next;
1247 
1248 			team->parent = NULL;
1249 			break;
1250 		}
1251 		last = child;
1252 	}
1253 }
1254 
1255 
1256 /*!	Returns whether the given team is a session leader.
1257 	The caller must hold the team's lock or its process group's lock.
1258 */
1259 static bool
1260 is_session_leader(Team* team)
1261 {
1262 	return team->session_id == team->id;
1263 }
1264 
1265 
1266 /*!	Returns whether the given team is a process group leader.
1267 	The caller must hold the team's lock or its process group's lock.
1268 */
1269 static bool
1270 is_process_group_leader(Team* team)
1271 {
1272 	return team->group_id == team->id;
1273 }
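
// Worked example: a team with ID 42 that successfully called setsid() gets
// group_id == session_id == 42, so both predicates above are true for it.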
1274 
1275 
1276 /*!	Inserts the given team into the given process group.
1277 	The caller must hold the process group's lock, the team's lock, and the
1278 	team's parent's lock.
1279 */
1280 static void
1281 insert_team_into_group(ProcessGroup* group, Team* team)
1282 {
1283 	team->group = group;
1284 	team->group_id = group->id;
1285 	team->session_id = group->Session()->id;
1286 
1287 	team->group_next = group->teams;
1288 	group->teams = team;
1289 	group->AcquireReference();
1290 }
1291 
1292 
1293 /*!	Removes the given team from its process group.
1294 
1295 	The caller must hold the process group's lock, the team's lock, and the
1296 	team's parent's lock. Interrupts must be enabled.
1297 
1298 	\param team The team that'll be removed from its process group.
1299 */
1300 static void
1301 remove_team_from_group(Team* team)
1302 {
1303 	ProcessGroup* group = team->group;
1304 	Team* current;
1305 	Team* last = NULL;
1306 
1307 	// the team must be in a process group to let this function have any effect
1308 	if (group == NULL)
1309 		return;
1310 
1311 	for (current = group->teams; current != NULL;
1312 			current = current->group_next) {
1313 		if (current == team) {
1314 			if (last == NULL)
1315 				group->teams = current->group_next;
1316 			else
1317 				last->group_next = current->group_next;
1318 
1319 			team->group = NULL;
1320 			break;
1321 		}
1322 		last = current;
1323 	}
1324 
1325 	team->group = NULL;
1326 	team->group_next = NULL;
1327 
1328 	group->ReleaseReference();
1329 }
1330 
1331 
1332 static status_t
1333 create_team_user_data(Team* team, void* exactAddress = NULL)
1334 {
1335 	void* address;
1336 	uint32 addressSpec;
1337 
1338 	if (exactAddress != NULL) {
1339 		address = exactAddress;
1340 		addressSpec = B_EXACT_ADDRESS;
1341 	} else {
1342 		address = (void*)KERNEL_USER_DATA_BASE;
1343 		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
1344 	}
1345 
1346 	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
1347 		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);
1348 
1349 	virtual_address_restrictions virtualRestrictions = {};
1350 	if (result == B_OK || exactAddress != NULL) {
1351 		if (exactAddress != NULL)
1352 			virtualRestrictions.address = exactAddress;
1353 		else
1354 			virtualRestrictions.address = address;
1355 		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
1356 	} else {
1357 		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
1358 		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
1359 	}
1360 
1361 	physical_address_restrictions physicalRestrictions = {};
1362 	team->user_data_area = create_area_etc(team->id, "user area",
1363 		kTeamUserDataInitialSize, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0,
1364 		&virtualRestrictions, &physicalRestrictions, &address);
1365 	if (team->user_data_area < 0)
1366 		return team->user_data_area;
1367 
1368 	team->user_data = (addr_t)address;
1369 	team->used_user_data = 0;
1370 	team->user_data_size = kTeamUserDataInitialSize;
1371 	team->free_user_threads = NULL;
1372 
1373 	return B_OK;
1374 }
1375 
1376 
1377 static void
1378 delete_team_user_data(Team* team)
1379 {
1380 	if (team->user_data_area >= 0) {
1381 		vm_delete_area(team->id, team->user_data_area, true);
1382 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1383 			kTeamUserDataReservedSize);
1384 
1385 		team->user_data = 0;
1386 		team->used_user_data = 0;
1387 		team->user_data_size = 0;
1388 		team->user_data_area = -1;
1389 		while (free_user_thread* entry = team->free_user_threads) {
1390 			team->free_user_threads = entry->next;
1391 			free(entry);
1392 		}
1393 	}
1394 }
1395 
1396 
1397 static status_t
1398 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1399 	int32 argCount, int32 envCount, char**& _flatArgs)
1400 {
1401 	if (argCount < 0 || envCount < 0)
1402 		return B_BAD_VALUE;
1403 
1404 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1405 		return B_TOO_MANY_ARGS;
1406 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1407 		return B_BAD_VALUE;
1408 
1409 	if (!IS_USER_ADDRESS(userFlatArgs))
1410 		return B_BAD_ADDRESS;
1411 
1412 	// allocate kernel memory
1413 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1414 	if (flatArgs == NULL)
1415 		return B_NO_MEMORY;
1416 
1417 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1418 		free(flatArgs);
1419 		return B_BAD_ADDRESS;
1420 	}
1421 
1422 	// check and relocate the array
1423 	status_t error = B_OK;
1424 	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
1425 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1426 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1427 		if (i == argCount || i == argCount + envCount + 1) {
1428 			// check array null termination
1429 			if (flatArgs[i] != NULL) {
1430 				error = B_BAD_VALUE;
1431 				break;
1432 			}
1433 		} else {
1434 			// check string
1435 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1436 			size_t maxLen = stringEnd - arg;
1437 			if (arg < stringBase || arg >= stringEnd
1438 					|| strnlen(arg, maxLen) == maxLen) {
1439 				error = B_BAD_VALUE;
1440 				break;
1441 			}
1442 
1443 			flatArgs[i] = arg;
1444 		}
1445 	}
1446 
1447 	if (error == B_OK)
1448 		_flatArgs = flatArgs;
1449 	else
1450 		free(flatArgs);
1451 
1452 	return error;
1453 }
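
// Layout of the flattened buffer validated above, as a sketch:
//
//	flatArgs[0 .. argCount - 1]                     argument pointers
//	flatArgs[argCount]                              NULL terminator
//	flatArgs[argCount + 1 .. argCount + envCount]   environment pointers
//	flatArgs[argCount + envCount + 1]               NULL terminator
//	[stringBase, stringEnd)                         the NUL-separated strings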
1454 
1455 
1456 static void
1457 free_team_arg(struct team_arg* teamArg)
1458 {
1459 	if (teamArg != NULL) {
1460 		free(teamArg->flat_args);
1461 		free(teamArg->path);
1462 		free(teamArg);
1463 	}
1464 }
1465 
1466 
1467 static status_t
1468 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1469 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1470 	port_id port, uint32 token)
1471 {
1472 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1473 	if (teamArg == NULL)
1474 		return B_NO_MEMORY;
1475 
1476 	teamArg->path = strdup(path);
1477 	if (teamArg->path == NULL) {
1478 		free(teamArg);
1479 		return B_NO_MEMORY;
1480 	}
1481 
1482 	// copy the args over
1483 
1484 	teamArg->flat_args = flatArgs;
1485 	teamArg->flat_args_size = flatArgsSize;
1486 	teamArg->arg_count = argCount;
1487 	teamArg->env_count = envCount;
1488 	teamArg->umask = umask;
1489 	teamArg->error_port = port;
1490 	teamArg->error_token = token;
1491 
1492 	*_teamArg = teamArg;
1493 	return B_OK;
1494 }
1495 
1496 
1497 static status_t
1498 team_create_thread_start_internal(void* args)
1499 {
1500 	status_t err;
1501 	Thread* thread;
1502 	Team* team;
1503 	struct team_arg* teamArgs = (struct team_arg*)args;
1504 	const char* path;
1505 	addr_t entry;
1506 	char** userArgs;
1507 	char** userEnv;
1508 	struct user_space_program_args* programArgs;
1509 	uint32 argCount, envCount;
1510 
1511 	thread = thread_get_current_thread();
1512 	team = thread->team;
1513 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
1514 
1515 	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
1516 		thread->id));
1517 
1518 	// Main stack area layout is currently as follows (starting from 0):
1519 	//
1520 	// size								| usage
1521 	// ---------------------------------+--------------------------------
1522 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
1523 	// TLS_SIZE							| TLS data
1524 	// sizeof(user_space_program_args)	| argument structure for the runtime
1525 	//									| loader
1526 	// flat arguments size				| flat process arguments and environment
1527 
1528 	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
1529 	// the heap
1530 	// TODO: we could reserve the whole USER_STACK_REGION upfront...
1531 
1532 	argCount = teamArgs->arg_count;
1533 	envCount = teamArgs->env_count;
1534 
1535 	programArgs = (struct user_space_program_args*)(thread->user_stack_base
1536 		+ thread->user_stack_size + TLS_SIZE);
1537 
1538 	userArgs = (char**)(programArgs + 1);
1539 	userEnv = userArgs + argCount + 1;
1540 	path = teamArgs->path;
1541 
1542 	if (user_strlcpy(programArgs->program_path, path,
1543 				sizeof(programArgs->program_path)) < B_OK
1544 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1545 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
1546 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1547 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
1548 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1549 				sizeof(port_id)) < B_OK
1550 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1551 				sizeof(uint32)) < B_OK
1552 		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
1553 		|| user_memcpy(userArgs, teamArgs->flat_args,
1554 				teamArgs->flat_args_size) < B_OK) {
1555 		// the team deletion process will clean up this mess
1556 		free_team_arg(teamArgs);
1557 		return B_BAD_ADDRESS;
1558 	}
1559 
1560 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1561 
1562 	// set team args and update state
1563 	team->Lock();
1564 	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
1565 	team->state = TEAM_STATE_NORMAL;
1566 	team->Unlock();
1567 
1568 	free_team_arg(teamArgs);
1569 		// the arguments are already on the user stack, we no longer need
1570 		// them in this form
1571 
1572 	// Clone commpage area
1573 	area_id commPageArea = clone_commpage_area(team->id,
1574 		&team->commpage_address);
1575 	if (commPageArea < B_OK) {
1576 		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
1577 			strerror(commPageArea)));
1578 		return commPageArea;
1579 	}
1580 
1581 	// Register commpage image
1582 	image_id commPageImage = get_commpage_image();
1583 	image_info imageInfo;
1584 	err = get_image_info(commPageImage, &imageInfo);
1585 	if (err != B_OK) {
1586 		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
1587 			strerror(err)));
1588 		return err;
1589 	}
1590 	imageInfo.text = team->commpage_address;
1591 	image_id image = register_image(team, &imageInfo, sizeof(image_info));
1592 	if (image < 0) {
1593 		TRACE(("team_create_thread_start: register_image() failed: %s\n",
1594 			strerror(image)));
1595 		return image;
1596 	}
1597 
1598 	// NOTE: Normally arch_thread_enter_userspace() never returns; that is,
1599 	// automatic variables with function scope will never be destroyed.
1600 	{
1601 		// find runtime_loader path
1602 		KPath runtimeLoaderPath;
1603 		err = find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
1604 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1605 		if (err < B_OK) {
1606 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1607 				strerror(err)));
1608 			return err;
1609 		}
1610 		runtimeLoaderPath.UnlockBuffer();
1611 		err = runtimeLoaderPath.Append("runtime_loader");
1612 
1613 		if (err == B_OK) {
1614 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
1615 				&entry);
1616 		}
1617 	}
1618 
1619 	if (err < B_OK) {
1620 		// Luckily, we don't have to clean up the mess we created - that's
1621 		// done for us by the normal team deletion process
1622 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1623 			"%s\n", strerror(err)));
1624 		return err;
1625 	}
1626 
1627 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1628 
1629 	// enter userspace -- returns only in case of error
1630 	return thread_enter_userspace_new_team(thread, (addr_t)entry,
1631 		programArgs, team->commpage_address);
1632 }
1633 
1634 
1635 static status_t
1636 team_create_thread_start(void* args)
1637 {
1638 	team_create_thread_start_internal(args);
1639 	team_init_exit_info_on_error(thread_get_current_thread()->team);
1640 	thread_exit();
1641 		// does not return
1642 	return B_OK;
1643 }
1644 
1645 
1646 static thread_id
1647 load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1648 	int32 envCount, int32 priority, team_id parentID, uint32 flags,
1649 	port_id errorPort, uint32 errorToken)
1650 {
1651 	char** flatArgs = _flatArgs;
1652 	thread_id thread;
1653 	status_t status;
1654 	struct team_arg* teamArgs;
1655 	struct team_loading_info loadingInfo;
1656 	io_context* parentIOContext = NULL;
1657 	team_id teamID;
1658 
1659 	if (flatArgs == NULL || argCount == 0)
1660 		return B_BAD_VALUE;
1661 
1662 	const char* path = flatArgs[0];
1663 
1664 	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
1665 		"\n", path, flatArgs, argCount));
1666 
1667 	// cut the path from the main thread name
1668 	const char* threadName = strrchr(path, '/');
1669 	if (threadName != NULL)
1670 		threadName++;
1671 	else
1672 		threadName = path;
1673 
1674 	// create the main thread object
1675 	Thread* mainThread;
1676 	status = Thread::Create(threadName, mainThread);
1677 	if (status != B_OK)
1678 		return status;
1679 	BReference<Thread> mainThreadReference(mainThread, true);
1680 
1681 	// create team object
1682 	Team* team = Team::Create(mainThread->id, path, false);
1683 	if (team == NULL)
1684 		return B_NO_MEMORY;
1685 	BReference<Team> teamReference(team, true);
1686 
1687 	if (flags & B_WAIT_TILL_LOADED) {
1688 		loadingInfo.thread = thread_get_current_thread();
1689 		loadingInfo.result = B_ERROR;
1690 		loadingInfo.done = false;
1691 		team->loading_info = &loadingInfo;
1692 	}
1693 
1694 	// get the parent team
1695 	Team* parent = Team::Get(parentID);
1696 	if (parent == NULL)
1697 		return B_BAD_TEAM_ID;
1698 	BReference<Team> parentReference(parent, true);
1699 
1700 	parent->LockTeamAndProcessGroup();
1701 	team->Lock();
1702 
1703 	// inherit the parent's user/group
1704 	inherit_parent_user_and_group(team, parent);
1705 
1706 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
1707 
1708 	sTeamHash.Insert(team);
1709 	sUsedTeams++;
1710 
1711 	teamsLocker.Unlock();
1712 
1713 	insert_team_into_parent(parent, team);
1714 	insert_team_into_group(parent->group, team);
1715 
1716 	// get a reference to the parent's I/O context -- we need it to create ours
1717 	parentIOContext = parent->io_context;
1718 	vfs_get_io_context(parentIOContext);
1719 
1720 	team->Unlock();
1721 	parent->UnlockTeamAndProcessGroup();
1722 
1723 	// notify team listeners
1724 	sNotificationService.Notify(TEAM_ADDED, team);
1725 
1726 	// check the executable's set-user/group-id permission
1727 	update_set_id_user_and_group(team, path);
1728 
1729 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1730 		envCount, (mode_t)-1, errorPort, errorToken);
1731 	if (status != B_OK)
1732 		goto err1;
1733 
1734 	_flatArgs = NULL;
1735 		// args are owned by the team_arg structure now
1736 
1737 	// create a new io_context for this team
1738 	team->io_context = vfs_new_io_context(parentIOContext, true);
1739 	if (!team->io_context) {
1740 		status = B_NO_MEMORY;
1741 		goto err2;
1742 	}
1743 
1744 	// We don't need the parent's I/O context any longer.
1745 	vfs_put_io_context(parentIOContext);
1746 	parentIOContext = NULL;
1747 
1748 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1749 	vfs_exec_io_context(team->io_context);
1750 
1751 	// create an address space for this team
1752 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
1753 		&team->address_space);
1754 	if (status != B_OK)
1755 		goto err3;
1756 
1757 	// create the user data area
1758 	status = create_team_user_data(team);
1759 	if (status != B_OK)
1760 		goto err4;
1761 
1762 	// In case we start the main thread, we shouldn't access the team object
1763 	// afterwards, so cache the team's ID.
1764 	teamID = team->id;
1765 
1766 	// Create a kernel thread, but under the context of the new team
1767 	// The new thread will take over ownership of teamArgs.
1768 	{
1769 		ThreadCreationAttributes threadAttributes(team_create_thread_start,
1770 			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
1771 		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
1772 			+ teamArgs->flat_args_size;
1773 		thread = thread_create_thread(threadAttributes, false);
1774 		if (thread < 0) {
1775 			status = thread;
1776 			goto err5;
1777 		}
1778 	}
1779 
1780 	// The team has been created successfully, so we keep the reference. Or
1781 	// more precisely: it's owned by the team's main thread now.
1782 	teamReference.Detach();
1783 
1784 	// wait for the loader of the new team to finish its work
1785 	if ((flags & B_WAIT_TILL_LOADED) != 0) {
1786 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1787 
1788 		// resume the team's main thread
1789 		if (mainThread != NULL && mainThread->state == B_THREAD_SUSPENDED)
1790 			scheduler_enqueue_in_run_queue(mainThread);
1791 
1792 		// Now suspend ourselves until loading is finished. We will be woken
1793 		// either by the thread, when it finished or aborted loading, or when
1794 		// the team is going to die (e.g. is killed). In either case the one
1795 		// setting `loadingInfo.done' is responsible for removing the info from
1796 		// the team structure.
1797 		while (!loadingInfo.done) {
1798 			thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
1799 			scheduler_reschedule();
1800 		}
1801 
1802 		schedulerLocker.Unlock();
1803 
1804 		if (loadingInfo.result < B_OK)
1805 			return loadingInfo.result;
1806 	}
1807 
1808 	// notify the debugger
1809 	user_debug_team_created(teamID);
1810 
1811 	return thread;
1812 
1813 err5:
1814 	delete_team_user_data(team);
1815 err4:
1816 	team->address_space->Put();
1817 err3:
1818 	vfs_put_io_context(team->io_context);
1819 err2:
1820 	free_team_arg(teamArgs);
1821 err1:
1822 	if (parentIOContext != NULL)
1823 		vfs_put_io_context(parentIOContext);
1824 
1825 	// Remove the team structure from the process group, the parent team, and
1826 	// the team hash table and delete the team structure.
1827 	parent->LockTeamAndProcessGroup();
1828 	team->Lock();
1829 
1830 	remove_team_from_group(team);
1831 	remove_team_from_parent(team->parent, team);
1832 
1833 	team->Unlock();
1834 	parent->UnlockTeamAndProcessGroup();
1835 
1836 	teamsLocker.Lock();
1837 	sTeamHash.Remove(team);
1838 	teamsLocker.Unlock();
1839 
1840 	sNotificationService.Notify(TEAM_REMOVED, team);
1841 
1842 	return status;
1843 }
1844 
1845 
1846 /*!	Almost shuts down the current team and loads a new image into it.
1847 	If successful, this function does not return and takes over ownership of
1848 	the arguments provided.
1849 	This function may only be called from a userland team (caused by one of
1850 	exec*() syscalls).
1851 */
1852 static status_t
1853 exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
1854 	int32 argCount, int32 envCount, mode_t umask)
1855 {
1856 	// NOTE: Since this function normally doesn't return, don't use automatic
1857 	// variables that need destruction in the function scope.
1858 	char** flatArgs = _flatArgs;
1859 	Team* team = thread_get_current_thread()->team;
1860 	struct team_arg* teamArgs;
1861 	const char* threadName;
1862 	thread_id nubThreadID = -1;
1863 
1864 	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
1865 		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
1866 		team->id));
1867 
1868 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1869 
1870 	// switching the kernel at run time is probably not a good idea :)
1871 	if (team == team_get_kernel_team())
1872 		return B_NOT_ALLOWED;
1873 
1874 	// we currently need to be single threaded here
1875 	// TODO: maybe we should just kill all other threads and
1876 	//	make the current thread the team's main thread?
1877 	Thread* currentThread = thread_get_current_thread();
1878 	if (currentThread != team->main_thread)
1879 		return B_NOT_ALLOWED;
1880 
1881 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1882 	// We iterate through the thread list to make sure that there's no other
1883 	// thread.
1884 	TeamLocker teamLocker(team);
1885 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
1886 
1887 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1888 		nubThreadID = team->debug_info.nub_thread;
1889 
1890 	debugInfoLocker.Unlock();
1891 
1892 	for (Thread* thread = team->thread_list; thread != NULL;
1893 			thread = thread->team_next) {
1894 		if (thread != team->main_thread && thread->id != nubThreadID)
1895 			return B_NOT_ALLOWED;
1896 	}
1897 
1898 	team->DeleteUserTimers(true);
1899 	team->ResetSignalsOnExec();
1900 
1901 	teamLocker.Unlock();
1902 
1903 	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
1904 		argCount, envCount, umask, -1, 0);
1905 	if (status != B_OK)
1906 		return status;
1907 
1908 	_flatArgs = NULL;
1909 		// args are owned by the team_arg structure now
1910 
1911 	// TODO: remove team resources if there are any left
1912 	// thread_atkernel_exit() might not be called at all
1913 
1914 	thread_reset_for_exec();
1915 
1916 	user_debug_prepare_for_exec();
1917 
1918 	delete_team_user_data(team);
1919 	vm_delete_areas(team->address_space, false);
1920 	xsi_sem_undo(team);
1921 	delete_owned_ports(team);
1922 	sem_delete_owned_sems(team);
1923 	remove_images(team);
1924 	vfs_exec_io_context(team->io_context);
1925 	delete_realtime_sem_context(team->realtime_sem_context);
1926 	team->realtime_sem_context = NULL;
1927 
1928 	status = create_team_user_data(team);
1929 	if (status != B_OK) {
1930 		// creating the user data failed -- we're toast
1931 		// TODO: We should better keep the old user area in the first place.
1932 		free_team_arg(teamArgs);
1933 		exit_thread(status);
1934 		return status;
1935 	}
1936 
1937 	user_debug_finish_after_exec();
1938 
1939 	// rename the team
1940 
1941 	team->Lock();
1942 	team->SetName(path);
1943 	team->Unlock();
1944 
1945 	// cut the path from the team name and rename the main thread, too
1946 	threadName = strrchr(path, '/');
1947 	if (threadName != NULL)
1948 		threadName++;
1949 	else
1950 		threadName = path;
1951 	rename_thread(thread_get_current_thread_id(), threadName);
1952 
1953 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1954 
1955 	// Update user/group according to the executable's set-user/group-id
1956 	// permission.
1957 	update_set_id_user_and_group(team, path);
1958 
1959 	user_debug_team_exec();
1960 
1961 	// notify team listeners
1962 	sNotificationService.Notify(TEAM_EXEC, team);
1963 
1964 	// get a user thread for the thread
1965 	user_thread* userThread = team_allocate_user_thread(team);
1966 		// cannot fail (the allocation for the team would have failed already)
1967 	ThreadLocker currentThreadLocker(currentThread);
1968 	currentThread->user_thread = userThread;
1969 	currentThreadLocker.Unlock();
1970 
1971 	// create the user stack for the thread
1972 	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
1973 		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
1974 	if (status == B_OK) {
1975 		// prepare the stack, load the runtime loader, and enter userspace
1976 		team_create_thread_start(teamArgs);
1977 			// never returns
1978 	} else
1979 		free_team_arg(teamArgs);
1980 
1981 	// Sorry, we have to kill ourselves, there is no way out anymore
1982 	// (without any areas left and all that).
1983 	exit_thread(status);
1984 
1985 	// We return a status here since the signal that is sent by the
1986 	// call above is not immediately handled.
1987 	return B_ERROR;
1988 }
1989 
1990 
1991 static thread_id
1992 fork_team(void)
1993 {
1994 	Thread* parentThread = thread_get_current_thread();
1995 	Team* parentTeam = parentThread->team;
1996 	Team* team;
1997 	arch_fork_arg* forkArgs;
1998 	struct area_info info;
1999 	thread_id threadID;
2000 	status_t status;
2001 	ssize_t areaCookie;
2002 	int32 imageCookie;
2003 
2004 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2005 
2006 	if (parentTeam == team_get_kernel_team())
2007 		return B_NOT_ALLOWED;
2008 
2009 	// create a new team
2010 	// TODO: this is very similar to load_image_internal() - maybe we can do
2011 	// something about it :)
2012 
2013 	// create the main thread object
2014 	Thread* thread;
2015 	status = Thread::Create(parentThread->name, thread);
2016 	if (status != B_OK)
2017 		return status;
2018 	BReference<Thread> threadReference(thread, true);
2019 
2020 	// create the team object
2021 	team = Team::Create(thread->id, NULL, false);
2022 	if (team == NULL)
2023 		return B_NO_MEMORY;
2024 
2025 	parentTeam->LockTeamAndProcessGroup();
2026 	team->Lock();
2027 
2028 	team->SetName(parentTeam->Name());
2029 	team->SetArgs(parentTeam->Args());
2030 
2031 	team->commpage_address = parentTeam->commpage_address;
2032 
2033 	// Inherit the parent's user/group.
2034 	inherit_parent_user_and_group(team, parentTeam);
2035 
2036 	// inherit signal handlers
2037 	team->InheritSignalActions(parentTeam);
2038 
2039 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2040 
2041 	sTeamHash.Insert(team);
2042 	sUsedTeams++;
2043 
2044 	teamsLocker.Unlock();
2045 
2046 	insert_team_into_parent(parentTeam, team);
2047 	insert_team_into_group(parentTeam->group, team);
2048 
2049 	team->Unlock();
2050 	parentTeam->UnlockTeamAndProcessGroup();
2051 
2052 	// notify team listeners
2053 	sNotificationService.Notify(TEAM_ADDED, team);
2054 
2055 	// inherit some team debug flags
2056 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2057 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2058 
2059 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2060 	if (forkArgs == NULL) {
2061 		status = B_NO_MEMORY;
2062 		goto err1;
2063 	}
2064 
2065 	// create a new io_context for this team
2066 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2067 	if (!team->io_context) {
2068 		status = B_NO_MEMORY;
2069 		goto err2;
2070 	}
2071 
2072 	// duplicate the realtime sem context
2073 	if (parentTeam->realtime_sem_context) {
2074 		team->realtime_sem_context = clone_realtime_sem_context(
2075 			parentTeam->realtime_sem_context);
2076 		if (team->realtime_sem_context == NULL) {
2077 			status = B_NO_MEMORY;
2078 			goto err25;
2079 		}
2080 	}
2081 
2082 	// create an address space for this team
2083 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2084 		&team->address_space);
2085 	if (status < B_OK)
2086 		goto err3;
2087 
2088 	// copy all areas of the team
2089 	// TODO: should be able to handle stack areas differently (ie. don't have
2090 	// them copy-on-write)
2091 
2092 	areaCookie = 0;
2093 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2094 		if (info.area == parentTeam->user_data_area) {
2095 			// don't clone the user area; just create a new one
2096 			status = create_team_user_data(team, info.address);
2097 			if (status != B_OK)
2098 				break;
2099 
2100 			thread->user_thread = team_allocate_user_thread(team);
2101 		} else {
2102 			void* address;
2103 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2104 				&address, B_CLONE_ADDRESS, info.protection, info.area);
2105 			if (area < B_OK) {
2106 				status = area;
2107 				break;
2108 			}
2109 
2110 			if (info.area == parentThread->user_stack_area)
2111 				thread->user_stack_area = area;
2112 		}
2113 	}
2114 
2115 	if (status < B_OK)
2116 		goto err4;
2117 
2118 	if (thread->user_thread == NULL) {
2119 #if KDEBUG
2120 		panic("user data area not found, parent area is %" B_PRId32,
2121 			parentTeam->user_data_area);
2122 #endif
2123 		status = B_ERROR;
2124 		goto err4;
2125 	}
2126 
2127 	thread->user_stack_base = parentThread->user_stack_base;
2128 	thread->user_stack_size = parentThread->user_stack_size;
2129 	thread->user_local_storage = parentThread->user_local_storage;
2130 	thread->sig_block_mask = parentThread->sig_block_mask;
2131 	thread->signal_stack_base = parentThread->signal_stack_base;
2132 	thread->signal_stack_size = parentThread->signal_stack_size;
2133 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2134 
2135 	arch_store_fork_frame(forkArgs);
2136 
2137 	// copy image list
2138 	image_info imageInfo;
2139 	imageCookie = 0;
2140 	while (get_next_image_info(parentTeam->id, &imageCookie, &imageInfo)
2141 			== B_OK) {
2142 		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
2143 		if (image < 0)
2144 			goto err5;
2145 	}
2146 
2147 	// create the main thread
2148 	{
2149 		ThreadCreationAttributes threadCreationAttributes(NULL,
2150 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2151 		threadCreationAttributes.forkArgs = forkArgs;
2152 		threadID = thread_create_thread(threadCreationAttributes, false);
2153 		if (threadID < 0) {
2154 			status = threadID;
2155 			goto err5;
2156 		}
2157 	}
2158 
2159 	// notify the debugger
2160 	user_debug_team_created(team->id);
2161 
2162 	T(TeamForked(threadID));
2163 
2164 	resume_thread(threadID);
2165 	return threadID;
2166 
2167 err5:
2168 	remove_images(team);
2169 err4:
2170 	team->address_space->RemoveAndPut();
2171 err3:
2172 	delete_realtime_sem_context(team->realtime_sem_context);
2173 err25:
2174 	vfs_put_io_context(team->io_context);
2175 err2:
2176 	free(forkArgs);
2177 err1:
2178 	// Remove the team structure from the process group, the parent team, and
2179 	// the team hash table, and delete it.
2180 	parentTeam->LockTeamAndProcessGroup();
2181 	team->Lock();
2182 
2183 	remove_team_from_group(team);
2184 	remove_team_from_parent(team->parent, team);
2185 
2186 	team->Unlock();
2187 	parentTeam->UnlockTeamAndProcessGroup();
2188 
2189 	teamsLocker.Lock();
2190 	sTeamHash.Remove(team);
2191 	teamsLocker.Unlock();
2192 
2193 	sNotificationService.Notify(TEAM_REMOVED, team);
2194 
2195 	team->ReleaseReference();
2196 
2197 	return status;
2198 }
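
/*	Orientation (a sketch, not the libroot implementation): userland fork()
	ends up in _user_fork() below, which calls fork_team(). The child's main
	thread resumes from the CPU state captured by arch_store_fork_frame()
	above, so both sides return from the fork with the same register state:

		pid_t pid = fork();
		if (pid == 0)
			;	// child: new team with copied (copy-on-write) areas
		else if (pid > 0)
			;	// parent: pid is the child's team/main thread ID
*/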
2199 
2200 
2201 /*!	Returns whether the specified team \a parent has any children belonging
2202 	to the process group with the specified ID \a groupID.
2203 	The caller must hold \a parent's lock.
2204 */
2205 static bool
2206 has_children_in_group(Team* parent, pid_t groupID)
2207 {
2208 	for (Team* child = parent->children; child != NULL;
2209 			child = child->siblings_next) {
2210 		TeamLocker childLocker(child);
2211 		if (child->group_id == groupID)
2212 			return true;
2213 	}
2214 
2215 	return false;
2216 }
2217 
2218 
2219 /*!	Returns the first job control entry from \a children, which matches \a id.
2220 	\a id can be:
2221 	- \code > 0 \endcode: Matching an entry with that team ID.
2222 	- \code == -1 \endcode: Matching any entry.
2223 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2224 	\c 0 is an invalid value for \a id.
2225 
2226 	The caller must hold the lock of the team that \a children belongs to.
2227 
2228 	\param children The job control entry list to check.
2229 	\param id The match criterion.
2230 	\return The first matching entry or \c NULL, if none matches.
2231 */
2232 static job_control_entry*
2233 get_job_control_entry(team_job_control_children& children, pid_t id)
2234 {
2235 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2236 		 job_control_entry* entry = it.Next();) {
2237 
2238 		if (id > 0) {
2239 			if (entry->thread == id)
2240 				return entry;
2241 		} else if (id == -1) {
2242 			return entry;
2243 		} else {
2244 			pid_t processGroup
2245 				= (entry->team ? entry->team->group_id : entry->group_id);
2246 			if (processGroup == -id)
2247 				return entry;
2248 		}
2249 	}
2250 
2251 	return NULL;
2252 }
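
/*	Illustrative calls (sketch only), mirroring the \a id semantics
	documented above:

		get_job_control_entry(children, 123);	// entry of team 123, if any
		get_job_control_entry(children, -1);	// first entry, any child
		get_job_control_entry(children, -42);	// first entry of group 42
*/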
2253 
2254 
2255 /*!	Returns the first job control entry from one of the team's dead,
2256 	continued, or stopped children which matches \a id.
2257 	\a id can be:
2258 	- \code > 0 \endcode: Matching an entry with that team ID.
2259 	- \code == -1 \endcode: Matching any entry.
2260 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2261 	\c 0 is an invalid value for \a id.
2262 
2263 	The caller must hold \a team's lock.
2264 
2265 	\param team The team whose dead, stopped, and continued child lists shall be
2266 		checked.
2267 	\param id The match criterion.
2268 	\param flags Specifies which children shall be considered. Dead children
2269 		always are. Stopped children are considered when \a flags is ORed
2270 		bitwise with \c WUNTRACED, continued children when \a flags is ORed
2271 		bitwise with \c WCONTINUED.
2272 	\return The first matching entry or \c NULL, if none matches.
2273 */
2274 static job_control_entry*
2275 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2276 {
2277 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
2278 
2279 	if (entry == NULL && (flags & WCONTINUED) != 0)
2280 		entry = get_job_control_entry(team->continued_children, id);
2281 
2282 	if (entry == NULL && (flags & WUNTRACED) != 0)
2283 		entry = get_job_control_entry(team->stopped_children, id);
2284 
2285 	return entry;
2286 }
2287 
2288 
2289 job_control_entry::job_control_entry()
2290 	:
2291 	has_group_ref(false)
2292 {
2293 }
2294 
2295 
2296 job_control_entry::~job_control_entry()
2297 {
2298 	if (has_group_ref) {
2299 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2300 
2301 		ProcessGroup* group = sGroupHash.Lookup(group_id);
2302 		if (group == NULL) {
2303 			panic("job_control_entry::~job_control_entry(): unknown group "
2304 				"ID: %" B_PRId32, group_id);
2305 			return;
2306 		}
2307 
2308 		groupHashLocker.Unlock();
2309 
2310 		group->ReleaseReference();
2311 	}
2312 }
2313 
2314 
2315 /*!	Invoked when the owning team is dying, initializing the entry according to
2316 	the dead state.
2317 
2318 	The caller must hold the owning team's lock and the scheduler lock.
2319 */
2320 void
2321 job_control_entry::InitDeadState()
2322 {
2323 	if (team != NULL) {
2324 		ASSERT(team->exit.initialized);
2325 
2326 		group_id = team->group_id;
2327 		team->group->AcquireReference();
2328 		has_group_ref = true;
2329 
2330 		thread = team->id;
2331 		status = team->exit.status;
2332 		reason = team->exit.reason;
2333 		signal = team->exit.signal;
2334 		signaling_user = team->exit.signaling_user;
2335 
2336 		team = NULL;
2337 	}
2338 }
2339 
2340 
2341 job_control_entry&
2342 job_control_entry::operator=(const job_control_entry& other)
2343 {
2344 	state = other.state;
2345 	thread = other.thread;
2346 	signal = other.signal;
2347 	has_group_ref = false;
2348 	signaling_user = other.signaling_user;
2349 	team = other.team;
2350 	group_id = other.group_id;
2351 	status = other.status;
2352 	reason = other.reason;
2353 
2354 	return *this;
2355 }
2356 
2357 
2358 /*! This is the kernel backend for waitid().
2359 */
2360 static thread_id
2361 wait_for_child(pid_t child, uint32 flags, siginfo_t& _info)
2362 {
2363 	Thread* thread = thread_get_current_thread();
2364 	Team* team = thread->team;
2365 	struct job_control_entry foundEntry;
2366 	struct job_control_entry* freeDeathEntry = NULL;
2367 	status_t status = B_OK;
2368 
2369 	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRIu32 ")\n",
2370 		child, flags));
2371 
2372 	T(WaitForChild(child, flags));
2373 
2374 	pid_t originalChild = child;
2375 
2376 	bool ignoreFoundEntries = false;
2377 	bool ignoreFoundEntriesChecked = false;
2378 
2379 	while (true) {
2380 		// lock the team
2381 		TeamLocker teamLocker(team);
2382 
2383 		// A 0 child argument means to wait for all children in the process
2384 		// group of the calling team.
2385 		child = originalChild == 0 ? -team->group_id : originalChild;
2386 
2387 		// check whether any condition holds
2388 		job_control_entry* entry = get_job_control_entry(team, child, flags);
2389 
2392 		if (entry == NULL) {
2393 			// No success yet -- check whether there are any children complying
2394 			// with the process group specification at all.
2395 			bool childrenExist = false;
2396 			if (child == -1) {
2397 				childrenExist = team->children != NULL;
2398 			} else if (child < -1) {
2399 				childrenExist = has_children_in_group(team, -child);
2400 			} else {
2401 				if (Team* childTeam = Team::Get(child)) {
2402 					BReference<Team> childTeamReference(childTeam, true);
2403 					TeamLocker childTeamLocker(childTeam);
2404 					childrenExist = childTeam->parent == team;
2405 				}
2406 			}
2407 
2408 			if (!childrenExist) {
2409 				// there is no child we could wait for
2410 				status = ECHILD;
2411 			} else {
2412 				// the children we're waiting for are still running
2413 				status = B_WOULD_BLOCK;
2414 			}
2415 		} else {
2416 			// got something
2417 			foundEntry = *entry;
2418 
2419 			// unless WNOWAIT has been specified, "consume" the wait state
2420 			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
2421 				if (entry->state == JOB_CONTROL_STATE_DEAD) {
2422 					// The child is dead. Reap its death entry.
2423 					freeDeathEntry = entry;
2424 					team->dead_children.entries.Remove(entry);
2425 					team->dead_children.count--;
2426 				} else {
2427 					// The child is well. Reset its job control state.
2428 					team_set_job_control_state(entry->team,
2429 						JOB_CONTROL_STATE_NONE, NULL, false);
2430 				}
2431 			}
2432 		}
2433 
2434 		// If we haven't got anything yet, prepare to wait on the condition
2435 		// variable.
2436 		ConditionVariableEntry deadWaitEntry;
2437 
2438 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
2439 			team->dead_children.condition_variable.Add(&deadWaitEntry);
2440 
2441 		teamLocker.Unlock();
2442 
2443 		// we got our entry and can return to our caller
2444 		if (status == B_OK) {
2445 			if (ignoreFoundEntries) {
2446 				// ... unless we shall ignore found entries
2447 				delete freeDeathEntry;
2448 				freeDeathEntry = NULL;
2449 				continue;
2450 			}
2451 
2452 			break;
2453 		}
2454 
2455 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
2456 			T(WaitForChildDone(status));
2457 			return status;
2458 		}
2459 
2460 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
2461 		if (status == B_INTERRUPTED) {
2462 			T(WaitForChildDone(status));
2463 			return status;
2464 		}
2465 
2466 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
2467 		// all our children are dead and fail with ECHILD. We check the
2468 		// condition at this point.
2469 		if (!ignoreFoundEntriesChecked) {
2470 			teamLocker.Lock();
2471 
2472 			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
2473 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
2474 				|| handler.sa_handler == SIG_IGN) {
2475 				ignoreFoundEntries = true;
2476 			}
2477 
2478 			teamLocker.Unlock();
2479 
2480 			ignoreFoundEntriesChecked = true;
2481 		}
2482 	}
2483 
2484 	delete freeDeathEntry;
2485 
2486 	// When we get here, we have a valid death entry and have already been
2487 	// unregistered from the team or group. Fill in the returned info.
2488 	memset(&_info, 0, sizeof(_info));
2489 	_info.si_signo = SIGCHLD;
2490 	_info.si_pid = foundEntry.thread;
2491 	_info.si_uid = foundEntry.signaling_user;
2492 	// TODO: Fill in si_errno?
2493 
2494 	switch (foundEntry.state) {
2495 		case JOB_CONTROL_STATE_DEAD:
2496 			_info.si_code = foundEntry.reason;
2497 			_info.si_status = foundEntry.reason == CLD_EXITED
2498 				? foundEntry.status : foundEntry.signal;
2499 			break;
2500 		case JOB_CONTROL_STATE_STOPPED:
2501 			_info.si_code = CLD_STOPPED;
2502 			_info.si_status = foundEntry.signal;
2503 			break;
2504 		case JOB_CONTROL_STATE_CONTINUED:
2505 			_info.si_code = CLD_CONTINUED;
2506 			_info.si_status = 0;
2507 			break;
2508 		case JOB_CONTROL_STATE_NONE:
2509 			// can't happen
2510 			break;
2511 	}
2512 
2513 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs if no other
2514 	// child status is available.
2515 	TeamLocker teamLocker(team);
2516 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2517 
2518 	if (is_team_signal_blocked(team, SIGCHLD)) {
2519 		if (get_job_control_entry(team, child, flags) == NULL)
2520 			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
2521 	}
2522 
2523 	schedulerLocker.Unlock();
2524 	teamLocker.Unlock();
2525 
2526 	// When the team is dead, the main thread continues to live in the kernel
2527 	// team for a very short time. To avoid surprises for the caller, we wait
2528 	// until the thread is really gone.
2529 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
2530 		wait_for_thread(foundEntry.thread, NULL);
2531 
2532 	T(WaitForChildDone(foundEntry));
2533 
2534 	return foundEntry.thread;
2535 }
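
/*	Rough correspondence to the POSIX interface (a sketch, not the actual
	libroot wrappers): a waitpid(-groupID, &status, WUNTRACED) call arrives
	here approximately as

		siginfo_t info;
		pid_t child = wait_for_child(-groupID, WUNTRACED, info);
			// also reports stopped children of process group groupID
*/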
2536 
2537 
2538 /*! Fills the team_info structure with information from the specified team.
2539 	Interrupts must be enabled. The team must not be locked.
2540 */
2541 static status_t
2542 fill_team_info(Team* team, team_info* info, size_t size)
2543 {
2544 	if (size != sizeof(team_info))
2545 		return B_BAD_VALUE;
2546 
2547 	// TODO: Set more information for team_info
2548 	memset(info, 0, size);
2549 
2550 	info->team = team->id;
2551 		// immutable
2552 	info->image_count = count_images(team);
2553 		// protected by sImageMutex
2554 
2555 	TeamLocker teamLocker(team);
2556 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2557 
2558 	info->thread_count = team->num_threads;
2559 	//info->area_count =
2560 	info->debugger_nub_thread = team->debug_info.nub_thread;
2561 	info->debugger_nub_port = team->debug_info.nub_port;
2562 	info->uid = team->effective_uid;
2563 	info->gid = team->effective_gid;
2564 
2565 	strlcpy(info->args, team->Args(), sizeof(info->args));
2566 	info->argc = 1;
2567 
2568 	return B_OK;
2569 }
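
/*	Userland view (sketch; \c teamID is a placeholder): the fields set above
	are what the public get_team_info() call returns:

		team_info info;
		if (get_team_info(teamID, &info) == B_OK)
			printf("%" B_PRId32 " threads\n", info.thread_count);
*/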
2570 
2571 
2572 /*!	Returns whether the process group contains stopped processes.
2573 	The caller must hold the process group's lock.
2574 */
2575 static bool
2576 process_group_has_stopped_processes(ProcessGroup* group)
2577 {
2578 	Team* team = group->teams;
2579 	while (team != NULL) {
2580 		// the parent team's lock guards the job control entry -- acquire it
2581 		team->LockTeamAndParent(false);
2582 
2583 		if (team->job_control_entry != NULL
2584 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2585 			team->UnlockTeamAndParent();
2586 			return true;
2587 		}
2588 
2589 		team->UnlockTeamAndParent();
2590 
2591 		team = team->group_next;
2592 	}
2593 
2594 	return false;
2595 }
2596 
2597 
2598 /*!	Iterates through all process groups queued in team_remove_team() and signals
2599 	those that are orphaned and have stopped processes.
2600 	The caller must not hold any team or process group locks.
2601 */
2602 static void
2603 orphaned_process_group_check()
2604 {
2605 	// process as long as there are groups in the list
2606 	while (true) {
2607 		// remove the head from the list
2608 		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
2609 
2610 		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
2611 		if (group == NULL)
2612 			return;
2613 
2614 		group->UnsetOrphanedCheck();
2615 		BReference<ProcessGroup> groupReference(group);
2616 
2617 		orphanedCheckLocker.Unlock();
2618 
2619 		AutoLocker<ProcessGroup> groupLocker(group);
2620 
2621 		// If the group is orphaned and contains stopped processes, we're
2622 		// supposed to send SIGHUP + SIGCONT.
2623 		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
2624 			Thread* currentThread = thread_get_current_thread();
2625 
2626 			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
2627 			send_signal_to_process_group_locked(group, signal, 0);
2628 
2629 			signal.SetNumber(SIGCONT);
2630 			send_signal_to_process_group_locked(group, signal, 0);
2631 		}
2632 	}
2633 }
2634 
2635 
2636 static status_t
2637 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2638 	uint32 flags)
2639 {
2640 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2641 		return B_BAD_VALUE;
2642 
2643 	// get the team
2644 	Team* team = Team::GetAndLock(id);
2645 	if (team == NULL)
2646 		return B_BAD_TEAM_ID;
2647 	BReference<Team> teamReference(team, true);
2648 	TeamLocker teamLocker(team, true);
2649 
2650 	if ((flags & B_CHECK_PERMISSION) != 0) {
2651 		uid_t uid = geteuid();
2652 		if (uid != 0 && uid != team->effective_uid)
2653 			return B_NOT_ALLOWED;
2654 	}
2655 
2656 	bigtime_t kernelTime = 0;
2657 	bigtime_t userTime = 0;
2658 
2659 	switch (who) {
2660 		case B_TEAM_USAGE_SELF:
2661 		{
2662 			Thread* thread = team->thread_list;
2663 
2664 			for (; thread != NULL; thread = thread->team_next) {
2665 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2666 				kernelTime += thread->kernel_time;
2667 				userTime += thread->user_time;
2668 			}
2669 
2670 			kernelTime += team->dead_threads_kernel_time;
2671 			userTime += team->dead_threads_user_time;
2672 			break;
2673 		}
2674 
2675 		case B_TEAM_USAGE_CHILDREN:
2676 		{
2677 			Team* child = team->children;
2678 			for (; child != NULL; child = child->siblings_next) {
2679 				TeamLocker childLocker(child);
2680 
2681 				Thread* thread = child->thread_list;
2682 
2683 				for (; thread != NULL; thread = thread->team_next) {
2684 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2685 					kernelTime += thread->kernel_time;
2686 					userTime += thread->user_time;
2687 				}
2688 
2689 				kernelTime += child->dead_threads_kernel_time;
2690 				userTime += child->dead_threads_user_time;
2691 			}
2692 
2693 			kernelTime += team->dead_children.kernel_time;
2694 			userTime += team->dead_children.user_time;
2695 			break;
2696 		}
2697 	}
2698 
2699 	info->kernel_time = kernelTime;
2700 	info->user_time = userTime;
2701 
2702 	return B_OK;
2703 }
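
/*	Userland view (sketch): this backs the public get_team_usage_info()
	call; the returned times are in microseconds (bigtime_t):

		team_usage_info usage;
		if (get_team_usage_info(B_CURRENT_TEAM, B_TEAM_USAGE_SELF, &usage)
				== B_OK) {
			bigtime_t total = usage.user_time + usage.kernel_time;
		}
*/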
2704 
2705 
2706 //	#pragma mark - Private kernel API
2707 
2708 
2709 status_t
2710 team_init(kernel_args* args)
2711 {
2712 	// create the team hash table
2713 	new(&sTeamHash) TeamTable;
2714 	if (sTeamHash.Init(64) != B_OK)
2715 		panic("Failed to init team hash table!");
2716 
2717 	new(&sGroupHash) ProcessGroupHashTable;
2718 	if (sGroupHash.Init() != B_OK)
2719 		panic("Failed to init process group hash table!");
2720 
2721 	// create initial session and process groups
2722 
2723 	ProcessSession* session = new(std::nothrow) ProcessSession(1);
2724 	if (session == NULL)
2725 		panic("Could not create initial session.\n");
2726 	BReference<ProcessSession> sessionReference(session, true);
2727 
2728 	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
2729 	if (group == NULL)
2730 		panic("Could not create initial process group.\n");
2731 	BReference<ProcessGroup> groupReference(group, true);
2732 
2733 	group->Publish(session);
2734 
2735 	// create the kernel team
2736 	sKernelTeam = Team::Create(1, "kernel_team", true);
2737 	if (sKernelTeam == NULL)
2738 		panic("could not create kernel team!\n");
2739 	sKernelTeam->SetArgs(sKernelTeam->Name());
2740 	sKernelTeam->state = TEAM_STATE_NORMAL;
2741 
2742 	sKernelTeam->saved_set_uid = 0;
2743 	sKernelTeam->real_uid = 0;
2744 	sKernelTeam->effective_uid = 0;
2745 	sKernelTeam->saved_set_gid = 0;
2746 	sKernelTeam->real_gid = 0;
2747 	sKernelTeam->effective_gid = 0;
2748 	sKernelTeam->supplementary_groups = NULL;
2749 	sKernelTeam->supplementary_group_count = 0;
2750 
2751 	insert_team_into_group(group, sKernelTeam);
2752 
2753 	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
2754 	if (sKernelTeam->io_context == NULL)
2755 		panic("could not create io_context for kernel team!\n");
2756 
2757 	// stick it in the team hash
2758 	sTeamHash.Insert(sKernelTeam);
2759 
2760 	add_debugger_command_etc("team", &dump_team_info,
2761 		"Dump info about a particular team",
2762 		"[ <id> | <address> | <name> ]\n"
2763 		"Prints information about the specified team. If no argument is given\n"
2764 		"the current team is selected.\n"
2765 		"  <id>       - The ID of the team.\n"
2766 		"  <address>  - The address of the team structure.\n"
2767 		"  <name>     - The team's name.\n", 0);
2768 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2769 		"\n"
2770 		"Prints a list of all existing teams.\n", 0);
2771 
2772 	new(&sNotificationService) TeamNotificationService();
2773 
2774 	sNotificationService.Register();
2775 
2776 	return B_OK;
2777 }
2778 
2779 
2780 int32
2781 team_max_teams(void)
2782 {
2783 	return sMaxTeams;
2784 }
2785 
2786 
2787 int32
2788 team_used_teams(void)
2789 {
2790 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2791 	return sUsedTeams;
2792 }
2793 
2794 
2795 /*! Returns a death entry of a child team specified by ID (if any).
2796 	The caller must hold the team's lock.
2797 
2798 	\param team The team whose dead children list to check.
2799 	\param child The ID of the child whose death entry to return. Must be > 0.
2800 	\param _deleteEntry Return variable, indicating whether the caller needs to
2801 		delete the returned entry.
2802 	\return The death entry of the matching team, or \c NULL, if no death entry
2803 		for the team was found.
2804 */
2805 job_control_entry*
2806 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2807 {
2808 	if (child <= 0)
2809 		return NULL;
2810 
2811 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2812 		child);
2813 	if (entry) {
2814 		// remove the entry only if the caller is the parent of the found team
2815 		if (team_get_current_team_id() == entry->thread) {
2816 			team->dead_children.entries.Remove(entry);
2817 			team->dead_children.count--;
2818 			*_deleteEntry = true;
2819 		} else {
2820 			*_deleteEntry = false;
2821 		}
2822 	}
2823 
2824 	return entry;
2825 }
2826 
2827 
2828 /*! Quick check to see if we have a valid team ID. */
2829 bool
2830 team_is_valid(team_id id)
2831 {
2832 	if (id <= 0)
2833 		return false;
2834 
2835 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2836 
2837 	return team_get_team_struct_locked(id) != NULL;
2838 }
2839 
2840 
2841 Team*
2842 team_get_team_struct_locked(team_id id)
2843 {
2844 	return sTeamHash.Lookup(id);
2845 }
2846 
2847 
2848 void
2849 team_set_controlling_tty(int32 ttyIndex)
2850 {
2851 	// lock the team, so its session won't change while we're playing with it
2852 	Team* team = thread_get_current_thread()->team;
2853 	TeamLocker teamLocker(team);
2854 
2855 	// get and lock the session
2856 	ProcessSession* session = team->group->Session();
2857 	AutoLocker<ProcessSession> sessionLocker(session);
2858 
2859 	// set the session's fields
2860 	session->controlling_tty = ttyIndex;
2861 	session->foreground_group = -1;
2862 }
2863 
2864 
2865 int32
2866 team_get_controlling_tty()
2867 {
2868 	// lock the team, so its session won't change while we're playing with it
2869 	Team* team = thread_get_current_thread()->team;
2870 	TeamLocker teamLocker(team);
2871 
2872 	// get and lock the session
2873 	ProcessSession* session = team->group->Session();
2874 	AutoLocker<ProcessSession> sessionLocker(session);
2875 
2876 	// get the session's field
2877 	return session->controlling_tty;
2878 }
2879 
2880 
2881 status_t
2882 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2883 {
2884 	// lock the team, so its session won't change while we're playing with it
2885 	Thread* thread = thread_get_current_thread();
2886 	Team* team = thread->team;
2887 	TeamLocker teamLocker(team);
2888 
2889 	// get and lock the session
2890 	ProcessSession* session = team->group->Session();
2891 	AutoLocker<ProcessSession> sessionLocker(session);
2892 
2893 	// check given TTY -- must be the controlling tty of the calling process
2894 	if (session->controlling_tty != ttyIndex)
2895 		return ENOTTY;
2896 
2897 	// check given process group -- must belong to our session
2898 	{
2899 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2900 		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
2901 		if (group == NULL || group->Session() != session)
2902 			return B_BAD_VALUE;
2903 	}
2904 
2905 	// If we are a background group, we can set the foreground group unharmed
2906 	// only when we ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
2907 	if (session->foreground_group != -1
2908 		&& session->foreground_group != team->group_id
2909 		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN) {
2910 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2911 
2912 		if (!is_team_signal_blocked(team, SIGTTOU)) {
2913 			pid_t groupID = team->group_id;
2914 
2915 			schedulerLocker.Unlock();
2916 			sessionLocker.Unlock();
2917 			teamLocker.Unlock();
2918 
2919 			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
2920 			send_signal_to_process_group(groupID, signal, 0);
2921 			return B_INTERRUPTED;
2922 		}
2923 	}
2924 
2925 	session->foreground_group = processGroupID;
2926 
2927 	return B_OK;
2928 }
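
/*	For orientation (a sketch of the mapping, not the actual TTY layer call
	chain): this is the kernel end of tcsetpgrp(). A background caller that
	neither blocks nor ignores SIGTTOU gets a SIGTTOU and B_INTERRUPTED
	instead of changing the foreground group:

		if (tcsetpgrp(STDIN_FILENO, getpgrp()) != 0)
			perror("tcsetpgrp");
*/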
2929 
2930 
2931 /*!	Removes the specified team from the global team hash, from its process
2932 	group, and from its parent.
2933 	It also moves all of its children to the kernel team.
2934 
2935 	The caller must hold the following locks:
2936 	- \a team's process group's lock,
2937 	- the kernel team's lock,
2938 	- \a team's parent team's lock (might be the kernel team), and
2939 	- \a team's lock.
2940 */
2941 void
2942 team_remove_team(Team* team, pid_t& _signalGroup)
2943 {
2944 	Team* parent = team->parent;
2945 
2946 	// remember how long this team lasted
2947 	parent->dead_children.kernel_time += team->dead_threads_kernel_time
2948 		+ team->dead_children.kernel_time;
2949 	parent->dead_children.user_time += team->dead_threads_user_time
2950 		+ team->dead_children.user_time;
2951 
2952 	// remove the team from the hash table
2953 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2954 	sTeamHash.Remove(team);
2955 	sUsedTeams--;
2956 	teamsLocker.Unlock();
2957 
2958 	// The team can no longer be accessed by ID. Navigation to it is still
2959 	// possible from its process group and its parent and children, but that
2960 	// will be rectified shortly.
2961 	team->state = TEAM_STATE_DEATH;
2962 
2963 	// If we're a controlling process (i.e. a session leader with controlling
2964 	// terminal), there's a bit of signaling we have to do. We can't do any of
2965 	// the signaling here due to the bunch of locks we're holding, but we need
2966 	// to determine whom to signal.
2967 	_signalGroup = -1;
2968 	bool isSessionLeader = false;
2969 	if (team->session_id == team->id
2970 		&& team->group->Session()->controlling_tty >= 0) {
2971 		isSessionLeader = true;
2972 
2973 		ProcessSession* session = team->group->Session();
2974 
2975 		AutoLocker<ProcessSession> sessionLocker(session);
2976 
2977 		session->controlling_tty = -1;
2978 		_signalGroup = session->foreground_group;
2979 	}
2980 
2981 	// remove us from our process group
2982 	remove_team_from_group(team);
2983 
2984 	// move the team's children to the kernel team
2985 	while (Team* child = team->children) {
2986 		// remove the child from the current team and add it to the kernel team
2987 		TeamLocker childLocker(child);
2988 
2989 		remove_team_from_parent(team, child);
2990 		insert_team_into_parent(sKernelTeam, child);
2991 
2992 		// move job control entries too
2993 		sKernelTeam->stopped_children.entries.MoveFrom(
2994 			&team->stopped_children.entries);
2995 		sKernelTeam->continued_children.entries.MoveFrom(
2996 			&team->continued_children.entries);
2997 
2998 		// If the team was a session leader with controlling terminal,
2999 		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
3000 		// groups with stopped processes. Due to locking complications we can't
3001 		// do that here, so we only check whether we were a reason for the
3002 		// child's process group not being an orphan and, if so, schedule a
3003 		// later check (cf. orphaned_process_group_check()).
3004 		if (isSessionLeader) {
3005 			ProcessGroup* childGroup = child->group;
3006 			if (childGroup->Session()->id == team->session_id
3007 				&& childGroup->id != team->group_id) {
3008 				childGroup->ScheduleOrphanedCheck();
3009 			}
3010 		}
3011 
3012 		// Note, we don't move the dead children entries. Those will be deleted
3013 		// when the team structure is deleted.
3014 	}
3015 
3016 	// remove us from our parent
3017 	remove_team_from_parent(parent, team);
3018 }
3019 
3020 
3021 /*!	Kills all threads but the main thread of the team and shuts down user
3022 	debugging for it. To be called on exit of the team's main thread. The
3023 	caller must not hold any locks.
3024 
3025 	\param team The team in question.
3026 	\return The port of the debugger for the team, -1 if none. To be passed to
3027 		team_delete_team().
3028 */
3029 port_id
3030 team_shutdown_team(Team* team)
3031 {
3032 	ASSERT(thread_get_current_thread() == team->main_thread);
3033 
3034 	TeamLocker teamLocker(team);
3035 
3036 	// Make sure debugging changes won't happen anymore.
3037 	port_id debuggerPort = -1;
3038 	while (true) {
3039 		// If a debugger change is in progress for the team, we'll have to
3040 		// wait until it is done.
3041 		ConditionVariableEntry waitForDebuggerEntry;
3042 		bool waitForDebugger = false;
3043 
3044 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
3045 
3046 		if (team->debug_info.debugger_changed_condition != NULL) {
3047 			team->debug_info.debugger_changed_condition->Add(
3048 				&waitForDebuggerEntry);
3049 			waitForDebugger = true;
3050 		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
3051 			// The team is being debugged. That will stop with the termination
3052 			// of the nub thread. Since we set the team state to death, no one
3053 			// can install a debugger anymore. We fetch the debugger's port to
3054 			// send it a message at the bitter end.
3055 			debuggerPort = team->debug_info.debugger_port;
3056 		}
3057 
3058 		debugInfoLocker.Unlock();
3059 
3060 		if (!waitForDebugger)
3061 			break;
3062 
3063 		// wait for the debugger change to be finished
3064 		teamLocker.Unlock();
3065 
3066 		waitForDebuggerEntry.Wait();
3067 
3068 		teamLocker.Lock();
3069 	}
3070 
3071 	// Mark the team as shutting down. That will prevent new threads from being
3072 	// created and debugger changes from taking place.
3073 	team->state = TEAM_STATE_SHUTDOWN;
3074 
3075 	// delete all timers
3076 	team->DeleteUserTimers(false);
3077 
3078 	// deactivate CPU time user timers for the team
3079 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3080 
3081 	if (team->HasActiveCPUTimeUserTimers())
3082 		team->DeactivateCPUTimeUserTimers();
3083 
3084 	schedulerLocker.Unlock();
3085 
3086 	// kill all threads but the main thread
3087 	team_death_entry deathEntry;
3088 	deathEntry.condition.Init(team, "team death");
3089 
3090 	while (true) {
3091 		team->death_entry = &deathEntry;
3092 		deathEntry.remaining_threads = 0;
3093 
3094 		Thread* thread = team->thread_list;
3095 		while (thread != NULL) {
3096 			if (thread != team->main_thread) {
3097 				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
3098 				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
3099 				deathEntry.remaining_threads++;
3100 			}
3101 
3102 			thread = thread->team_next;
3103 		}
3104 
3105 		if (deathEntry.remaining_threads == 0)
3106 			break;
3107 
3108 		// there are threads to wait for
3109 		ConditionVariableEntry entry;
3110 		deathEntry.condition.Add(&entry);
3111 
3112 		teamLocker.Unlock();
3113 
3114 		entry.Wait();
3115 
3116 		teamLocker.Lock();
3117 	}
3118 
3119 	team->death_entry = NULL;
3120 
3121 	return debuggerPort;
3122 }
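
/*	Typical call sequence on main thread exit (sketch; the actual caller
	lives in the thread exit path):

		port_id debuggerPort = team_shutdown_team(team);
		// ... team_remove_team() and further bookkeeping ...
		team_delete_team(team, debuggerPort);
*/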
3123 
3124 
3125 /*!	Called on team exit to notify threads waiting on the team and free most
3126 	resources associated with it.
3127 	The caller shouldn't hold any locks.
3128 */
3129 void
3130 team_delete_team(Team* team, port_id debuggerPort)
3131 {
3132 	// Not quite in our job description, but work that has been left by
3133 	// team_remove_team() and that can be done now that we're not holding any
3134 	// locks.
3135 	orphaned_process_group_check();
3136 
3137 	team_id teamID = team->id;
3138 
3139 	ASSERT(team->num_threads == 0);
3140 
3141 	// If someone is waiting for this team to be loaded, but it dies
3142 	// unexpectedly before being done, we need to notify the waiting
3143 	// thread now.
3144 
3145 	TeamLocker teamLocker(team);
3146 
3147 	if (team->loading_info) {
3148 		// there's indeed someone waiting
3149 		struct team_loading_info* loadingInfo = team->loading_info;
3150 		team->loading_info = NULL;
3151 
3152 		loadingInfo->result = B_ERROR;
3153 		loadingInfo->done = true;
3154 
3155 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3156 
3157 		// wake up the waiting thread
3158 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
3159 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
3160 	}
3161 
3162 	// notify team watchers
3163 
3164 	{
3165 		// we're not reachable from anyone anymore at this point, so we
3166 		// can safely access the list without any locking
3167 		struct team_watcher* watcher;
3168 		while ((watcher = (struct team_watcher*)list_remove_head_item(
3169 				&team->watcher_list)) != NULL) {
3170 			watcher->hook(teamID, watcher->data);
3171 			free(watcher);
3172 		}
3173 	}
3174 
3175 	teamLocker.Unlock();
3176 
3177 	sNotificationService.Notify(TEAM_REMOVED, team);
3178 
3179 	// free team resources
3180 
3181 	delete_realtime_sem_context(team->realtime_sem_context);
3182 	xsi_sem_undo(team);
3183 	remove_images(team);
3184 	team->address_space->RemoveAndPut();
3185 
3186 	team->ReleaseReference();
3187 
3188 	// notify the debugger that the team is gone
3189 	user_debug_team_deleted(teamID, debuggerPort);
3190 }
3191 
3192 
3193 Team*
3194 team_get_kernel_team(void)
3195 {
3196 	return sKernelTeam;
3197 }
3198 
3199 
3200 team_id
3201 team_get_kernel_team_id(void)
3202 {
3203 	if (!sKernelTeam)
3204 		return 0;
3205 
3206 	return sKernelTeam->id;
3207 }
3208 
3209 
3210 team_id
3211 team_get_current_team_id(void)
3212 {
3213 	return thread_get_current_thread()->team->id;
3214 }
3215 
3216 
3217 status_t
3218 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3219 {
3220 	if (id == sKernelTeam->id) {
3221 		// we're the kernel team, so we don't have to go through all
3222 		// the hassle (locking and hash lookup)
3223 		*_addressSpace = VMAddressSpace::GetKernel();
3224 		return B_OK;
3225 	}
3226 
3227 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3228 
3229 	Team* team = team_get_team_struct_locked(id);
3230 	if (team == NULL)
3231 		return B_BAD_VALUE;
3232 
3233 	team->address_space->Get();
3234 	*_addressSpace = team->address_space;
3235 	return B_OK;
3236 }
3237 
3238 
3239 /*!	Sets the team's job control state.
3240 	The caller must hold the parent team's lock. Interrupts are allowed to be
3241 	enabled or disabled. In the latter case the scheduler lock may be held as
3242 	well.
3243 	\a team The team whose job control state shall be set.
3244 	\a newState The new state to be set.
3245 	\a signal The signal the new state was caused by. Can be \c NULL, if
3246 		none. Then the caller is responsible for filling in the following
3247 		fields of the entry before releasing the parent team's lock, unless
3248 		the new state is \c JOB_CONTROL_STATE_NONE:
3249 		- \c signal: The number of the signal causing the state change.
3250 		- \c signaling_user: The real UID of the user sending the signal.
3251 	\a schedulerLocked indicates whether the scheduler lock is being held, too.
3252 */
3253 void
3254 team_set_job_control_state(Team* team, job_control_state newState,
3255 	Signal* signal, bool schedulerLocked)
3256 {
3257 	if (team == NULL || team->job_control_entry == NULL)
3258 		return;
3259 
3260 	// don't touch anything, if the state stays the same or the team is already
3261 	// dead
3262 	job_control_entry* entry = team->job_control_entry;
3263 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
3264 		return;
3265 
3266 	T(SetJobControlState(team->id, newState, signal));
3267 
3268 	// remove from the old list
3269 	switch (entry->state) {
3270 		case JOB_CONTROL_STATE_NONE:
3271 			// entry is in no list ATM
3272 			break;
3273 		case JOB_CONTROL_STATE_DEAD:
3274 			// can't get here
3275 			break;
3276 		case JOB_CONTROL_STATE_STOPPED:
3277 			team->parent->stopped_children.entries.Remove(entry);
3278 			break;
3279 		case JOB_CONTROL_STATE_CONTINUED:
3280 			team->parent->continued_children.entries.Remove(entry);
3281 			break;
3282 	}
3283 
3284 	entry->state = newState;
3285 
3286 	if (signal != NULL) {
3287 		entry->signal = signal->Number();
3288 		entry->signaling_user = signal->SendingUser();
3289 	}
3290 
3291 	// add to new list
3292 	team_job_control_children* childList = NULL;
3293 	switch (entry->state) {
3294 		case JOB_CONTROL_STATE_NONE:
3295 			// entry doesn't get into any list
3296 			break;
3297 		case JOB_CONTROL_STATE_DEAD:
3298 			childList = &team->parent->dead_children;
3299 			team->parent->dead_children.count++;
3300 			break;
3301 		case JOB_CONTROL_STATE_STOPPED:
3302 			childList = &team->parent->stopped_children;
3303 			break;
3304 		case JOB_CONTROL_STATE_CONTINUED:
3305 			childList = &team->parent->continued_children;
3306 			break;
3307 	}
3308 
3309 	if (childList != NULL) {
3310 		childList->entries.Add(entry);
3311 		team->parent->dead_children.condition_variable.NotifyAll(
3312 			schedulerLocked);
3313 	}
3314 }
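
/*	Summary of the list moves above, as implemented by the two switches:

		new state   entry ends up in
		NONE        no list
		STOPPED     team->parent->stopped_children
		CONTINUED   team->parent->continued_children
		DEAD        team->parent->dead_children (final; also bumps count)
*/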
3315 
3316 
3317 /*!	Inits the given team's exit information, if not yet initialized, to some
3318 	generic "killed" status.
3319 	The caller must not hold the team's lock. Interrupts must be enabled.
3320 
3321 	\param team The team whose exit info shall be initialized.
3322 */
3323 void
3324 team_init_exit_info_on_error(Team* team)
3325 {
3326 	TeamLocker teamLocker(team);
3327 
3328 	if (!team->exit.initialized) {
3329 		team->exit.reason = CLD_KILLED;
3330 		team->exit.signal = SIGKILL;
3331 		team->exit.signaling_user = geteuid();
3332 		team->exit.status = 0;
3333 		team->exit.initialized = true;
3334 	}
3335 }
3336 
3337 
3338 /*! Adds a hook to the team that is called as soon as this team goes away.
3339 	This call might become public in the future.
3340 */
3341 status_t
3342 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3343 {
3344 	if (hook == NULL || teamID < B_OK)
3345 		return B_BAD_VALUE;
3346 
3347 	// create the watcher object
3348 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3349 	if (watcher == NULL)
3350 		return B_NO_MEMORY;
3351 
3352 	watcher->hook = hook;
3353 	watcher->data = data;
3354 
3355 	// add watcher, if the team isn't already dying
3356 	// get the team
3357 	Team* team = Team::GetAndLock(teamID);
3358 	if (team == NULL) {
3359 		free(watcher);
3360 		return B_BAD_TEAM_ID;
3361 	}
3362 
3363 	list_add_item(&team->watcher_list, watcher);
3364 
3365 	team->UnlockAndReleaseReference();
3366 
3367 	return B_OK;
3368 }
3369 
3370 
3371 status_t
3372 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3373 {
3374 	if (hook == NULL || teamID < 0)
3375 		return B_BAD_VALUE;
3376 
3377 	// get team and remove watcher (if present)
3378 	Team* team = Team::GetAndLock(teamID);
3379 	if (team == NULL)
3380 		return B_BAD_TEAM_ID;
3381 
3382 	// search for watcher
3383 	team_watcher* watcher = NULL;
3384 	while ((watcher = (team_watcher*)list_get_next_item(
3385 			&team->watcher_list, watcher)) != NULL) {
3386 		if (watcher->hook == hook && watcher->data == data) {
3387 			// got it!
3388 			list_remove_item(&team->watcher_list, watcher);
3389 			break;
3390 		}
3391 	}
3392 
3393 	team->UnlockAndReleaseReference();
3394 
3395 	if (watcher == NULL)
3396 		return B_ENTRY_NOT_FOUND;
3397 
3398 	free(watcher);
3399 	return B_OK;
3400 }
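
/*	Usage sketch for the watcher API above (hook and variable names are made
	up for the example):

		static void
		my_team_gone(team_id teamID, void* data)
		{
			// called when the team goes away
		}

		status_t error = start_watching_team(teamID, &my_team_gone, NULL);
		// ...
		if (error == B_OK)
			stop_watching_team(teamID, &my_team_gone, NULL);
*/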
3401 
3402 
3403 /*!	Allocates a user_thread structure from the team.
3404 	The team lock must be held, unless the function is called for the team's
3405 	main thread. Interrupts must be enabled.
3406 */
3407 struct user_thread*
3408 team_allocate_user_thread(Team* team)
3409 {
3410 	if (team->user_data == 0)
3411 		return NULL;
3412 
3413 	// take an entry from the free list, if any
3414 	if (struct free_user_thread* entry = team->free_user_threads) {
3415 		user_thread* thread = entry->thread;
3416 		team->free_user_threads = entry->next;
3417 		free(entry);
3418 		return thread;
3419 	}
3420 
3421 	while (true) {
3422 		// enough space left?
3423 		size_t needed = ROUNDUP(sizeof(user_thread), 128);
3424 		if (team->user_data_size - team->used_user_data < needed) {
3425 			// try to resize the area
3426 			if (resize_area(team->user_data_area,
3427 					team->user_data_size + B_PAGE_SIZE) != B_OK) {
3428 				return NULL;
3429 			}
3430 
3431 			// resized user area successfully -- try to allocate the user_thread
3432 			// again
3433 			team->user_data_size += B_PAGE_SIZE;
3434 			continue;
3435 		}
3436 
3437 		// allocate the user_thread
3438 		user_thread* thread
3439 			= (user_thread*)(team->user_data + team->used_user_data);
3440 		team->used_user_data += needed;
3441 
3442 		return thread;
3443 	}
3444 }
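
/*	Size math for the allocator above (assuming sizeof(user_thread) <= 128):
	every slot occupies ROUNDUP(sizeof(user_thread), 128) = 128 bytes, so a
	single B_PAGE_SIZE growth step (4096 bytes on x86) makes room for 32
	more slots.
*/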
3445 
3446 
3447 /*!	Frees the given user_thread structure.
3448 	The team's lock must not be held. Interrupts must be enabled.
3449 	\param team The team the user thread was allocated from.
3450 	\param userThread The user thread to free.
3451 */
3452 void
3453 team_free_user_thread(Team* team, struct user_thread* userThread)
3454 {
3455 	if (userThread == NULL)
3456 		return;
3457 
3458 	// create a free list entry
3459 	free_user_thread* entry
3460 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3461 	if (entry == NULL) {
3462 		// we have to leak the user thread :-/
3463 		return;
3464 	}
3465 
3466 	// add to free list
3467 	TeamLocker teamLocker(team);
3468 
3469 	entry->thread = userThread;
3470 	entry->next = team->free_user_threads;
3471 	team->free_user_threads = entry;
3472 }
3473 
3474 
3475 //	#pragma mark - Associated data interface
3476 
3477 
3478 AssociatedData::AssociatedData()
3479 	:
3480 	fOwner(NULL)
3481 {
3482 }
3483 
3484 
3485 AssociatedData::~AssociatedData()
3486 {
3487 }
3488 
3489 
3490 void
3491 AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
3492 {
3493 }
3494 
3495 
3496 AssociatedDataOwner::AssociatedDataOwner()
3497 {
3498 	mutex_init(&fLock, "associated data owner");
3499 }
3500 
3501 
3502 AssociatedDataOwner::~AssociatedDataOwner()
3503 {
3504 	mutex_destroy(&fLock);
3505 }
3506 
3507 
3508 bool
3509 AssociatedDataOwner::AddData(AssociatedData* data)
3510 {
3511 	MutexLocker locker(fLock);
3512 
3513 	if (data->Owner() != NULL)
3514 		return false;
3515 
3516 	data->AcquireReference();
3517 	fList.Add(data);
3518 	data->SetOwner(this);
3519 
3520 	return true;
3521 }
3522 
3523 
3524 bool
3525 AssociatedDataOwner::RemoveData(AssociatedData* data)
3526 {
3527 	MutexLocker locker(fLock);
3528 
3529 	if (data->Owner() != this)
3530 		return false;
3531 
3532 	data->SetOwner(NULL);
3533 	fList.Remove(data);
3534 
3535 	locker.Unlock();
3536 
3537 	data->ReleaseReference();
3538 
3539 	return true;
3540 }
3541 
3542 
3543 void
3544 AssociatedDataOwner::PrepareForDeletion()
3545 {
3546 	MutexLocker locker(fLock);
3547 
3548 	// move all data to a temporary list and unset the owner
3549 	DataList list;
3550 	list.MoveFrom(&fList);
3551 
3552 	for (DataList::Iterator it = list.GetIterator();
3553 		AssociatedData* data = it.Next();) {
3554 		data->SetOwner(NULL);
3555 	}
3556 
3557 	locker.Unlock();
3558 
3559 	// call the notification hooks and release our references
3560 	while (AssociatedData* data = list.RemoveHead()) {
3561 		data->OwnerDeleted(this);
3562 		data->ReleaseReference();
3563 	}
3564 }
3565 
3566 
3567 /*!	Associates data with the current team.
3568 	When the team is deleted, the data object is notified.
3569 	The team acquires a reference to the object.
3570 
3571 	\param data The data object.
3572 	\return \c true on success, \c false otherwise. Fails only when the supplied
3573 		data object is already associated with another owner.
3574 */
3575 bool
3576 team_associate_data(AssociatedData* data)
3577 {
3578 	return thread_get_current_thread()->team->AddData(data);
3579 }
3580 
3581 
3582 /*!	Dissociates data from the current team.
3583 	Balances an earlier call to team_associate_data().
3584 
3585 	\param data The data object.
3586 	\return \c true on success, \c false otherwise. Fails only when the data
3587 		object is not associated with the current team.
3588 */
3589 bool
3590 team_dissociate_data(AssociatedData* data)
3591 {
3592 	return thread_get_current_thread()->team->RemoveData(data);
3593 }
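
/*	Minimal sketch of an AssociatedData client (the type is hypothetical):

		struct MyTeamData : AssociatedData {
			virtual void OwnerDeleted(AssociatedDataOwner* owner)
			{
				// the team is going away; drop any back references
			}
		};

		MyTeamData* data = new(std::nothrow) MyTeamData;
		if (data != NULL) {
			team_associate_data(data);
				// on success the team holds its own reference now
		}
*/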
3594 
3595 
3596 //	#pragma mark - Public kernel API
3597 
3598 
3599 thread_id
3600 load_image(int32 argCount, const char** args, const char** env)
3601 {
3602 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
3603 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
3604 }
3605 
3606 
3607 thread_id
3608 load_image_etc(int32 argCount, const char* const* args,
3609 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3610 {
3611 	// we need to flatten the args and environment
3612 
3613 	if (args == NULL)
3614 		return B_BAD_VALUE;
3615 
3616 	// determine total needed size
3617 	int32 argSize = 0;
3618 	for (int32 i = 0; i < argCount; i++)
3619 		argSize += strlen(args[i]) + 1;
3620 
3621 	int32 envCount = 0;
3622 	int32 envSize = 0;
3623 	while (env != NULL && env[envCount] != NULL)
3624 		envSize += strlen(env[envCount++]) + 1;
3625 
3626 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3627 	if (size > MAX_PROCESS_ARGS_SIZE)
3628 		return B_TOO_MANY_ARGS;
3629 
3630 	// allocate space
3631 	char** flatArgs = (char**)malloc(size);
3632 	if (flatArgs == NULL)
3633 		return B_NO_MEMORY;
3634 
3635 	char** slot = flatArgs;
3636 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3637 
3638 	// copy arguments and environment
3639 	for (int32 i = 0; i < argCount; i++) {
3640 		int32 argSize = strlen(args[i]) + 1;
3641 		memcpy(stringSpace, args[i], argSize);
3642 		*slot++ = stringSpace;
3643 		stringSpace += argSize;
3644 	}
3645 
3646 	*slot++ = NULL;
3647 
3648 	for (int32 i = 0; i < envCount; i++) {
3649 		int32 envSize = strlen(env[i]) + 1;
3650 		memcpy(stringSpace, env[i], envSize);
3651 		*slot++ = stringSpace;
3652 		stringSpace += envSize;
3653 	}
3654 
3655 	*slot++ = NULL;
3656 
3657 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3658 		priority, parentID, flags, -1, 0);
3659 
3660 	free(flatArgs);
3661 		// load_image_internal() unset our variable if it took over ownership
3662 
3663 	return thread;
3664 }
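
/*	Usage sketch for the public call above (the path is an example only):

		extern char** environ;

		const char* args[] = { "/bin/ls", "-l" };
		thread_id thread = load_image(2, args, (const char**)environ);
		if (thread >= 0)
			resume_thread(thread);
				// the team was started in B_WAIT_TILL_LOADED mode and is
				// suspended until resumed
*/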
3665 
3666 
3667 status_t
3668 wait_for_team(team_id id, status_t* _returnCode)
3669 {
3670 	// check whether the team exists
3671 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3672 
3673 	Team* team = team_get_team_struct_locked(id);
3674 	if (team == NULL)
3675 		return B_BAD_TEAM_ID;
3676 
3677 	id = team->id;
3678 
3679 	teamsLocker.Unlock();
3680 
3681 	// wait for the main thread (it has the same ID as the team)
3682 	return wait_for_thread(id, _returnCode);
3683 }
3684 
3685 
3686 status_t
3687 kill_team(team_id id)
3688 {
3689 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3690 
3691 	Team* team = team_get_team_struct_locked(id);
3692 	if (team == NULL)
3693 		return B_BAD_TEAM_ID;
3694 
3695 	id = team->id;
3696 
3697 	teamsLocker.Unlock();
3698 
3699 	if (team == sKernelTeam)
3700 		return B_NOT_ALLOWED;
3701 
3702 	// Just kill the team's main thread (it has same ID as the team). The
3703 	// cleanup code there will take care of the team.
3704 	return kill_thread(id);
3705 }
3706 
3707 
3708 status_t
3709 _get_team_info(team_id id, team_info* info, size_t size)
3710 {
3711 	// get the team
3712 	Team* team = Team::Get(id);
3713 	if (team == NULL)
3714 		return B_BAD_TEAM_ID;
3715 	BReference<Team> teamReference(team, true);
3716 
3717 	// fill in the info
3718 	return fill_team_info(team, info, size);
3719 }
3720 
3721 
3722 status_t
3723 _get_next_team_info(int32* cookie, team_info* info, size_t size)
3724 {
3725 	int32 slot = *cookie;
3726 	if (slot < 1)
3727 		slot = 1;
3728 
3729 	InterruptsSpinLocker locker(sTeamHashLock);
3730 
3731 	team_id lastTeamID = peek_next_thread_id();
3732 		// TODO: This is broken, since the id can wrap around!
3733 
3734 	// get next valid team
3735 	Team* team = NULL;
3736 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3737 		slot++;
3738 
3739 	if (team == NULL)
3740 		return B_BAD_TEAM_ID;
3741 
3742 	// get a reference to the team and unlock
3743 	BReference<Team> teamReference(team);
3744 	locker.Unlock();
3745 
3746 	// fill in the info
3747 	*cookie = ++slot;
3748 	return fill_team_info(team, info, size);
3749 }
3750 
3751 
3752 status_t
3753 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3754 {
3755 	if (size != sizeof(team_usage_info))
3756 		return B_BAD_VALUE;
3757 
3758 	return common_get_team_usage_info(id, who, info, 0);
3759 }
3760 
3761 
3762 pid_t
3763 getpid(void)
3764 {
3765 	return thread_get_current_thread()->team->id;
3766 }
3767 
3768 
3769 pid_t
3770 getppid(void)
3771 {
3772 	Team* team = thread_get_current_thread()->team;
3773 
3774 	TeamLocker teamLocker(team);
3775 
3776 	return team->parent->id;
3777 }
3778 
3779 
3780 pid_t
3781 getpgid(pid_t id)
3782 {
3783 	if (id < 0) {
3784 		errno = EINVAL;
3785 		return -1;
3786 	}
3787 
3788 	if (id == 0) {
3789 		// get process group of the calling process
3790 		Team* team = thread_get_current_thread()->team;
3791 		TeamLocker teamLocker(team);
3792 		return team->group_id;
3793 	}
3794 
3795 	// get the team
3796 	Team* team = Team::GetAndLock(id);
3797 	if (team == NULL) {
3798 		errno = ESRCH;
3799 		return -1;
3800 	}
3801 
3802 	// get the team's process group ID
3803 	pid_t groupID = team->group_id;
3804 
3805 	team->UnlockAndReleaseReference();
3806 
3807 	return groupID;
3808 }
3809 
3810 
3811 pid_t
3812 getsid(pid_t id)
3813 {
3814 	if (id < 0) {
3815 		errno = EINVAL;
3816 		return -1;
3817 	}
3818 
3819 	if (id == 0) {
3820 		// get session of the calling process
3821 		Team* team = thread_get_current_thread()->team;
3822 		TeamLocker teamLocker(team);
3823 		return team->session_id;
3824 	}
3825 
3826 	// get the team
3827 	Team* team = Team::GetAndLock(id);
3828 	if (team == NULL) {
3829 		errno = ESRCH;
3830 		return -1;
3831 	}
3832 
3833 	// get the team's session ID
3834 	pid_t sessionID = team->session_id;
3835 
3836 	team->UnlockAndReleaseReference();
3837 
3838 	return sessionID;
3839 }
3840 
3841 
3842 //	#pragma mark - User syscalls
3843 
3844 
3845 status_t
3846 _user_exec(const char* userPath, const char* const* userFlatArgs,
3847 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
3848 {
3849 	// NOTE: Since this function normally doesn't return, don't use automatic
3850 	// variables that need destruction in the function scope.
3851 	char path[B_PATH_NAME_LENGTH];
3852 
3853 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3854 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3855 		return B_BAD_ADDRESS;
3856 
3857 	// copy and relocate the flat arguments
3858 	char** flatArgs;
3859 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3860 		argCount, envCount, flatArgs);
3861 
3862 	if (error == B_OK) {
3863 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3864 			envCount, umask);
3865 			// this one only returns in case of error
3866 	}
3867 
3868 	free(flatArgs);
3869 	return error;
3870 }
3871 
3872 
3873 thread_id
3874 _user_fork(void)
3875 {
3876 	return fork_team();
3877 }
3878 
3879 
3880 pid_t
3881 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo)
3882 {
3883 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
3884 		return B_BAD_ADDRESS;
3885 
3886 	siginfo_t info;
3887 	pid_t foundChild = wait_for_child(child, flags, info);
3888 	if (foundChild < 0)
3889 		return syscall_restart_handle_post(foundChild);
3890 
3891 	// copy info back to userland
3892 	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
3893 		return B_BAD_ADDRESS;
3894 
3895 	return foundChild;
3896 }
3897 
3898 
3899 pid_t
3900 _user_process_info(pid_t process, int32 which)
3901 {
3902 	// we only allow to return the parent of the current process
	// we only allow querying the parent of the current process
3904 		&& process != 0 && process != thread_get_current_thread()->team->id)
3905 		return B_BAD_VALUE;
3906 
3907 	pid_t result;
3908 	switch (which) {
3909 		case SESSION_ID:
3910 			result = getsid(process);
3911 			break;
3912 		case GROUP_ID:
3913 			result = getpgid(process);
3914 			break;
3915 		case PARENT_ID:
3916 			result = getppid();
3917 			break;
3918 		default:
3919 			return B_BAD_VALUE;
3920 	}
3921 
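	// On failure, getsid()/getpgid() return -1 and set errno to the
	// (negative) error code, which we hand back to the syscall layer
	// directly.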
3922 	return result >= 0 ? result : errno;
3923 }
3924 
3925 
3926 pid_t
3927 _user_setpgid(pid_t processID, pid_t groupID)
3928 {
3929 	// setpgid() can be called either by the parent of the target process or
3930 	// by the process itself to do one of two things:
3931 	// * Create a new process group with the target process' ID and the target
3932 	//   process as group leader.
3933 	// * Set the target process' process group to an already existing one in the
3934 	//   same session.
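	//
	// A POSIX job-control shell would typically invoke it twice to close the
	// fork() race (userland sketch, not code from this file):
	//
	//	pid_t child = fork();
	//	if (child == 0)
	//		setpgid(0, 0);			// child: new group with its own ID
	//	else if (child > 0)
	//		setpgid(child, child);	// parent: same call; whichever runs
	//								// second finds the group already set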
3935 
3936 	if (groupID < 0)
3937 		return B_BAD_VALUE;
3938 
3939 	Team* currentTeam = thread_get_current_thread()->team;
3940 	if (processID == 0)
3941 		processID = currentTeam->id;
3942 
3943 	// if the group ID is not specified, use the target process' ID
3944 	if (groupID == 0)
3945 		groupID = processID;
3946 
3947 	// We loop when running into the following race condition: We create a new
3948 	// process group, because there isn't one with that ID yet, but later when
3949 	// trying to publish it, we find that someone else created and published
3950 	// a group with that ID in the meantime. In that case we just restart the
3951 	// whole action.
3952 	while (true) {
3953 		// Look up the process group by ID. If it doesn't exist yet and we are
3954 		// allowed to create a new one, do that.
3955 		ProcessGroup* group = ProcessGroup::Get(groupID);
3956 		bool newGroup = false;
3957 		if (group == NULL) {
3958 			if (groupID != processID)
3959 				return B_NOT_ALLOWED;
3960 
3961 			group = new(std::nothrow) ProcessGroup(groupID);
3962 			if (group == NULL)
3963 				return B_NO_MEMORY;
3964 
3965 			newGroup = true;
3966 		}
3967 		BReference<ProcessGroup> groupReference(group, true);
3968 
3969 		// get the target team
3970 		Team* team = Team::Get(processID);
3971 		if (team == NULL)
3972 			return ESRCH;
3973 		BReference<Team> teamReference(team, true);
3974 
3975 		// lock the new process group and the team's current process group
3976 		while (true) {
3977 			// lock the team's current process group
3978 			team->LockProcessGroup();
3979 
3980 			ProcessGroup* oldGroup = team->group;
3981 			if (oldGroup == group) {
3982 				// it's the same as the target group, so just bail out
3983 				oldGroup->Unlock();
3984 				return group->id;
3985 			}
3986 
3987 			oldGroup->AcquireReference();
3988 
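			// To avoid deadlocks, process groups are always locked in
			// ascending ID order. A newly created group isn't published
			// yet, so nobody else can contend for it and we may lock it
			// right away.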
3989 			// lock the target process group, if locking order allows it
3990 			if (newGroup || group->id > oldGroup->id) {
3991 				group->Lock();
3992 				break;
3993 			}
3994 
			// locking order doesn't allow a direct lock -- try without
			// blocking
3996 			if (group->TryLock())
3997 				break;
3998 
3999 			// no dice -- unlock the team's current process group and relock in
4000 			// the correct order
4001 			oldGroup->Unlock();
4002 
4003 			group->Lock();
4004 			oldGroup->Lock();
4005 
4006 			// check whether things are still the same
4007 			TeamLocker teamLocker(team);
4008 			if (team->group == oldGroup)
4009 				break;
4010 
4011 			// something changed -- unlock everything and retry
4012 			teamLocker.Unlock();
4013 			oldGroup->Unlock();
4014 			group->Unlock();
4015 			oldGroup->ReleaseReference();
4016 		}
4017 
4018 		// we now have references and locks of both new and old process group
4019 		BReference<ProcessGroup> oldGroupReference(team->group, true);
4020 		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
4021 		AutoLocker<ProcessGroup> groupLocker(group, true);
4022 
4023 		// also lock the target team and its parent
4024 		team->LockTeamAndParent(false);
4025 		TeamLocker parentLocker(team->parent, true);
4026 		TeamLocker teamLocker(team, true);
4027 
4028 		// perform the checks
4029 		if (team == currentTeam) {
4030 			// we set our own group
4031 
4032 			// we must not change our process group ID if we're a session leader
4033 			if (is_session_leader(currentTeam))
4034 				return B_NOT_ALLOWED;
4035 		} else {
4036 			// Calling team != target team. The target team must be a child of
4037 			// the calling team and in the same session. (If that's the case it
4038 			// isn't a session leader either.)
4039 			if (team->parent != currentTeam
4040 				|| team->session_id != currentTeam->session_id) {
4041 				return B_NOT_ALLOWED;
4042 			}
4043 
			// The call must also fail on a child that has already executed
			// exec*() [EACCES].
4046 			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
4047 				return EACCES;
4048 		}
4049 
4050 		// If we created a new process group, publish it now.
4051 		if (newGroup) {
4052 			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
4053 			if (sGroupHash.Lookup(groupID)) {
				// A group with that ID has appeared since we first checked.
4055 				// Back to square one.
4056 				continue;
4057 			}
4058 
4059 			group->PublishLocked(team->group->Session());
4060 		} else if (group->Session()->id != team->session_id) {
4061 			// The existing target process group belongs to a different session.
4062 			// That's not allowed.
4063 			return B_NOT_ALLOWED;
4064 		}
4065 
4066 		// Everything is ready -- set the group.
4067 		remove_team_from_group(team);
4068 		insert_team_into_group(group, team);
4069 
4070 		// Changing the process group might have changed the situation for a
4071 		// parent waiting in wait_for_child(). Hence we notify it.
4072 		team->parent->dead_children.condition_variable.NotifyAll(false);
4073 
4074 		return group->id;
4075 	}
4076 }
4077 
4078 
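/*!	Implements setsid(): makes the calling team the leader of a new session
	and of a new process group within that session. Fails with B_NOT_ALLOWED
	if the team already leads a process group.
*/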
4079 pid_t
4080 _user_setsid(void)
4081 {
4082 	Team* team = thread_get_current_thread()->team;
4083 
4084 	// create a new process group and session
4085 	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
4086 	if (group == NULL)
4087 		return B_NO_MEMORY;
4088 	BReference<ProcessGroup> groupReference(group, true);
4089 	AutoLocker<ProcessGroup> groupLocker(group);
4090 
4091 	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
4092 	if (session == NULL)
4093 		return B_NO_MEMORY;
4094 	BReference<ProcessSession> sessionReference(session, true);
4095 
4096 	// lock the team's current process group, parent, and the team itself
4097 	team->LockTeamParentAndProcessGroup();
4098 	BReference<ProcessGroup> oldGroupReference(team->group);
4099 	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
4100 	TeamLocker parentLocker(team->parent, true);
4101 	TeamLocker teamLocker(team, true);
4102 
4103 	// the team must not already be a process group leader
4104 	if (is_process_group_leader(team))
4105 		return B_NOT_ALLOWED;
4106 
4107 	// remove the team from the old and add it to the new process group
4108 	remove_team_from_group(team);
4109 	group->Publish(session);
4110 	insert_team_into_group(group, team);
4111 
4112 	// Changing the process group might have changed the situation for a
4113 	// parent waiting in wait_for_child(). Hence we notify it.
4114 	team->parent->dead_children.condition_variable.NotifyAll(false);
4115 
4116 	return group->id;
4117 }
4118 
4119 
4120 status_t
4121 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4122 {
4123 	status_t returnCode;
4124 	status_t status;
4125 
4126 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4127 		return B_BAD_ADDRESS;
4128 
4129 	status = wait_for_team(id, &returnCode);
4130 	if (status >= B_OK && _userReturnCode != NULL) {
4131 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4132 				!= B_OK)
4133 			return B_BAD_ADDRESS;
4134 		return B_OK;
4135 	}
4136 
4137 	return syscall_restart_handle_post(status);
4138 }
4139 
4140 
4141 thread_id
4142 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
4143 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
4144 	port_id errorPort, uint32 errorToken)
4145 {
4146 	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));
4147 
4148 	if (argCount < 1)
4149 		return B_BAD_VALUE;
4150 
4151 	// copy and relocate the flat arguments
4152 	char** flatArgs;
4153 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
4154 		argCount, envCount, flatArgs);
4155 	if (error != B_OK)
4156 		return error;
4157 
4158 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
4159 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
4160 		errorToken);
4161 
4162 	free(flatArgs);
		// load_image_internal() has unset our variable if it took over
		// ownership, making this free() safe in either case
4164 
4165 	return thread;
4166 }
4167 
4168 
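/*!	Implements exit(): records the team's exit status and sends the calling
	thread a SIGKILL, so that the team is torn down by the signal code on the
	way back to userland rather than right here.
*/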
4169 void
4170 _user_exit_team(status_t returnValue)
4171 {
4172 	Thread* thread = thread_get_current_thread();
4173 	Team* team = thread->team;
4174 
4175 	// set this thread's exit status
4176 	thread->exit.status = returnValue;
4177 
4178 	// set the team exit status
4179 	TeamLocker teamLocker(team);
4180 
4181 	if (!team->exit.initialized) {
4182 		team->exit.reason = CLD_EXITED;
4183 		team->exit.signal = 0;
4184 		team->exit.signaling_user = 0;
4185 		team->exit.status = returnValue;
4186 		team->exit.initialized = true;
4187 	}
4188 
4189 	teamLocker.Unlock();
4190 
	// Stop the thread if the team is being debugged and that has been
	// requested.
4193 	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
4194 		user_debug_stop_thread();
4195 
4196 	// Send this thread a SIGKILL. This makes sure the thread will not return to
4197 	// userland. The signal handling code forwards the signal to the main
4198 	// thread (if that's not already this one), which will take the team down.
4199 	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
4200 	send_signal_to_thread(thread, signal, 0);
4201 }
4202 
4203 
4204 status_t
4205 _user_kill_team(team_id team)
4206 {
4207 	return kill_team(team);
4208 }
4209 
4210 
4211 status_t
4212 _user_get_team_info(team_id id, team_info* userInfo)
4213 {
4214 	status_t status;
4215 	team_info info;
4216 
4217 	if (!IS_USER_ADDRESS(userInfo))
4218 		return B_BAD_ADDRESS;
4219 
4220 	status = _get_team_info(id, &info, sizeof(team_info));
4221 	if (status == B_OK) {
4222 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4223 			return B_BAD_ADDRESS;
4224 	}
4225 
4226 	return status;
4227 }
4228 
4229 
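/*!	Backs get_next_team_info(): copies the iteration cookie in from userland,
	advances it by one team, and copies both the cookie and the info back
	out.
*/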
4230 status_t
4231 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
4232 {
4233 	status_t status;
4234 	team_info info;
4235 	int32 cookie;
4236 
4237 	if (!IS_USER_ADDRESS(userCookie)
4238 		|| !IS_USER_ADDRESS(userInfo)
4239 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4240 		return B_BAD_ADDRESS;
4241 
4242 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
4243 	if (status != B_OK)
4244 		return status;
4245 
4246 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4247 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4248 		return B_BAD_ADDRESS;
4249 
4250 	return status;
4251 }
4252 
4253 
4254 team_id
4255 _user_get_current_team(void)
4256 {
4257 	return team_get_current_team_id();
4258 }
4259 
4260 
status_t
_user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
	size_t size)
{
	if (size != sizeof(team_usage_info))
		return B_BAD_VALUE;

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	team_usage_info info;
	status_t status = common_get_team_usage_info(team, who, &info,
		B_CHECK_PERMISSION);
	if (status != B_OK)
		return status;

	// copy the info back to userland only on success, so that we never leak
	// uninitialized stack contents
	if (user_memcpy(userInfo, &info, size) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
4279 
4280 
4281 status_t
4282 _user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
4283 	size_t size, size_t* _sizeNeeded)
4284 {
4285 	// check parameters
4286 	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
4287 		|| (buffer == NULL && size > 0)
4288 		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
4289 		return B_BAD_ADDRESS;
4290 	}
4291 
4292 	KMessage info;
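	// The reply is assembled as a KMessage, which lets the set of returned
	// fields grow without changing the syscall's signature (cf. the TODO
	// below).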
4293 
4294 	if ((flags & B_TEAM_INFO_BASIC) != 0) {
4295 		// allocate memory for a copy of the needed team data
4296 		struct ExtendedTeamData {
4297 			team_id	id;
4298 			pid_t	group_id;
4299 			pid_t	session_id;
4300 			uid_t	real_uid;
4301 			gid_t	real_gid;
4302 			uid_t	effective_uid;
4303 			gid_t	effective_gid;
4304 			char	name[B_OS_NAME_LENGTH];
4305 		};
4306 
4307 		ExtendedTeamData* teamClone
4308 			= (ExtendedTeamData*)malloc(sizeof(ExtendedTeamData));
4309 			// It would be nicer to use new, but then we'd have to use
4310 			// ObjectDeleter and declare the structure outside of the function
4311 			// due to template parameter restrictions.
4312 		if (teamClone == NULL)
4313 			return B_NO_MEMORY;
4314 		MemoryDeleter teamCloneDeleter(teamClone);
4315 
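		// We snapshot the needed fields into the clone while holding the
		// team lock and assemble the KMessage afterwards, so that no
		// allocations happen with the lock held.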
4316 		io_context* ioContext;
4317 		{
4318 			// get the team structure
4319 			Team* team = Team::GetAndLock(teamID);
4320 			if (team == NULL)
4321 				return B_BAD_TEAM_ID;
4322 			BReference<Team> teamReference(team, true);
4323 			TeamLocker teamLocker(team, true);
4324 
4325 			// copy the data
4326 			teamClone->id = team->id;
4327 			strlcpy(teamClone->name, team->Name(), sizeof(teamClone->name));
4328 			teamClone->group_id = team->group_id;
4329 			teamClone->session_id = team->session_id;
4330 			teamClone->real_uid = team->real_uid;
4331 			teamClone->real_gid = team->real_gid;
4332 			teamClone->effective_uid = team->effective_uid;
4333 			teamClone->effective_gid = team->effective_gid;
4334 
4335 			// also fetch a reference to the I/O context
4336 			ioContext = team->io_context;
4337 			vfs_get_io_context(ioContext);
4338 		}
4339 		CObjectDeleter<io_context> ioContextPutter(ioContext,
4340 			&vfs_put_io_context);
4341 
4342 		// add the basic data to the info message
4343 		if (info.AddInt32("id", teamClone->id) != B_OK
4344 			|| info.AddString("name", teamClone->name) != B_OK
4345 			|| info.AddInt32("process group", teamClone->group_id) != B_OK
4346 			|| info.AddInt32("session", teamClone->session_id) != B_OK
4347 			|| info.AddInt32("uid", teamClone->real_uid) != B_OK
4348 			|| info.AddInt32("gid", teamClone->real_gid) != B_OK
4349 			|| info.AddInt32("euid", teamClone->effective_uid) != B_OK
4350 			|| info.AddInt32("egid", teamClone->effective_gid) != B_OK) {
4351 			return B_NO_MEMORY;
4352 		}
4353 
4354 		// get the current working directory from the I/O context
4355 		dev_t cwdDevice;
4356 		ino_t cwdDirectory;
4357 		{
4358 			MutexLocker ioContextLocker(ioContext->io_mutex);
4359 			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
4360 		}
4361 
4362 		if (info.AddInt32("cwd device", cwdDevice) != B_OK
4363 			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
4364 			return B_NO_MEMORY;
4365 		}
4366 	}
4367 
4368 	// TODO: Support the other flags!
4369 
4370 	// copy the needed size and, if it fits, the message back to userland
4371 	size_t sizeNeeded = info.ContentSize();
4372 	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
4373 		return B_BAD_ADDRESS;
4374 
4375 	if (sizeNeeded > size)
4376 		return B_BUFFER_OVERFLOW;
4377 
4378 	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
4379 		return B_BAD_ADDRESS;
4380 
4381 	return B_OK;
4382 }
4383