/* xref: /haiku/src/system/kernel/team.cpp (revision 922e7ba1f3228e6f28db69b0ded8f86eb32dea17) */
/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*!	Team functions */


#include <team.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>

#include <OS.h>

#include <AutoDeleter.h>
#include <FindDirectory.h>

#include <extended_system_info_defs.h>

#include <boot_device.h>
#include <elf.h>
#include <file_cache.h>
#include <fs/KPath.h>
#include <heap.h>
#include <int.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <Notifications.h>
#include <port.h>
#include <posix/realtime_sem.h>
#include <posix/xsi_semaphore.h>
#include <sem.h>
#include <syscall_process_info.h>
#include <syscall_restart.h>
#include <syscalls.h>
#include <tls.h>
#include <tracing.h>
#include <user_runtime.h>
#include <user_thread.h>
#include <usergroup.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <util/AutoLock.h>

#include "TeamThreadTables.h"


//#define TRACE_TEAM
#ifdef TRACE_TEAM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


struct team_key {
	team_id id;
};

struct team_arg {
	char	*path;
	char	**flat_args;
	size_t	flat_args_size;
	uint32	arg_count;
	uint32	env_count;
	mode_t	umask;
	port_id	error_port;
	uint32	error_token;
};


namespace {


class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};


// #pragma mark - TeamTable


typedef BKernel::TeamThreadTable<Team> TeamTable;


// #pragma mark - ProcessGroupHashDefinition


struct ProcessGroupHashDefinition {
	typedef pid_t			KeyType;
	typedef	ProcessGroup	ValueType;

	size_t HashKey(pid_t key) const
	{
		return key;
	}

	size_t Hash(ProcessGroup* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(pid_t key, ProcessGroup* value) const
	{
		return value->id == key;
	}

	ProcessGroup*& GetLink(ProcessGroup* value) const
	{
		return value->next;
	}
};

typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
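
// Usage sketch (illustrative, not part of the original flow): the hash
// definition above is what lets BOpenHashTable map a pid_t to its
// ProcessGroup, as done elsewhere in this file:
//
//     ProcessGroup* group = sGroupHash.Lookup(id);
//     if (group == NULL)
//         ;	// no such group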


}	// unnamed namespace


// #pragma mark -


// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static spinlock sTeamHashLock = B_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

static Team* sKernelTeam = NULL;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

static TeamNotificationService sNotificationService;


// #pragma mark - TeamListIterator


TeamListIterator::TeamListIterator()
{
	// queue the entry
	InterruptsSpinLocker locker(sTeamHashLock);
	sTeamHash.InsertIteratorEntry(&fEntry);
}


TeamListIterator::~TeamListIterator()
{
	// remove the entry
	InterruptsSpinLocker locker(sTeamHashLock);
	sTeamHash.RemoveIteratorEntry(&fEntry);
}


Team*
TeamListIterator::Next()
{
	// get the next team -- if there is one, get reference for it
	InterruptsSpinLocker locker(sTeamHashLock);
	Team* team = sTeamHash.NextElement(&fEntry);
	if (team != NULL)
		team->AcquireReference();

	return team;
}


// #pragma mark - Tracing


#if TEAM_TRACING
namespace TeamTracing {

class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %ld", fForkedThread);
	}

private:
	thread_id			fForkedThread;
};


class ExecTeam : public AbstractTraceEntry {
public:
	ExecTeam(const char* path, int32 argCount, const char* const* args,
			int32 envCount, const char* const* env)
		:
		fArgCount(argCount),
		fArgs(NULL)
	{
		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
			false);

		// determine the buffer size we need for the args
		size_t argBufferSize = 0;
		for (int32 i = 0; i < argCount; i++)
			argBufferSize += strlen(args[i]) + 1;

		// allocate a buffer
		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
		if (fArgs) {
			char* buffer = fArgs;
			for (int32 i = 0; i < argCount; i++) {
				size_t argSize = strlen(args[i]) + 1;
				memcpy(buffer, args[i], argSize);
				buffer += argSize;
			}
		}

		// ignore env for the time being
		(void)envCount;
		(void)env;

		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
255 		out.Print("team exec, \"%p\", args:", fPath);

		if (fArgs != NULL) {
			char* args = fArgs;
			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
				out.Print(" \"%s\"", args);
				args += strlen(args) + 1;
			}
		} else
			out.Print(" <too long>");
	}

private:
	char*	fPath;
	int32	fArgCount;
	char*	fArgs;
};


static const char*
job_control_state_name(job_control_state state)
{
	switch (state) {
		case JOB_CONTROL_STATE_NONE:
			return "none";
		case JOB_CONTROL_STATE_STOPPED:
			return "stopped";
		case JOB_CONTROL_STATE_CONTINUED:
			return "continued";
		case JOB_CONTROL_STATE_DEAD:
			return "dead";
		default:
			return "invalid";
	}
}


class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %ld, "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;
};


class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %ld, "
			"flags: 0x%lx", fChild, fFlags);
	}

private:
	pid_t	fChild;
	uint32	fFlags;
};


class WaitForChildDone : public AbstractTraceEntry {
public:
	WaitForChildDone(const job_control_entry& entry)
		:
		fState(entry.state),
		fTeam(entry.thread),
		fStatus(entry.status),
		fReason(entry.reason),
		fSignal(entry.signal)
	{
		Initialized();
	}

	WaitForChildDone(status_t error)
		:
		fTeam(error)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		if (fTeam >= 0) {
			out.Print("team wait for child done, team: %ld, "
				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
				fTeam, job_control_state_name(fState), fStatus, fReason,
				fSignal);
		} else {
			out.Print("team wait for child failed, error: "
				"0x%lx, ", fTeam);
		}
	}

private:
	job_control_state	fState;
	team_id				fTeam;
	status_t			fStatus;
	uint16				fReason;
	uint16				fSignal;
};

}	// namespace TeamTracing

#	define T(x) new(std::nothrow) TeamTracing::x;
#else
#	define T(x) ;
#endif


//	#pragma mark - TeamNotificationService


TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}


void
TeamNotificationService::Notify(uint32 eventCode, Team* team)
{
	char eventBuffer[128];
	KMessage event;
	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
	event.AddInt32("event", eventCode);
	event.AddInt32("team", team->id);
	event.AddPointer("teamStruct", team);

	DefaultNotificationService::Notify(event, eventCode);
}
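
// Usage sketch (illustrative): this is how the rest of this file publishes
// team events to registered listeners, e.g. in load_image_internal() and
// fork_team() below:
//
//     sNotificationService.Notify(TEAM_ADDED, team);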


//	#pragma mark - Team


Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;
	serial_number = -1;

	// init mutex
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	hash_next = siblings_next = children = parent = NULL;
	fName[0] = '\0';
	fArgs[0] = '\0';
	num_threads = 0;
	io_context = NULL;
	address_space = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	thread_list = NULL;
	main_thread = NULL;
	loading_info = NULL;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	death_entry = NULL;
	user_data_area = -1;
	user_data = 0;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	supplementary_groups = NULL;
	supplementary_group_count = 0;

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;

	// dead threads
	list_init(&dead_threads);
	dead_threads_count = 0;

	// dead children
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// job control entry
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	list_init(&sem_list);
	list_init(&port_list);
	list_init(&image_list);
	list_init(&watcher_list);

	clear_team_debug_info(&debug_info, true);

	// init dead/stopped/continued children condition vars
	dead_children.condition_variable.Init(&dead_children, "team children");

	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));

	fUserDefinedTimerCount = 0;
}


Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	malloc_referenced_release(supplementary_groups);

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}


/*static*/ Team*
Team::Create(team_id id, const char* name, bool kernel)
{
	// create the team object
	Team* team = new(std::nothrow) Team(id, kernel);
	if (team == NULL)
		return NULL;
	ObjectDeleter<Team> teamDeleter(team);

	if (name != NULL)
		team->SetName(name);

	// check initialization
	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
		return NULL;

	// finish initialization (arch specifics)
	if (arch_team_init_team_struct(team, kernel) != B_OK)
		return NULL;

	if (!kernel) {
		status_t error = user_timer_create_team_timers(team);
		if (error != B_OK)
			return NULL;
	}

	// everything went fine
	return teamDeleter.Detach();
}


/*!	\brief Returns the team with the given ID.
	Returns a reference to the team.
	Team and thread spinlock must not be held.
*/
/*static*/ Team*
Team::Get(team_id id)
{
	if (id == B_CURRENT_TEAM) {
		Team* team = thread_get_current_thread()->team;
		team->AcquireReference();
		return team;
	}

	InterruptsSpinLocker locker(sTeamHashLock);
	Team* team = sTeamHash.Lookup(id);
	if (team != NULL)
		team->AcquireReference();
	return team;
}
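
// Usage sketch (illustrative; `someTeamID` is a hypothetical ID): callers own
// the acquired reference and typically hand it to a BReference for automatic
// release, as load_image_internal() below does:
//
//     Team* team = Team::Get(someTeamID);
//     if (team == NULL)
//         return B_BAD_TEAM_ID;
//     BReference<Team> teamReference(team, true);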


/*!	\brief Returns the team with the given ID in a locked state.
	Returns a reference to the team.
	Team and thread spinlock must not be held.
*/
/*static*/ Team*
Team::GetAndLock(team_id id)
{
	// get the team
	Team* team = Get(id);
	if (team == NULL)
		return NULL;

	// lock it
	team->Lock();

	// only return the team, when it isn't already dying
	if (team->state >= TEAM_STATE_SHUTDOWN) {
		team->Unlock();
		team->ReleaseReference();
		return NULL;
	}

	return team;
}
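
// Usage sketch (illustrative; `someTeamID` is a hypothetical ID): the caller
// gets back a team that is both referenced and locked, and has to undo both:
//
//     Team* team = Team::GetAndLock(someTeamID);
//     if (team == NULL)
//         return B_BAD_TEAM_ID;
//     // ... operate on the locked team ...
//     team->Unlock();
//     team->ReleaseReference();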


/*!	Locks the team and its parent team (if any).
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked. If the
	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
	only the team itself is locked.

	\param dontLockParentIfKernel If \c true, the team's parent team is only
		locked if it is not the kernel team.
*/
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent. This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);

		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}
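
// Usage sketch (illustrative): LockTeamAndParent() pairs with
// UnlockTeamAndParent(), e.g. as ProcessGroup::IsOrphaned() below uses it:
//
//     team->LockTeamAndParent(false);
//     // ... inspect team->parent safely ...
//     team->UnlockTeamAndParent();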


/*!	Unlocks the team and its parent team (if any).
*/
void
Team::UnlockTeamAndParent()
{
	if (parent != NULL)
		parent->Unlock();

	Unlock();
}


/*!	Locks the team, its parent team (if any), and the team's process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked.
*/
void
Team::LockTeamParentAndProcessGroup()
{
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's lock.
	// If we have a parent, try to lock it.
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest of
	// the job.
	Unlock();
	LockTeamAndParent(false);
}


/*!	Unlocks the team, its parent team (if any), and the team's process group.
*/
void
Team::UnlockTeamParentAndProcessGroup()
{
	group->Unlock();

	if (parent != NULL)
		parent->Unlock();

	Unlock();
}


void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);

		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}


void
Team::UnlockTeamAndProcessGroup()
{
	group->Unlock();
	Unlock();
}


void
Team::SetName(const char* name)
{
	if (const char* lastSlash = strrchr(name, '/'))
		name = lastSlash + 1;

	strlcpy(fName, name, B_OS_NAME_LENGTH);
}


void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}


void
Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
{
	fArgs[0] = '\0';
	strlcpy(fArgs, path, sizeof(fArgs));
	for (int i = 0; i < otherArgCount; i++) {
		strlcat(fArgs, " ", sizeof(fArgs));
		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
	}
}


void
Team::ResetSignalsOnExec()
{
	// We are supposed to keep pending signals. Signal actions shall be reset
	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
	// (for SIGCHLD it's implementation-defined). Others shall be reset to
	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
	// flags, but since there aren't any handlers, they make little sense, so
	// we clear them.

	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
		struct sigaction& action = SignalActionFor(i);
		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
			action.sa_handler = SIG_DFL;

		action.sa_mask = 0;
		action.sa_flags = 0;
		action.sa_userdata = NULL;
	}
}


void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}


/*!	Adds the given user timer to the team and, if user-defined, assigns it an
	ID.

	The caller must hold the team's lock.

	\param timer The timer to be added. If it doesn't have an ID yet, it is
		considered user-defined and will be assigned an ID.
	\return \c B_OK, if the timer was added successfully, another error code
		otherwise.
*/
status_t
Team::AddUserTimer(UserTimer* timer)
{
	// don't allow addition of timers when already shutting the team down
	if (state >= TEAM_STATE_SHUTDOWN)
		return B_BAD_TEAM_ID;

	// If the timer is user-defined, check timer limit and increment
	// user-defined count.
	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
		return EAGAIN;

	fUserTimers.AddTimer(timer);

	return B_OK;
}
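
// Usage sketch (illustrative; `timer` is a hypothetical UserTimer created by
// the caller): the team lock must be held across the call, e.g.:
//
//     TeamLocker teamLocker(team);
//     status_t error = team->AddUserTimer(timer);
//     if (error != B_OK)
//         ;	// EAGAIN (timer limit) or B_BAD_TEAM_ID (team shutting down)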


/*!	Removes the given user timer from the team.

	The caller must hold the team's lock.

	\param timer The timer to be removed.
*/
void
Team::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		UserDefinedTimersRemoved(1);
}


/*!	Deletes all (or all user-defined) user timers of the team.

	Timers belonging to the team's threads are not affected.
	The caller must hold the team's lock.

	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
		otherwise all timers are deleted.
*/
void
Team::DeleteUserTimers(bool userDefinedOnly)
{
	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
	UserDefinedTimersRemoved(count);
}


/*!	If not at the limit yet, increments the team's user-defined timer count.
	\return \c true, if the limit wasn't reached yet, \c false otherwise.
*/
bool
Team::CheckAddUserDefinedTimer()
{
	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
		atomic_add(&fUserDefinedTimerCount, -1);
		return false;
	}

	return true;
}


/*!	Subtracts the given count from the team's user-defined timer count.
	\param count The count to subtract.
*/
void
Team::UserDefinedTimersRemoved(int32 count)
{
	atomic_add(&fUserDefinedTimerCount, -count);
}


void
Team::DeactivateCPUTimeUserTimers()
{
	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();

	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
		timer->Deactivate();
}


/*!	Returns the team's current total CPU time (kernel + user + offset).

	The caller must hold the scheduler lock.

	\param ignoreCurrentRun If \c true and the current thread is one of the
		team's threads, don't add the time since the last time \c last_time was
		updated. Should be used in "thread unscheduled" scheduler callbacks,
		since although the thread is still running at that time, its time has
		already been stopped.
	\return The team's current total CPU time.
*/
bigtime_t
Team::CPUTime(bool ignoreCurrentRun) const
{
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->kernel_time + thread->user_time;

		if (thread->IsRunning()) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}
	}

	return time;
}
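
// Usage sketch (illustrative): since the method requires the scheduler lock,
// a caller would wrap the call roughly like this:
//
//     InterruptsSpinLocker schedulerLocker(gSchedulerLock);
//     bigtime_t cpuTime = team->CPUTime(false);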


/*!	Returns the team's current user CPU time.

	The caller must hold the scheduler lock.

	\return The team's current user CPU time.
*/
bigtime_t
Team::UserCPUTime() const
{
	bigtime_t time = dead_threads_user_time;

	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->user_time;

		if (thread->IsRunning() && !thread->in_kernel)
			time += now - thread->last_time;
	}

	return time;
}


//	#pragma mark - ProcessGroup


ProcessGroup::ProcessGroup(pid_t id)
	:
	id(id),
	teams(NULL),
	fSession(NULL),
	fInOrphanedCheckList(false)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}


ProcessGroup::~ProcessGroup()
{
979 	TRACE(("ProcessGroup::~ProcessGroup(): id = %ld\n", group->id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}


/*static*/ ProcessGroup*
ProcessGroup::Get(pid_t id)
{
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	ProcessGroup* group = sGroupHash.Lookup(id);
	if (group != NULL)
		group->AcquireReference();
	return group;
}


/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must not hold the process group hash lock.
*/
void
ProcessGroup::Publish(ProcessSession* session)
{
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	PublishLocked(session);
}


/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must hold the process group hash lock.
*/
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	fSession = session;
	fSession->AcquireReference();

	sGroupHash.InsertUnchecked(this);
}


/*!	Checks whether the process group is orphaned.
	The caller must hold the group's lock.
	\return \c true, if the group is orphaned, \c false otherwise.
*/
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}
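
// Example (illustrative): a shell is a session leader; a pipeline it starts
// forms its own process group. While the shell lives, that group is not
// orphaned -- the members' parent (the shell) is in the same session but
// outside the group. Once the shell exits and the members are reparented
// outside the session, the group becomes orphaned.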


void
ProcessGroup::ScheduleOrphanedCheck()
{
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (!fInOrphanedCheckList) {
		sOrphanedCheckProcessGroups.Add(this);
		fInOrphanedCheckList = true;
	}
}


void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}


//	#pragma mark - ProcessSession


ProcessSession::ProcessSession(pid_t id)
	:
	id(id),
	controlling_tty(-1),
	foreground_group(-1)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}


ProcessSession::~ProcessSession()
{
	mutex_destroy(&fLock);
}


//	#pragma mark - KDL functions


static void
_dump_team_info(Team* team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:               %ld (%#lx)\n", team->id, team->id);
	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
	kprintf("name:             '%s'\n", team->Name());
	kprintf("args:             '%s'\n", team->Args());
	kprintf("hash_next:        %p\n", team->hash_next);
	kprintf("parent:           %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %ld)\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:         %p\n", team->children);
	kprintf("num_threads:      %d\n", team->num_threads);
	kprintf("state:            %d\n", team->state);
	kprintf("flags:            0x%lx\n", team->flags);
	kprintf("io_context:       %p\n", team->io_context);
	if (team->address_space)
		kprintf("address_space:    %p\n", team->address_space);
	kprintf("user data:        %p (area %ld)\n", (void*)team->user_data,
		team->user_data_area);
	kprintf("free user thread: %p\n", team->free_user_threads);
	kprintf("main_thread:      %p\n", team->main_thread);
	kprintf("thread_list:      %p\n", team->thread_list);
	kprintf("group_id:         %ld\n", team->group_id);
	kprintf("session_id:       %ld\n", team->session_id);
}


static int
dump_team_info(int argc, char** argv)
{
	team_id id = -1;
	bool found = false;

	if (argc < 2) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	id = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(id)) {
		// semi-hack
		_dump_team_info((Team*)id);
		return 0;
	}

	// walk through the thread list, trying to match name or id
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == id) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found)
		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
	return 0;
}


static int
dump_teams(int argc, char** argv)
{
	kprintf("team           id  parent      name\n");

	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->Name());
	}

	return 0;
}


//	#pragma mark - Private functions


/*!	Inserts team \a team into the child list of team \a parent.

	The caller must hold the lock of both \a parent and \a team.

	\param parent The parent team.
	\param team The team to be inserted into \a parent's child list.
*/
static void
insert_team_into_parent(Team* parent, Team* team)
{
	ASSERT(parent != NULL);

	team->siblings_next = parent->children;
	parent->children = team;
	team->parent = parent;
}


/*!	Removes team \a team from the child list of team \a parent.

	The caller must hold the lock of both \a parent and \a team.

	\param parent The parent team.
	\param team The team to be removed from \a parent's child list.
*/
static void
remove_team_from_parent(Team* parent, Team* team)
{
	Team* child;
	Team* last = NULL;

	for (child = parent->children; child != NULL;
			child = child->siblings_next) {
		if (child == team) {
			if (last == NULL)
				parent->children = child->siblings_next;
			else
				last->siblings_next = child->siblings_next;

			team->parent = NULL;
			break;
		}
		last = child;
	}
}


/*!	Returns whether the given team is a session leader.
	The caller must hold the team's lock or its process group's lock.
*/
static bool
is_session_leader(Team* team)
{
	return team->session_id == team->id;
}


/*!	Returns whether the given team is a process group leader.
	The caller must hold the team's lock or its process group's lock.
*/
static bool
is_process_group_leader(Team* team)
{
	return team->group_id == team->id;
}


/*!	Inserts the given team into the given process group.
	The caller must hold the process group's lock, the team's lock, and the
	team's parent's lock.
*/
static void
insert_team_into_group(ProcessGroup* group, Team* team)
{
	team->group = group;
	team->group_id = group->id;
	team->session_id = group->Session()->id;

	team->group_next = group->teams;
	group->teams = team;
	group->AcquireReference();
}


/*!	Removes the given team from its process group.

	The caller must hold the process group's lock, the team's lock, and the
	team's parent's lock. Interrupts must be enabled.

	\param team The team that'll be removed from its process group.
*/
static void
remove_team_from_group(Team* team)
{
	ProcessGroup* group = team->group;
	Team* current;
	Team* last = NULL;

	// the team must be in a process group to let this function have any effect
	if (group == NULL)
		return;

	for (current = group->teams; current != NULL;
			current = current->group_next) {
		if (current == team) {
			if (last == NULL)
				group->teams = current->group_next;
			else
				last->group_next = current->group_next;

			team->group = NULL;
			break;
		}
		last = current;
	}

	team->group = NULL;
	team->group_next = NULL;

	group->ReleaseReference();
}


static status_t
create_team_user_data(Team* team)
{
	void* address;
	size_t size = 4 * B_PAGE_SIZE;
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
	virtualRestrictions.address_specification = B_BASE_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area", size,
		B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, &virtualRestrictions,
		&physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = size;
	team->free_user_threads = NULL;

	return B_OK;
}


static void
delete_team_user_data(Team* team)
{
	if (team->user_data_area >= 0) {
		vm_delete_area(team->id, team->user_data_area, true);
		team->user_data = 0;
		team->used_user_data = 0;
		team->user_data_size = 0;
		team->user_data_area = -1;
		while (free_user_thread* entry = team->free_user_threads) {
			team->free_user_threads = entry->next;
			free(entry);
		}
	}
}


static status_t
copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, char**& _flatArgs)
{
	if (argCount < 0 || envCount < 0)
		return B_BAD_VALUE;

	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
		return B_TOO_MANY_ARGS;
	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
		return B_BAD_VALUE;
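
	// Layout of the flat buffer (descriptive note): first the argCount
	// argument pointers plus a NULL terminator, then the envCount environment
	// pointers plus a NULL terminator -- the argCount + envCount + 2 pointers
	// checked above -- followed by the string data those pointers refer to.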

	if (!IS_USER_ADDRESS(userFlatArgs))
		return B_BAD_ADDRESS;

	// allocate kernel memory
	char** flatArgs = (char**)malloc(flatArgsSize);
	if (flatArgs == NULL)
		return B_NO_MEMORY;

	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
		free(flatArgs);
		return B_BAD_ADDRESS;
	}

	// check and relocate the array
	status_t error = B_OK;
	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
	const char* stringEnd = (char*)flatArgs + flatArgsSize;
	for (int32 i = 0; i < argCount + envCount + 2; i++) {
		if (i == argCount || i == argCount + envCount + 1) {
			// check array null termination
			if (flatArgs[i] != NULL) {
				error = B_BAD_VALUE;
				break;
			}
		} else {
			// check string
			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
			size_t maxLen = stringEnd - arg;
			if (arg < stringBase || arg >= stringEnd
					|| strnlen(arg, maxLen) == maxLen) {
				error = B_BAD_VALUE;
				break;
			}

			flatArgs[i] = arg;
		}
	}

	if (error == B_OK)
		_flatArgs = flatArgs;
	else
		free(flatArgs);

	return error;
}


static void
free_team_arg(struct team_arg* teamArg)
{
	if (teamArg != NULL) {
		free(teamArg->flat_args);
		free(teamArg->path);
		free(teamArg);
	}
}


static status_t
create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
	port_id port, uint32 token)
{
	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
	if (teamArg == NULL)
		return B_NO_MEMORY;

	teamArg->path = strdup(path);
	if (teamArg->path == NULL) {
		free(teamArg);
		return B_NO_MEMORY;
	}

	// copy the args over

	teamArg->flat_args = flatArgs;
	teamArg->flat_args_size = flatArgsSize;
	teamArg->arg_count = argCount;
	teamArg->env_count = envCount;
	teamArg->umask = umask;
	teamArg->error_port = port;
	teamArg->error_token = token;

	*_teamArg = teamArg;
	return B_OK;
}


static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %ld\n", thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size								| usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
	// TLS_SIZE							| TLS data
	// sizeof(user_space_program_args)	| argument structure for the runtime
	//									| loader
	// flat arguments size				| flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// set team args and update state
	team->Lock();
	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	// enter userspace -- returns only in case of error
	return thread_enter_userspace_new_team(thread, (addr_t)entry,
		programArgs, NULL);
}


static status_t
team_create_thread_start(void* args)
{
	team_create_thread_start_internal(args);
	team_init_exit_info_on_error(thread_get_current_thread()->team);
	thread_exit();
		// does not return
	return B_OK;
}


static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	thread_id thread;
	status_t status;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	io_context* parentIOContext = NULL;
	team_id teamID;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %ld\n",
		path, flatArgs, argCount));

	// cut the path from the main thread name
	const char* threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the main thread object
	Thread* mainThread;
	status = Thread::Create(threadName, mainThread);
	if (status != B_OK)
		return status;
	BReference<Thread> mainThreadReference(mainThread, true);

	// create team object
	Team* team = Team::Create(mainThread->id, path, false);
	if (team == NULL)
		return B_NO_MEMORY;
	BReference<Team> teamReference(team, true);

	if (flags & B_WAIT_TILL_LOADED) {
		loadingInfo.thread = thread_get_current_thread();
		loadingInfo.result = B_ERROR;
		loadingInfo.done = false;
		team->loading_info = &loadingInfo;
	}

	// get the parent team
	Team* parent = Team::Get(parentID);
	if (parent == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> parentReference(parent, true);

	parent->LockTeamAndProcessGroup();
	team->Lock();

	// inherit the parent's user/group
	inherit_parent_user_and_group(team, parent);

	InterruptsSpinLocker teamsLocker(sTeamHashLock);

	sTeamHash.Insert(team);
	sUsedTeams++;

	teamsLocker.Unlock();

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err3;

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err5;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		InterruptsSpinLocker schedulerLocker(gSchedulerLock);

		// resume the team's main thread
		if (mainThread != NULL && mainThread->state == B_THREAD_SUSPENDED)
			scheduler_enqueue_in_run_queue(mainThread);

		// Now suspend ourselves until loading is finished. We will be woken
		// either by the thread, when it finished or aborted loading, or when
		// the team is going to die (e.g. is killed). In either case the one
		// setting `loadingInfo.done' is responsible for removing the info from
		// the team structure.
		while (!loadingInfo.done) {
			thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
			scheduler_reschedule();
		}

		schedulerLocker.Unlock();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(teamID);

	return thread;

err5:
	delete_team_user_data(team);
err4:
	team->address_space->Put();
err3:
	vfs_put_io_context(team->io_context);
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	teamsLocker.Lock();
	sTeamHash.Remove(team);
	teamsLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	return status;
}


/*!	Almost shuts down the current team and loads a new image into it.
	If successful, this function does not return and takes over ownership of
	the arguments provided.
	This function may only be called in a userland team (caused by one of the
	exec*() syscalls).
*/
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
		path, argCount, envCount, team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		// TODO: We should better keep the old user area in the first place.
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
			// never returns
	} else
		free_team_arg(teamArgs);

	// Sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that).
	exit_thread(status);

	// We return a status here since the signal that is sent by the
	// call above is not immediately handled.
	return B_ERROR;
}


static thread_id
fork_team(void)
{
	Thread* parentThread = thread_get_current_thread();
	Team* parentTeam = parentThread->team;
	Team* team;
	arch_fork_arg* forkArgs;
	struct area_info info;
	thread_id threadID;
	status_t status;
	int32 cookie;

	TRACE(("fork_team(): team %ld\n", parentTeam->id));

	if (parentTeam == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// create a new team
	// TODO: this is very similar to load_image_internal() - maybe we can do
	// something about it :)

	// create the main thread object
	Thread* thread;
	status = Thread::Create(parentThread->name, thread);
	if (status != B_OK)
		return status;
	BReference<Thread> threadReference(thread, true);

	// create the team object
	team = Team::Create(thread->id, NULL, false);
	if (team == NULL)
		return B_NO_MEMORY;

	parentTeam->LockTeamAndProcessGroup();
	team->Lock();

	team->SetName(parentTeam->Name());
	team->SetArgs(parentTeam->Args());

	// Inherit the parent's user/group.
	inherit_parent_user_and_group(team, parentTeam);

	// inherit signal handlers
	team->InheritSignalActions(parentTeam);

	InterruptsSpinLocker teamsLocker(sTeamHashLock);

	sTeamHash.Insert(team);
	sUsedTeams++;

	teamsLocker.Unlock();

	insert_team_into_parent(parentTeam, team);
	insert_team_into_group(parentTeam->group, team);

	team->Unlock();
	parentTeam->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	// inherit some team debug flags
	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
		& B_TEAM_DEBUG_INHERITED_FLAGS;

	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
	if (forkArgs == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// duplicate the realtime sem context
	if (parentTeam->realtime_sem_context) {
		team->realtime_sem_context = clone_realtime_sem_context(
			parentTeam->realtime_sem_context);
		if (team->realtime_sem_context == NULL) {
			status = B_NO_MEMORY;
			goto err25;
		}
	}

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status < B_OK)
		goto err3;

	// copy all areas of the team
	// TODO: should be able to handle stack areas differently (ie. don't have
	// them copy-on-write)

	cookie = 0;
	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
		if (info.area == parentTeam->user_data_area) {
			// don't clone the user area; just create a new one
			status = create_team_user_data(team);
			if (status != B_OK)
				break;

			thread->user_thread = team_allocate_user_thread(team);
		} else {
			void* address;
			area_id area = vm_copy_area(team->address_space->ID(), info.name,
				&address, B_CLONE_ADDRESS, info.protection, info.area);
			if (area < B_OK) {
				status = area;
				break;
			}

			if (info.area == parentThread->user_stack_area)
				thread->user_stack_area = area;
		}
	}

	if (status < B_OK)
		goto err4;

	if (thread->user_thread == NULL) {
#if KDEBUG
		panic("user data area not found, parent area is %ld",
			parentTeam->user_data_area);
#endif
		status = B_ERROR;
		goto err4;
	}

	thread->user_stack_base = parentThread->user_stack_base;
	thread->user_stack_size = parentThread->user_stack_size;
	thread->user_local_storage = parentThread->user_local_storage;
	thread->sig_block_mask = parentThread->sig_block_mask;
	thread->signal_stack_base = parentThread->signal_stack_base;
	thread->signal_stack_size = parentThread->signal_stack_size;
	thread->signal_stack_enabled = parentThread->signal_stack_enabled;

	arch_store_fork_frame(forkArgs);

	// copy image list
	image_info imageInfo;
	cookie = 0;
	while (get_next_image_info(parentTeam->id, &cookie, &imageInfo) == B_OK) {
		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
		if (image < 0)
			goto err5;
	}

	// create the main thread
	{
		ThreadCreationAttributes threadCreationAttributes(NULL,
			parentThread->name, parentThread->priority, NULL, team->id, thread);
		threadCreationAttributes.forkArgs = forkArgs;
		threadID = thread_create_thread(threadCreationAttributes, false);
		if (threadID < 0) {
			status = threadID;
			goto err5;
		}
	}

	// notify the debugger
	user_debug_team_created(team->id);

	T(TeamForked(threadID));

	resume_thread(threadID);
	return threadID;

err5:
	remove_images(team);
err4:
	team->address_space->RemoveAndPut();
err3:
	delete_realtime_sem_context(team->realtime_sem_context);
err25:
	vfs_put_io_context(team->io_context);
err2:
	free(forkArgs);
err1:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parentTeam->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
2120 	remove_team_from_parent(team->parent, team);
2121 
2122 	team->Unlock();
2123 	parentTeam->UnlockTeamAndProcessGroup();
2124 
2125 	teamsLocker.Lock();
2126 	sTeamHash.Remove(team);
2127 	teamsLocker.Unlock();
2128 
2129 	sNotificationService.Notify(TEAM_REMOVED, team);
2130 
2131 	team->ReleaseReference();
2132 
2133 	return status;
2134 }
2135 
2136 
2137 /*!	Returns whether the specified team \a parent has any children belonging
2138 	to the process group with the specified ID \a groupID.
2139 	The caller must hold \a parent's lock.
2140 */
2141 static bool
2142 has_children_in_group(Team* parent, pid_t groupID)
2143 {
2144 	for (Team* child = parent->children; child != NULL;
2145 			child = child->siblings_next) {
2146 		TeamLocker childLocker(child);
2147 		if (child->group_id == groupID)
2148 			return true;
2149 	}
2150 
2151 	return false;
2152 }
2153 
2154 
2155 /*!	Returns the first job control entry from \a children, which matches \a id.
2156 	\a id can be:
2157 	- \code > 0 \endcode: Matching an entry with that team ID.
2158 	- \code == -1 \endcode: Matching any entry.
2159 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2160 	\c 0 is an invalid value for \a id.
2161 
2162 	The caller must hold the lock of the team that \a children belongs to.
2163 
2164 	\param children The job control entry list to check.
2165 	\param id The match criterion.
2166 	\return The first matching entry or \c NULL, if none matches.
2167 */
2168 static job_control_entry*
2169 get_job_control_entry(team_job_control_children& children, pid_t id)
2170 {
2171 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2172 		 job_control_entry* entry = it.Next();) {
2173 
2174 		if (id > 0) {
2175 			if (entry->thread == id)
2176 				return entry;
2177 		} else if (id == -1) {
2178 			return entry;
2179 		} else {
2180 			pid_t processGroup
2181 				= (entry->team ? entry->team->group_id : entry->group_id);
2182 			if (processGroup == -id)
2183 				return entry;
2184 		}
2185 	}
2186 
2187 	return NULL;
2188 }
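
/*	For illustration: the id convention above mirrors waitpid()'s pid
	argument. E.g. a caller looking for any dead child in process group 123
	(a made-up group ID) would do:

		job_control_entry* entry
			= get_job_control_entry(team->dead_children, -123);

	holding the team's lock as documented above. A NULL result means no
	child of that group has a pending job control state.
*/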
2189 
2190 
2191 /*!	Returns the first job control entry from one of the team's dead,
2192 	continued, or stopped children that matches \a id.
2193 	\a id can be:
2194 	- \code > 0 \endcode: Matching an entry with that team ID.
2195 	- \code == -1 \endcode: Matching any entry.
2196 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2197 	\c 0 is an invalid value for \a id.
2198 
2199 	The caller must hold \a team's lock.
2200 
2201 	\param team The team whose dead, stopped, and continued child lists shall be
2202 		checked.
2203 	\param id The match criterion.
2204 	\param flags Specifies which children shall be considered. Dead children
2205 		always are. Stopped children are considered only when \a flags
2206 		includes \c WUNTRACED, continued children only when \a flags
2207 		includes \c WCONTINUED.
2208 	\return The first matching entry or \c NULL, if none matches.
2209 */
2210 static job_control_entry*
2211 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2212 {
2213 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
2214 
2215 	if (entry == NULL && (flags & WCONTINUED) != 0)
2216 		entry = get_job_control_entry(team->continued_children, id);
2217 
2218 	if (entry == NULL && (flags & WUNTRACED) != 0)
2219 		entry = get_job_control_entry(team->stopped_children, id);
2220 
2221 	return entry;
2222 }
2223 
2224 
2225 job_control_entry::job_control_entry()
2226 	:
2227 	has_group_ref(false)
2228 {
2229 }
2230 
2231 
2232 job_control_entry::~job_control_entry()
2233 {
2234 	if (has_group_ref) {
2235 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2236 
2237 		ProcessGroup* group = sGroupHash.Lookup(group_id);
2238 		if (group == NULL) {
2239 			panic("job_control_entry::~job_control_entry(): unknown group "
2240 				"ID: %ld", group_id);
2241 			return;
2242 		}
2243 
2244 		groupHashLocker.Unlock();
2245 
2246 		group->ReleaseReference();
2247 	}
2248 }
2249 
2250 
2251 /*!	Invoked when the owning team is dying, initializing the entry according to
2252 	the dead state.
2253 
2254 	The caller must hold the owning team's lock and the scheduler lock.
2255 */
2256 void
2257 job_control_entry::InitDeadState()
2258 {
2259 	if (team != NULL) {
2260 		ASSERT(team->exit.initialized);
2261 
2262 		group_id = team->group_id;
2263 		team->group->AcquireReference();
2264 		has_group_ref = true;
2265 
2266 		thread = team->id;
2267 		status = team->exit.status;
2268 		reason = team->exit.reason;
2269 		signal = team->exit.signal;
2270 		signaling_user = team->exit.signaling_user;
2271 
2272 		team = NULL;
2273 	}
2274 }
2275 
2276 
2277 job_control_entry&
2278 job_control_entry::operator=(const job_control_entry& other)
2279 {
2280 	state = other.state;
2281 	thread = other.thread;
2282 	signal = other.signal;
2283 	has_group_ref = false;
2284 	signaling_user = other.signaling_user;
2285 	team = other.team;
2286 	group_id = other.group_id;
2287 	status = other.status;
2288 	reason = other.reason;
2289 
2290 	return *this;
2291 }
2292 
2293 
2294 /*! This is the kernel backend for waitid().
2295 */
2296 static thread_id
2297 wait_for_child(pid_t child, uint32 flags, siginfo_t& _info)
2298 {
2299 	Thread* thread = thread_get_current_thread();
2300 	Team* team = thread->team;
2301 	struct job_control_entry foundEntry;
2302 	struct job_control_entry* freeDeathEntry = NULL;
2303 	status_t status = B_OK;
2304 
2305 	TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));
2306 
2307 	T(WaitForChild(child, flags));
2308 
2309 	pid_t originalChild = child;
2310 
2311 	bool ignoreFoundEntries = false;
2312 	bool ignoreFoundEntriesChecked = false;
2313 
2314 	while (true) {
2315 		// lock the team
2316 		TeamLocker teamLocker(team);
2317 
2318 		// A 0 child argument means to wait for all children in the process
2319 		// group of the calling team.
2320 		child = originalChild == 0 ? -team->group_id : originalChild;
2321 
2322 		// check whether any condition holds
2323 		job_control_entry* entry = get_job_control_entry(team, child, flags);
2324 
2325 		// If we don't have an entry yet, check whether there are any children
2326 		// complying with the process group specification at all.
2327 		if (entry == NULL) {
2330 			bool childrenExist = false;
2331 			if (child == -1) {
2332 				childrenExist = team->children != NULL;
2333 			} else if (child < -1) {
2334 				childrenExist = has_children_in_group(team, -child);
2335 			} else {
2336 				if (Team* childTeam = Team::Get(child)) {
2337 					BReference<Team> childTeamReference(childTeam, true);
2338 					TeamLocker childTeamLocker(childTeam);
2339 					childrenExist = childTeam->parent == team;
2340 				}
2341 			}
2342 
2343 			if (!childrenExist) {
2344 				// there is no child we could wait for
2345 				status = ECHILD;
2346 			} else {
2347 				// the children we're waiting for are still running
2348 				status = B_WOULD_BLOCK;
2349 			}
2350 		} else {
2351 			// got something
2352 			foundEntry = *entry;
2353 
2354 			// unless WNOWAIT has been specified, "consume" the wait state
2355 			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
2356 				if (entry->state == JOB_CONTROL_STATE_DEAD) {
2357 					// The child is dead. Reap its death entry.
2358 					freeDeathEntry = entry;
2359 					team->dead_children.entries.Remove(entry);
2360 					team->dead_children.count--;
2361 				} else {
2362 					// The child is well. Reset its job control state.
2363 					team_set_job_control_state(entry->team,
2364 						JOB_CONTROL_STATE_NONE, NULL, false);
2365 				}
2366 			}
2367 		}
2368 
2369 		// If we haven't got anything yet, prepare to wait on the condition
2370 		// variable.
2371 		ConditionVariableEntry deadWaitEntry;
2372 
2373 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
2374 			team->dead_children.condition_variable.Add(&deadWaitEntry);
2375 
2376 		teamLocker.Unlock();
2377 
2378 		// we got our entry and can return to our caller
2379 		if (status == B_OK) {
2380 			if (ignoreFoundEntries) {
2381 				// ... unless we shall ignore found entries
2382 				delete freeDeathEntry;
2383 				freeDeathEntry = NULL;
2384 				continue;
2385 			}
2386 
2387 			break;
2388 		}
2389 
2390 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
2391 			T(WaitForChildDone(status));
2392 			return status;
2393 		}
2394 
2395 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
2396 		if (status == B_INTERRUPTED) {
2397 			T(WaitForChildDone(status));
2398 			return status;
2399 		}
2400 
2401 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
2402 		// all our children are dead and fail with ECHILD. We check the
2403 		// condition at this point.
2404 		if (!ignoreFoundEntriesChecked) {
2405 			teamLocker.Lock();
2406 
2407 			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
2408 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
2409 				|| handler.sa_handler == SIG_IGN) {
2410 				ignoreFoundEntries = true;
2411 			}
2412 
2413 			teamLocker.Unlock();
2414 
2415 			ignoreFoundEntriesChecked = true;
2416 		}
2417 	}
2418 
2419 	delete freeDeathEntry;
2420 
2421 	// When we get here, we have a valid death entry which has already been
2422 	// unregistered from the team or group. Fill in the returned info.
2423 	memset(&_info, 0, sizeof(_info));
2424 	_info.si_signo = SIGCHLD;
2425 	_info.si_pid = foundEntry.thread;
2426 	_info.si_uid = foundEntry.signaling_user;
2427 	// TODO: Fill in si_errno?
2428 
2429 	switch (foundEntry.state) {
2430 		case JOB_CONTROL_STATE_DEAD:
2431 			_info.si_code = foundEntry.reason;
2432 			_info.si_status = foundEntry.reason == CLD_EXITED
2433 				? foundEntry.status : foundEntry.signal;
2434 			break;
2435 		case JOB_CONTROL_STATE_STOPPED:
2436 			_info.si_code = CLD_STOPPED;
2437 			_info.si_status = foundEntry.signal;
2438 			break;
2439 		case JOB_CONTROL_STATE_CONTINUED:
2440 			_info.si_code = CLD_CONTINUED;
2441 			_info.si_status = 0;
2442 			break;
2443 		case JOB_CONTROL_STATE_NONE:
2444 			// can't happen
2445 			break;
2446 	}
2447 
2448 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs if no other
2449 	// child status is available.
2450 	TeamLocker teamLocker(team);
2451 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2452 
2453 	if (is_team_signal_blocked(team, SIGCHLD)) {
2454 		if (get_job_control_entry(team, child, flags) == NULL)
2455 			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
2456 	}
2457 
2458 	schedulerLocker.Unlock();
2459 	teamLocker.Unlock();
2460 
2461 	// When the team is dead, the main thread continues to live in the kernel
2462 	// team for a very short time. To avoid surprises for the caller we'd
2463 	// rather wait until the thread is really gone.
2464 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
2465 		wait_for_thread(foundEntry.thread, NULL);
2466 
2467 	T(WaitForChildDone(foundEntry));
2468 
2469 	return foundEntry.thread;
2470 }
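
/*	For illustration, a rough sketch of how a userland waitpid() could be
	layered on this backend via the _user_wait_for_child() syscall further
	below. The wrapper name and the status encoding are simplifications,
	not the actual libroot implementation:

		pid_t
		my_waitpid(pid_t pid, int* _status, int options)
		{
			siginfo_t info;
			pid_t child = _kern_wait_for_child(pid, options, &info);
				// userland stub of _user_wait_for_child()
			if (child < 0)
				return -1;
			if (_status != NULL)
				*_status = info.si_status;
					// the real wrapper re-encodes si_code/si_status
					// into the W*() macro format
			return child;
		}
*/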
2471 
2472 
2473 /*! Fills the team_info structure with information from the specified team.
2474 	Interrupts must be enabled. The team must not be locked.
2475 */
2476 static status_t
2477 fill_team_info(Team* team, team_info* info, size_t size)
2478 {
2479 	if (size != sizeof(team_info))
2480 		return B_BAD_VALUE;
2481 
2482 	// TODO: Set more information for team_info
2483 	memset(info, 0, size);
2484 
2485 	info->team = team->id;
2486 		// immutable
2487 	info->image_count = count_images(team);
2488 		// protected by sImageMutex
2489 
2490 	TeamLocker teamLocker(team);
2491 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2492 
2493 	info->thread_count = team->num_threads;
2494 	//info->area_count =
2495 	info->debugger_nub_thread = team->debug_info.nub_thread;
2496 	info->debugger_nub_port = team->debug_info.nub_port;
2497 	//info->uid =
2498 	//info->gid =
2499 
2500 	strlcpy(info->args, team->Args(), sizeof(info->args));
2501 	info->argc = 1;
2502 
2503 	return B_OK;
2504 }
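
/*	For illustration, how the info assembled above typically reaches
	userland through the public API (a minimal sketch, error handling
	omitted):

		team_info info;
		int32 cookie = 0;
		while (get_next_team_info(&cookie, &info) == B_OK)
			printf("team %ld: %s\n", info.team, info.args);
*/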
2505 
2506 
2507 /*!	Returns whether the process group contains stopped processes.
2508 	The caller must hold the process group's lock.
2509 */
2510 static bool
2511 process_group_has_stopped_processes(ProcessGroup* group)
2512 {
2513 	Team* team = group->teams;
2514 	while (team != NULL) {
2515 		// the parent team's lock guards the job control entry -- acquire it
2516 		team->LockTeamAndParent(false);
2517 
2518 		if (team->job_control_entry != NULL
2519 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2520 			team->UnlockTeamAndParent();
2521 			return true;
2522 		}
2523 
2524 		team->UnlockTeamAndParent();
2525 
2526 		team = team->group_next;
2527 	}
2528 
2529 	return false;
2530 }
2531 
2532 
2533 /*!	Iterates through all process groups queued in team_remove_team() and signals
2534 	those that are orphaned and have stopped processes.
2535 	The caller must not hold any team or process group locks.
2536 */
2537 static void
2538 orphaned_process_group_check()
2539 {
2540 	// process as long as there are groups in the list
2541 	while (true) {
2542 		// remove the head from the list
2543 		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
2544 
2545 		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
2546 		if (group == NULL)
2547 			return;
2548 
2549 		group->UnsetOrphanedCheck();
2550 		BReference<ProcessGroup> groupReference(group);
2551 
2552 		orphanedCheckLocker.Unlock();
2553 
2554 		AutoLocker<ProcessGroup> groupLocker(group);
2555 
2556 		// If the group is orphaned and contains stopped processes, we're
2557 		// supposed to send SIGHUP + SIGCONT.
2558 		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
2559 			Thread* currentThread = thread_get_current_thread();
2560 
2561 			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
2562 			send_signal_to_process_group_locked(group, signal, 0);
2563 
2564 			signal.SetNumber(SIGCONT);
2565 			send_signal_to_process_group_locked(group, signal, 0);
2566 		}
2567 	}
2568 }
2569 
2570 
2571 static status_t
2572 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2573 	uint32 flags)
2574 {
2575 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2576 		return B_BAD_VALUE;
2577 
2578 	// get the team
2579 	Team* team = Team::GetAndLock(id);
2580 	if (team == NULL)
2581 		return B_BAD_TEAM_ID;
2582 	BReference<Team> teamReference(team, true);
2583 	TeamLocker teamLocker(team, true);
2584 
2585 	if ((flags & B_CHECK_PERMISSION) != 0) {
2586 		uid_t uid = geteuid();
2587 		if (uid != 0 && uid != team->effective_uid)
2588 			return B_NOT_ALLOWED;
2589 	}
2590 
2591 	bigtime_t kernelTime = 0;
2592 	bigtime_t userTime = 0;
2593 
2594 	switch (who) {
2595 		case B_TEAM_USAGE_SELF:
2596 		{
2597 			Thread* thread = team->thread_list;
2598 
2599 			for (; thread != NULL; thread = thread->team_next) {
2600 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2601 				kernelTime += thread->kernel_time;
2602 				userTime += thread->user_time;
2603 			}
2604 
2605 			kernelTime += team->dead_threads_kernel_time;
2606 			userTime += team->dead_threads_user_time;
2607 			break;
2608 		}
2609 
2610 		case B_TEAM_USAGE_CHILDREN:
2611 		{
2612 			Team* child = team->children;
2613 			for (; child != NULL; child = child->siblings_next) {
2614 				TeamLocker childLocker(child);
2615 
2616 				Thread* thread = child->thread_list;
2617 
2618 				for (; thread != NULL; thread = thread->team_next) {
2619 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2620 					kernelTime += thread->kernel_time;
2621 					userTime += thread->user_time;
2622 				}
2623 
2624 				kernelTime += child->dead_threads_kernel_time;
2625 				userTime += child->dead_threads_user_time;
2626 			}
2627 
2628 			kernelTime += team->dead_children.kernel_time;
2629 			userTime += team->dead_children.user_time;
2630 			break;
2631 		}
2632 	}
2633 
2634 	info->kernel_time = kernelTime;
2635 	info->user_time = userTime;
2636 
2637 	return B_OK;
2638 }
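
/*	For illustration, the public call that ends up in the helper above (a
	minimal sketch):

		team_usage_info usage;
		if (get_team_usage_info(B_CURRENT_TEAM, B_TEAM_USAGE_SELF, &usage)
				== B_OK) {
			// usage.user_time and usage.kernel_time are bigtime_t
			// microseconds, accumulated as computed above
		}
*/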
2639 
2640 
2641 //	#pragma mark - Private kernel API
2642 
2643 
2644 status_t
2645 team_init(kernel_args* args)
2646 {
2647 	// create the team hash table
2648 	new(&sTeamHash) TeamTable;
2649 	if (sTeamHash.Init(64) != B_OK)
2650 		panic("Failed to init team hash table!");
2651 
2652 	new(&sGroupHash) ProcessGroupHashTable;
2653 	if (sGroupHash.Init() != B_OK)
2654 		panic("Failed to init process group hash table!");
2655 
2656 	// create initial session and process groups
2657 
2658 	ProcessSession* session = new(std::nothrow) ProcessSession(1);
2659 	if (session == NULL)
2660 		panic("Could not create initial session.\n");
2661 	BReference<ProcessSession> sessionReference(session, true);
2662 
2663 	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
2664 	if (group == NULL)
2665 		panic("Could not create initial process group.\n");
2666 	BReference<ProcessGroup> groupReference(group, true);
2667 
2668 	group->Publish(session);
2669 
2670 	// create the kernel team
2671 	sKernelTeam = Team::Create(1, "kernel_team", true);
2672 	if (sKernelTeam == NULL)
2673 		panic("could not create kernel team!\n");
2674 	sKernelTeam->SetArgs(sKernelTeam->Name());
2675 	sKernelTeam->state = TEAM_STATE_NORMAL;
2676 
2677 	sKernelTeam->saved_set_uid = 0;
2678 	sKernelTeam->real_uid = 0;
2679 	sKernelTeam->effective_uid = 0;
2680 	sKernelTeam->saved_set_gid = 0;
2681 	sKernelTeam->real_gid = 0;
2682 	sKernelTeam->effective_gid = 0;
2683 	sKernelTeam->supplementary_groups = NULL;
2684 	sKernelTeam->supplementary_group_count = 0;
2685 
2686 	insert_team_into_group(group, sKernelTeam);
2687 
2688 	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
2689 	if (sKernelTeam->io_context == NULL)
2690 		panic("could not create io_context for kernel team!\n");
2691 
2692 	// stick it in the team hash
2693 	sTeamHash.Insert(sKernelTeam);
2694 
2695 	add_debugger_command_etc("team", &dump_team_info,
2696 		"Dump info about a particular team",
2697 		"[ <id> | <address> | <name> ]\n"
2698 		"Prints information about the specified team. If no argument is given\n"
2699 		"the current team is selected.\n"
2700 		"  <id>       - The ID of the team.\n"
2701 		"  <address>  - The address of the team structure.\n"
2702 		"  <name>     - The team's name.\n", 0);
2703 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2704 		"\n"
2705 		"Prints a list of all existing teams.\n", 0);
2706 
2707 	new(&sNotificationService) TeamNotificationService();
2708 
2709 	return B_OK;
2710 }
2711 
2712 
2713 int32
2714 team_max_teams(void)
2715 {
2716 	return sMaxTeams;
2717 }
2718 
2719 
2720 int32
2721 team_used_teams(void)
2722 {
2723 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2724 	return sUsedTeams;
2725 }
2726 
2727 
2728 /*! Returns a death entry of a child team specified by ID (if any).
2729 	The caller must hold the team's lock.
2730 
2731 	\param team The team whose dead children list to check.
2732 	\param child The ID of the child whose death entry to look for. Must be > 0.
2733 	\param _deleteEntry Return variable, indicating whether the caller needs to
2734 		delete the returned entry.
2735 	\return The death entry of the matching team, or \c NULL, if no death entry
2736 		for the team was found.
2737 */
2738 job_control_entry*
2739 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2740 {
2741 	if (child <= 0)
2742 		return NULL;
2743 
2744 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2745 		child);
2746 	if (entry) {
2747 		// remove the entry only if the caller is the parent of the found team
2748 		if (team_get_current_team_id() == entry->thread) {
2749 			team->dead_children.entries.Remove(entry);
2750 			team->dead_children.count--;
2751 			*_deleteEntry = true;
2752 		} else {
2753 			*_deleteEntry = false;
2754 		}
2755 	}
2756 
2757 	return entry;
2758 }
2759 
2760 
2761 /*! Quick check to see if we have a valid team ID. */
2762 bool
2763 team_is_valid(team_id id)
2764 {
2765 	if (id <= 0)
2766 		return false;
2767 
2768 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2769 
2770 	return team_get_team_struct_locked(id) != NULL;
2771 }
2772 
2773 
2774 Team*
2775 team_get_team_struct_locked(team_id id)
2776 {
2777 	return sTeamHash.Lookup(id);
2778 }
2779 
2780 
2781 void
2782 team_set_controlling_tty(int32 ttyIndex)
2783 {
2784 	// lock the team, so its session won't change while we're playing with it
2785 	Team* team = thread_get_current_thread()->team;
2786 	TeamLocker teamLocker(team);
2787 
2788 	// get and lock the session
2789 	ProcessSession* session = team->group->Session();
2790 	AutoLocker<ProcessSession> sessionLocker(session);
2791 
2792 	// set the session's fields
2793 	session->controlling_tty = ttyIndex;
2794 	session->foreground_group = -1;
2795 }
2796 
2797 
2798 int32
2799 team_get_controlling_tty()
2800 {
2801 	// lock the team, so its session won't change while we're playing with it
2802 	Team* team = thread_get_current_thread()->team;
2803 	TeamLocker teamLocker(team);
2804 
2805 	// get and lock the session
2806 	ProcessSession* session = team->group->Session();
2807 	AutoLocker<ProcessSession> sessionLocker(session);
2808 
2809 	// get the session's field
2810 	return session->controlling_tty;
2811 }
2812 
2813 
2814 status_t
2815 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2816 {
2817 	// lock the team, so its session won't change while we're playing with it
2818 	Thread* thread = thread_get_current_thread();
2819 	Team* team = thread->team;
2820 	TeamLocker teamLocker(team);
2821 
2822 	// get and lock the session
2823 	ProcessSession* session = team->group->Session();
2824 	AutoLocker<ProcessSession> sessionLocker(session);
2825 
2826 	// check given TTY -- must be the controlling tty of the calling process
2827 	if (session->controlling_tty != ttyIndex)
2828 		return ENOTTY;
2829 
2830 	// check given process group -- must belong to our session
2831 	{
2832 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2833 		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
2834 		if (group == NULL || group->Session() != session)
2835 			return B_BAD_VALUE;
2836 	}
2837 
2838 	// If we are a background group, we can do that unharmed only when we
2839 	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
2840 	if (session->foreground_group != -1
2841 		&& session->foreground_group != team->group_id
2842 		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN) {
2843 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2844 
2845 		if (!is_team_signal_blocked(team, SIGTTOU)) {
2846 			pid_t groupID = team->group_id;
2847 
2848 			schedulerLocker.Unlock();
2849 			sessionLocker.Unlock();
2850 			teamLocker.Unlock();
2851 
2852 			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
2853 			send_signal_to_process_group(groupID, signal, 0);
2854 			return B_INTERRUPTED;
2855 		}
2856 	}
2857 
2858 	session->foreground_group = processGroupID;
2859 
2860 	return B_OK;
2861 }
2862 
2863 
2864 /*!	Removes the specified team from the global team hash, from its process
2865 	group, and from its parent.
2866 	It also moves all of its children to the kernel team.
2867 
2868 	The caller must hold the following locks:
2869 	- \a team's process group's lock,
2870 	- the kernel team's lock,
2871 	- \a team's parent team's lock (might be the kernel team), and
2872 	- \a team's lock.
2873 */
2874 void
2875 team_remove_team(Team* team, pid_t& _signalGroup)
2876 {
2877 	Team* parent = team->parent;
2878 
2879 	// remember how long this team lasted
2880 	parent->dead_children.kernel_time += team->dead_threads_kernel_time
2881 		+ team->dead_children.kernel_time;
2882 	parent->dead_children.user_time += team->dead_threads_user_time
2883 		+ team->dead_children.user_time;
2884 
2885 	// remove the team from the hash table
2886 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2887 	sTeamHash.Remove(team);
2888 	sUsedTeams--;
2889 	teamsLocker.Unlock();
2890 
2891 	// The team can no longer be accessed by ID. Navigation to it is still
2892 	// possible from its process group and its parent and children, but that
2893 	// will be rectified shortly.
2894 	team->state = TEAM_STATE_DEATH;
2895 
2896 	// If we're a controlling process (i.e. a session leader with controlling
2897 	// terminal), there's a bit of signaling we have to do. We can't do any of
2898 	// the signaling here due to the bunch of locks we're holding, but we need
2899 	// to determine whom to signal.
2900 	_signalGroup = -1;
2901 	bool isSessionLeader = false;
2902 	if (team->session_id == team->id
2903 		&& team->group->Session()->controlling_tty >= 0) {
2904 		isSessionLeader = true;
2905 
2906 		ProcessSession* session = team->group->Session();
2907 
2908 		AutoLocker<ProcessSession> sessionLocker(session);
2909 
2910 		session->controlling_tty = -1;
2911 		_signalGroup = session->foreground_group;
2912 	}
2913 
2914 	// remove us from our process group
2915 	remove_team_from_group(team);
2916 
2917 	// move the team's children to the kernel team
2918 	while (Team* child = team->children) {
2919 		// remove the child from the current team and add it to the kernel team
2920 		TeamLocker childLocker(child);
2921 
2922 		remove_team_from_parent(team, child);
2923 		insert_team_into_parent(sKernelTeam, child);
2924 
2925 		// move job control entries too
2926 		sKernelTeam->stopped_children.entries.MoveFrom(
2927 			&team->stopped_children.entries);
2928 		sKernelTeam->continued_children.entries.MoveFrom(
2929 			&team->continued_children.entries);
2930 
2931 		// If the team was a session leader with controlling terminal,
2932 		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
2933 		// groups with stopped processes. Due to locking complications we can't
2934 		// do that here, so we only check whether we were a reason for the
2935 		// child's process group not being an orphan and, if so, schedule a
2936 		// later check (cf. orphaned_process_group_check()).
2937 		if (isSessionLeader) {
2938 			ProcessGroup* childGroup = child->group;
2939 			if (childGroup->Session()->id == team->session_id
2940 				&& childGroup->id != team->group_id) {
2941 				childGroup->ScheduleOrphanedCheck();
2942 			}
2943 		}
2944 
2945 		// Note that we don't move the dead children entries. Those will be
2946 		// deleted when the team structure is deleted.
2947 	}
2948 
2949 	// remove us from our parent
2950 	remove_team_from_parent(parent, team);
2951 }
2952 
2953 
2954 /*!	Kills all threads but the main thread of the team and shuts down user
2955 	debugging for it.
2956 	To be called on exit of the team's main thread. No locks must be held.
2957 
2958 	\param team The team in question.
2959 	\return The port of the debugger for the team, -1 if none. To be passed to
2960 		team_delete_team().
2961 */
2962 port_id
2963 team_shutdown_team(Team* team)
2964 {
2965 	ASSERT(thread_get_current_thread() == team->main_thread);
2966 
2967 	TeamLocker teamLocker(team);
2968 
2969 	// Make sure debugging changes won't happen anymore.
2970 	port_id debuggerPort = -1;
2971 	while (true) {
2972 		// If a debugger change is in progress for the team, we'll have to
2973 		// wait until it is done.
2974 		ConditionVariableEntry waitForDebuggerEntry;
2975 		bool waitForDebugger = false;
2976 
2977 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2978 
2979 		if (team->debug_info.debugger_changed_condition != NULL) {
2980 			team->debug_info.debugger_changed_condition->Add(
2981 				&waitForDebuggerEntry);
2982 			waitForDebugger = true;
2983 		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2984 			// The team is being debugged. That will stop with the termination
2985 			// of the nub thread. Since we set the team state to death, no one
2986 			// can install a debugger anymore. We fetch the debugger's port to
2987 			// send it a message at the bitter end.
2988 			debuggerPort = team->debug_info.debugger_port;
2989 		}
2990 
2991 		debugInfoLocker.Unlock();
2992 
2993 		if (!waitForDebugger)
2994 			break;
2995 
2996 		// wait for the debugger change to be finished
2997 		teamLocker.Unlock();
2998 
2999 		waitForDebuggerEntry.Wait();
3000 
3001 		teamLocker.Lock();
3002 	}
3003 
3004 	// Mark the team as shutting down. That will prevent new threads from being
3005 	// created and debugger changes from taking place.
3006 	team->state = TEAM_STATE_SHUTDOWN;
3007 
3008 	// delete all timers
3009 	team->DeleteUserTimers(false);
3010 
3011 	// deactivate CPU time user timers for the team
3012 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3013 
3014 	if (team->HasActiveCPUTimeUserTimers())
3015 		team->DeactivateCPUTimeUserTimers();
3016 
3017 	schedulerLocker.Unlock();
3018 
3019 	// kill all threads but the main thread
3020 	team_death_entry deathEntry;
3021 	deathEntry.condition.Init(team, "team death");
3022 
3023 	while (true) {
3024 		team->death_entry = &deathEntry;
3025 		deathEntry.remaining_threads = 0;
3026 
3027 		Thread* thread = team->thread_list;
3028 		while (thread != NULL) {
3029 			if (thread != team->main_thread) {
3030 				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
3031 				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
3032 				deathEntry.remaining_threads++;
3033 			}
3034 
3035 			thread = thread->team_next;
3036 		}
3037 
3038 		if (deathEntry.remaining_threads == 0)
3039 			break;
3040 
3041 		// there are threads to wait for
3042 		ConditionVariableEntry entry;
3043 		deathEntry.condition.Add(&entry);
3044 
3045 		teamLocker.Unlock();
3046 
3047 		entry.Wait();
3048 
3049 		teamLocker.Lock();
3050 	}
3051 
3052 	team->death_entry = NULL;
3053 
3054 	return debuggerPort;
3055 }
3056 
3057 
3058 /*!	Called on team exit to notify threads waiting on the team and free most
3059 	resources associated with it.
3060 	The caller shouldn't hold any locks.
3061 */
3062 void
3063 team_delete_team(Team* team, port_id debuggerPort)
3064 {
3065 	// Not quite in our job description, but work that has been left by
3066 	// team_remove_team() and that can be done now that we're not holding any
3067 	// locks.
3068 	orphaned_process_group_check();
3069 
3070 	team_id teamID = team->id;
3071 
3072 	ASSERT(team->num_threads == 0);
3073 
3074 	// If someone is waiting for this team to be loaded, but it dies
3075 	// unexpectedly before being done, we need to notify the waiting
3076 	// thread now.
3077 
3078 	TeamLocker teamLocker(team);
3079 
3080 	if (team->loading_info) {
3081 		// there's indeed someone waiting
3082 		struct team_loading_info* loadingInfo = team->loading_info;
3083 		team->loading_info = NULL;
3084 
3085 		loadingInfo->result = B_ERROR;
3086 		loadingInfo->done = true;
3087 
3088 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3089 
3090 		// wake up the waiting thread
3091 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
3092 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
3093 	}
3094 
3095 	// notify team watchers
3096 
3097 	{
3098 		// we're not reachable from anyone anymore at this point, so we
3099 		// can safely access the list without any locking
3100 		struct team_watcher* watcher;
3101 		while ((watcher = (struct team_watcher*)list_remove_head_item(
3102 				&team->watcher_list)) != NULL) {
3103 			watcher->hook(teamID, watcher->data);
3104 			free(watcher);
3105 		}
3106 	}
3107 
3108 	teamLocker.Unlock();
3109 
3110 	sNotificationService.Notify(TEAM_REMOVED, team);
3111 
3112 	// free team resources
3113 
3114 	delete_realtime_sem_context(team->realtime_sem_context);
3115 	xsi_sem_undo(team);
3116 	remove_images(team);
3117 	team->address_space->RemoveAndPut();
3118 
3119 	team->ReleaseReference();
3120 
3121 	// notify the debugger that the team is gone
3122 	user_debug_team_deleted(teamID, debuggerPort);
3123 }
3124 
3125 
3126 Team*
3127 team_get_kernel_team(void)
3128 {
3129 	return sKernelTeam;
3130 }
3131 
3132 
3133 team_id
3134 team_get_kernel_team_id(void)
3135 {
3136 	if (!sKernelTeam)
3137 		return 0;
3138 
3139 	return sKernelTeam->id;
3140 }
3141 
3142 
3143 team_id
3144 team_get_current_team_id(void)
3145 {
3146 	return thread_get_current_thread()->team->id;
3147 }
3148 
3149 
3150 status_t
3151 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3152 {
3153 	if (id == sKernelTeam->id) {
3154 		// we're the kernel team, so we don't have to go through all
3155 		// the hassle (locking and hash lookup)
3156 		*_addressSpace = VMAddressSpace::GetKernel();
3157 		return B_OK;
3158 	}
3159 
3160 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3161 
3162 	Team* team = team_get_team_struct_locked(id);
3163 	if (team == NULL)
3164 		return B_BAD_VALUE;
3165 
3166 	team->address_space->Get();
3167 	*_addressSpace = team->address_space;
3168 	return B_OK;
3169 }
3170 
3171 
3172 /*!	Sets the team's job control state.
3173 	The caller must hold the parent team's lock. Interrupts are allowed to be
3174 	enabled or disabled. In the latter case the scheduler lock may be held as
3175 	well.
3176 	\param team The team whose job control state shall be set.
3177 	\param newState The new state to be set.
3178 	\param signal The signal the new state was caused by. Can be \c NULL, if
3179 		none. Then the caller is responsible for filling in the following
3180 		fields of the entry before releasing the parent team's lock, unless
3181 		the new state is \c JOB_CONTROL_STATE_NONE:
3182 		- \c signal: The number of the signal causing the state change.
3183 		- \c signaling_user: The real UID of the user sending the signal.
3184 	\param schedulerLocked Whether the scheduler lock is being held, too.
3185 */
3186 void
3187 team_set_job_control_state(Team* team, job_control_state newState,
3188 	Signal* signal, bool schedulerLocked)
3189 {
3190 	if (team == NULL || team->job_control_entry == NULL)
3191 		return;
3192 
3193 	// don't touch anything if the state stays the same or the team is already
3194 	// dead
3195 	job_control_entry* entry = team->job_control_entry;
3196 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
3197 		return;
3198 
3199 	T(SetJobControlState(team->id, newState, signal));
3200 
3201 	// remove from the old list
3202 	switch (entry->state) {
3203 		case JOB_CONTROL_STATE_NONE:
3204 			// entry is in no list ATM
3205 			break;
3206 		case JOB_CONTROL_STATE_DEAD:
3207 			// can't get here
3208 			break;
3209 		case JOB_CONTROL_STATE_STOPPED:
3210 			team->parent->stopped_children.entries.Remove(entry);
3211 			break;
3212 		case JOB_CONTROL_STATE_CONTINUED:
3213 			team->parent->continued_children.entries.Remove(entry);
3214 			break;
3215 	}
3216 
3217 	entry->state = newState;
3218 
3219 	if (signal != NULL) {
3220 		entry->signal = signal->Number();
3221 		entry->signaling_user = signal->SendingUser();
3222 	}
3223 
3224 	// add to new list
3225 	team_job_control_children* childList = NULL;
3226 	switch (entry->state) {
3227 		case JOB_CONTROL_STATE_NONE:
3228 			// entry doesn't get into any list
3229 			break;
3230 		case JOB_CONTROL_STATE_DEAD:
3231 			childList = &team->parent->dead_children;
3232 			team->parent->dead_children.count++;
3233 			break;
3234 		case JOB_CONTROL_STATE_STOPPED:
3235 			childList = &team->parent->stopped_children;
3236 			break;
3237 		case JOB_CONTROL_STATE_CONTINUED:
3238 			childList = &team->parent->continued_children;
3239 			break;
3240 	}
3241 
3242 	if (childList != NULL) {
3243 		childList->entries.Add(entry);
3244 		team->parent->dead_children.condition_variable.NotifyAll(
3245 			schedulerLocked);
3246 	}
3247 }
3248 
3249 
3250 /*!	Inits the given team's exit information, if not yet initialized, to some
3251 	generic "killed" status.
3252 	The caller must not hold the team's lock. Interrupts must be enabled.
3253 
3254 	\param team The team whose exit info shall be initialized.
3255 */
3256 void
3257 team_init_exit_info_on_error(Team* team)
3258 {
3259 	TeamLocker teamLocker(team);
3260 
3261 	if (!team->exit.initialized) {
3262 		team->exit.reason = CLD_KILLED;
3263 		team->exit.signal = SIGKILL;
3264 		team->exit.signaling_user = geteuid();
3265 		team->exit.status = 0;
3266 		team->exit.initialized = true;
3267 	}
3268 }
3269 
3270 
3271 /*! Adds a hook to the team that is called as soon as this team goes away.
3272 	This call might get public in the future.
3273 */
3274 status_t
3275 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3276 {
3277 	if (hook == NULL || teamID < B_OK)
3278 		return B_BAD_VALUE;
3279 
3280 	// create the watcher object
3281 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3282 	if (watcher == NULL)
3283 		return B_NO_MEMORY;
3284 
3285 	watcher->hook = hook;
3286 	watcher->data = data;
3287 
3288 	// add the watcher if the team isn't already dying
3289 	// get the team
3290 	Team* team = Team::GetAndLock(teamID);
3291 	if (team == NULL) {
3292 		free(watcher);
3293 		return B_BAD_TEAM_ID;
3294 	}
3295 
3296 	list_add_item(&team->watcher_list, watcher);
3297 
3298 	team->UnlockAndReleaseReference();
3299 
3300 	return B_OK;
3301 }
3302 
3303 
3304 status_t
3305 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3306 {
3307 	if (hook == NULL || teamID < 0)
3308 		return B_BAD_VALUE;
3309 
3310 	// get team and remove watcher (if present)
3311 	Team* team = Team::GetAndLock(teamID);
3312 	if (team == NULL)
3313 		return B_BAD_TEAM_ID;
3314 
3315 	// search for watcher
3316 	team_watcher* watcher = NULL;
3317 	while ((watcher = (team_watcher*)list_get_next_item(
3318 			&team->watcher_list, watcher)) != NULL) {
3319 		if (watcher->hook == hook && watcher->data == data) {
3320 			// got it!
3321 			list_remove_item(&team->watcher_list, watcher);
3322 			break;
3323 		}
3324 	}
3325 
3326 	team->UnlockAndReleaseReference();
3327 
3328 	if (watcher == NULL)
3329 		return B_ENTRY_NOT_FOUND;
3330 
3331 	free(watcher);
3332 	return B_OK;
3333 }
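
/*	For illustration, a minimal sketch of the watcher interface above; the
	hook is invoked from team_delete_team() when the team goes away (the
	names used here are made up):

		static void
		my_team_gone(team_id id, void* data)
		{
			dprintf("team %ld is gone\n", id);
		}

		// paired calls:
		//	start_watching_team(teamID, &my_team_gone, NULL);
		//	stop_watching_team(teamID, &my_team_gone, NULL);
*/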
3334 
3335 
3336 /*!	Allocates a user_thread structure from the team.
3337 	The team lock must be held, unless the function is called for the team's
3338 	main thread. Interrupts must be enabled.
3339 */
3340 struct user_thread*
3341 team_allocate_user_thread(Team* team)
3342 {
3343 	if (team->user_data == 0)
3344 		return NULL;
3345 
3346 	// take an entry from the free list, if any
3347 	if (struct free_user_thread* entry = team->free_user_threads) {
3348 		user_thread* thread = entry->thread;
3349 		team->free_user_threads = entry->next;
3350 		free(entry);
3351 		return thread;
3352 	}
3353 
3354 	while (true) {
3355 		// enough space left?
3356 		size_t needed = ROUNDUP(sizeof(user_thread), 8);
3357 		if (team->user_data_size - team->used_user_data < needed) {
3358 			// try to resize the area
3359 			if (resize_area(team->user_data_area,
3360 					team->user_data_size + B_PAGE_SIZE) != B_OK) {
3361 				return NULL;
3362 			}
3363 
3364 			// resized user area successfully -- try to allocate the user_thread
3365 			// again
3366 			team->user_data_size += B_PAGE_SIZE;
3367 			continue;
3368 		}
3369 
3370 		// allocate the user_thread
3371 		user_thread* thread
3372 			= (user_thread*)(team->user_data + team->used_user_data);
3373 		team->used_user_data += needed;
3374 
3375 		return thread;
3376 	}
3377 }
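
/*	For illustration of the allocation strategy above: with, say,
	used_user_data at 4090 in a 4096 byte area, the
	ROUNDUP(sizeof(user_thread), 8) bytes no longer fit, so the area is
	grown by B_PAGE_SIZE and the bump allocation is retried from the top of
	the loop. Structures freed by team_free_user_thread() below are
	recycled via the free list first.
*/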
3378 
3379 
3380 /*!	Frees the given user_thread structure.
3381 	The team's lock must not be held. Interrupts must be enabled.
3382 	\param team The team the user thread was allocated from.
3383 	\param userThread The user thread to free.
3384 */
3385 void
3386 team_free_user_thread(Team* team, struct user_thread* userThread)
3387 {
3388 	if (userThread == NULL)
3389 		return;
3390 
3391 	// create a free list entry
3392 	free_user_thread* entry
3393 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3394 	if (entry == NULL) {
3395 		// we have to leak the user thread :-/
3396 		return;
3397 	}
3398 
3399 	// add to free list
3400 	TeamLocker teamLocker(team);
3401 
3402 	entry->thread = userThread;
3403 	entry->next = team->free_user_threads;
3404 	team->free_user_threads = entry;
3405 }
3406 
3407 
3408 //	#pragma mark - Associated data interface
3409 
3410 
3411 AssociatedData::AssociatedData()
3412 	:
3413 	fOwner(NULL)
3414 {
3415 }
3416 
3417 
3418 AssociatedData::~AssociatedData()
3419 {
3420 }
3421 
3422 
3423 void
3424 AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
3425 {
3426 }
3427 
3428 
3429 AssociatedDataOwner::AssociatedDataOwner()
3430 {
3431 	mutex_init(&fLock, "associated data owner");
3432 }
3433 
3434 
3435 AssociatedDataOwner::~AssociatedDataOwner()
3436 {
3437 	mutex_destroy(&fLock);
3438 }
3439 
3440 
3441 bool
3442 AssociatedDataOwner::AddData(AssociatedData* data)
3443 {
3444 	MutexLocker locker(fLock);
3445 
3446 	if (data->Owner() != NULL)
3447 		return false;
3448 
3449 	data->AcquireReference();
3450 	fList.Add(data);
3451 	data->SetOwner(this);
3452 
3453 	return true;
3454 }
3455 
3456 
3457 bool
3458 AssociatedDataOwner::RemoveData(AssociatedData* data)
3459 {
3460 	MutexLocker locker(fLock);
3461 
3462 	if (data->Owner() != this)
3463 		return false;
3464 
3465 	data->SetOwner(NULL);
3466 	fList.Remove(data);
3467 
3468 	locker.Unlock();
3469 
3470 	data->ReleaseReference();
3471 
3472 	return true;
3473 }
3474 
3475 
3476 void
3477 AssociatedDataOwner::PrepareForDeletion()
3478 {
3479 	MutexLocker locker(fLock);
3480 
3481 	// move all data to a temporary list and unset the owner
3482 	DataList list;
3483 	list.MoveFrom(&fList);
3484 
3485 	for (DataList::Iterator it = list.GetIterator();
3486 		AssociatedData* data = it.Next();) {
3487 		data->SetOwner(NULL);
3488 	}
3489 
3490 	locker.Unlock();
3491 
3492 	// call the notification hooks and release our references
3493 	while (AssociatedData* data = list.RemoveHead()) {
3494 		data->OwnerDeleted(this);
3495 		data->ReleaseReference();
3496 	}
3497 }
3498 
3499 
3500 /*!	Associates data with the current team.
3501 	When the team is deleted, the data object is notified.
3502 	The team acquires a reference to the object.
3503 
3504 	\param data The data object.
3505 	\return \c true on success, \c false otherwise. Fails only when the supplied
3506 		data object is already associated with another owner.
3507 */
3508 bool
3509 team_associate_data(AssociatedData* data)
3510 {
3511 	return thread_get_current_thread()->team->AddData(data);
3512 }
3513 
3514 
3515 /*!	Dissociates data from the current team.
3516 	Balances an earlier call to team_associate_data().
3517 
3518 	\param data The data object.
3519 	\return \c true on success, \c false otherwise. Fails only when the data
3520 		object is not associated with the current team.
3521 */
3522 bool
3523 team_dissociate_data(AssociatedData* data)
3524 {
3525 	return thread_get_current_thread()->team->RemoveData(data);
3526 }
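
/*	For illustration, a minimal sketch of an AssociatedData client (the
	class name is made up). AddData() takes a reference, so the creator may
	drop its own once the data is attached:

		struct MyTeamData : AssociatedData {
			virtual void OwnerDeleted(AssociatedDataOwner* owner)
			{
				// the owning team is going away -- clean up
			}
		};

		MyTeamData* data = new(std::nothrow) MyTeamData;
		if (data != NULL && team_associate_data(data))
			data->ReleaseReference();
*/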
3527 
3528 
3529 //	#pragma mark - Public kernel API
3530 
3531 
3532 thread_id
3533 load_image(int32 argCount, const char** args, const char** env)
3534 {
3535 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
3536 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
3537 }
3538 
3539 
3540 thread_id
3541 load_image_etc(int32 argCount, const char* const* args,
3542 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3543 {
3544 	// we need to flatten the args and environment
3545 
3546 	if (args == NULL)
3547 		return B_BAD_VALUE;
3548 
3549 	// determine total needed size
3550 	int32 argSize = 0;
3551 	for (int32 i = 0; i < argCount; i++)
3552 		argSize += strlen(args[i]) + 1;
3553 
3554 	int32 envCount = 0;
3555 	int32 envSize = 0;
3556 	while (env != NULL && env[envCount] != NULL)
3557 		envSize += strlen(env[envCount++]) + 1;
3558 
3559 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3560 	if (size > MAX_PROCESS_ARGS_SIZE)
3561 		return B_TOO_MANY_ARGS;
3562 
3563 	// allocate space
3564 	char** flatArgs = (char**)malloc(size);
3565 	if (flatArgs == NULL)
3566 		return B_NO_MEMORY;
3567 
3568 	char** slot = flatArgs;
3569 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3570 
3571 	// copy arguments and environment
3572 	for (int32 i = 0; i < argCount; i++) {
3573 		int32 argSize = strlen(args[i]) + 1;
3574 		memcpy(stringSpace, args[i], argSize);
3575 		*slot++ = stringSpace;
3576 		stringSpace += argSize;
3577 	}
3578 
3579 	*slot++ = NULL;
3580 
3581 	for (int32 i = 0; i < envCount; i++) {
3582 		int32 envSize = strlen(env[i]) + 1;
3583 		memcpy(stringSpace, env[i], envSize);
3584 		*slot++ = stringSpace;
3585 		stringSpace += envSize;
3586 	}
3587 
3588 	*slot++ = NULL;
3589 
3590 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3591 		priority, parentID, flags, -1, 0);
3592 
3593 	free(flatArgs);
3594 		// load_image_internal() unset our variable if it took over ownership
3595 
3596 	return thread;
3597 }
3598 
3599 
3600 status_t
3601 wait_for_team(team_id id, status_t* _returnCode)
3602 {
3603 	// check whether the team exists
3604 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3605 
3606 	Team* team = team_get_team_struct_locked(id);
3607 	if (team == NULL)
3608 		return B_BAD_TEAM_ID;
3609 
3610 	id = team->id;
3611 
3612 	teamsLocker.Unlock();
3613 
3614 	// wait for the main thread (it has the same ID as the team)
3615 	return wait_for_thread(id, _returnCode);
3616 }
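
/*	For illustration, the common "spawn and wait" pattern the two calls
	above support (a minimal sketch, error handling omitted; the launched
	path is an arbitrary example):

		const char* args[] = { "/bin/sync", NULL };
		thread_id team = load_image(1, args, NULL);
		if (team >= 0) {
			resume_thread(team);
				// the main thread is created suspended
			status_t returnCode;
			wait_for_team(team, &returnCode);
		}
*/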
3617 
3618 
3619 status_t
3620 kill_team(team_id id)
3621 {
3622 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3623 
3624 	Team* team = team_get_team_struct_locked(id);
3625 	if (team == NULL)
3626 		return B_BAD_TEAM_ID;
3627 
3628 	id = team->id;
3629 
3630 	teamsLocker.Unlock();
3631 
3632 	if (team == sKernelTeam)
3633 		return B_NOT_ALLOWED;
3634 
3635 	// Just kill the team's main thread (it has the same ID as the team). The
3636 	// cleanup code there will take care of the team.
3637 	return kill_thread(id);
3638 }
3639 
3640 
3641 status_t
3642 _get_team_info(team_id id, team_info* info, size_t size)
3643 {
3644 	// get the team
3645 	Team* team = Team::Get(id);
3646 	if (team == NULL)
3647 		return B_BAD_TEAM_ID;
3648 	BReference<Team> teamReference(team, true);
3649 
3650 	// fill in the info
3651 	return fill_team_info(team, info, size);
3652 }
3653 
3654 
3655 status_t
3656 _get_next_team_info(int32* cookie, team_info* info, size_t size)
3657 {
3658 	int32 slot = *cookie;
3659 	if (slot < 1)
3660 		slot = 1;
3661 
3662 	InterruptsSpinLocker locker(sTeamHashLock);
3663 
3664 	team_id lastTeamID = peek_next_thread_id();
3665 		// TODO: This is broken, since the id can wrap around!
3666 
3667 	// get next valid team
3668 	Team* team = NULL;
3669 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3670 		slot++;
3671 
3672 	if (team == NULL)
3673 		return B_BAD_TEAM_ID;
3674 
3675 	// get a reference to the team and unlock
3676 	BReference<Team> teamReference(team);
3677 	locker.Unlock();
3678 
3679 	// fill in the info
3680 	*cookie = ++slot;
3681 	return fill_team_info(team, info, size);
3682 }
3683 
3684 
3685 status_t
3686 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3687 {
3688 	if (size != sizeof(team_usage_info))
3689 		return B_BAD_VALUE;
3690 
3691 	return common_get_team_usage_info(id, who, info, 0);
3692 }
3693 
3694 
3695 pid_t
3696 getpid(void)
3697 {
3698 	return thread_get_current_thread()->team->id;
3699 }
3700 
3701 
3702 pid_t
3703 getppid(void)
3704 {
3705 	Team* team = thread_get_current_thread()->team;
3706 
3707 	TeamLocker teamLocker(team);
3708 
3709 	return team->parent->id;
3710 }
3711 
3712 
3713 pid_t
3714 getpgid(pid_t id)
3715 {
3716 	if (id < 0) {
3717 		errno = EINVAL;
3718 		return -1;
3719 	}
3720 
3721 	if (id == 0) {
3722 		// get process group of the calling process
3723 		Team* team = thread_get_current_thread()->team;
3724 		TeamLocker teamLocker(team);
3725 		return team->group_id;
3726 	}
3727 
3728 	// get the team
3729 	Team* team = Team::GetAndLock(id);
3730 	if (team == NULL) {
3731 		errno = ESRCH;
3732 		return -1;
3733 	}
3734 
3735 	// get the team's process group ID
3736 	pid_t groupID = team->group_id;
3737 
3738 	team->UnlockAndReleaseReference();
3739 
3740 	return groupID;
3741 }
3742 
3743 
3744 pid_t
3745 getsid(pid_t id)
3746 {
3747 	if (id < 0) {
3748 		errno = EINVAL;
3749 		return -1;
3750 	}
3751 
3752 	if (id == 0) {
3753 		// get session of the calling process
3754 		Team* team = thread_get_current_thread()->team;
3755 		TeamLocker teamLocker(team);
3756 		return team->session_id;
3757 	}
3758 
3759 	// get the team
3760 	Team* team = Team::GetAndLock(id);
3761 	if (team == NULL) {
3762 		errno = ESRCH;
3763 		return -1;
3764 	}
3765 
3766 	// get the team's session ID
3767 	pid_t sessionID = team->session_id;
3768 
3769 	team->UnlockAndReleaseReference();
3770 
3771 	return sessionID;
3772 }
3773 
3774 
3775 //	#pragma mark - User syscalls
3776 
3777 
3778 status_t
3779 _user_exec(const char* userPath, const char* const* userFlatArgs,
3780 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
3781 {
3782 	// NOTE: Since this function normally doesn't return, don't use automatic
3783 	// variables that need destruction in the function scope.
3784 	char path[B_PATH_NAME_LENGTH];
3785 
3786 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3787 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3788 		return B_BAD_ADDRESS;
3789 
3790 	// copy and relocate the flat arguments
3791 	char** flatArgs;
3792 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3793 		argCount, envCount, flatArgs);
3794 
3795 	if (error == B_OK) {
3796 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3797 			envCount, umask);
3798 			// this one only returns in case of error
3799 	}
3800 
3801 	free(flatArgs);
3802 	return error;
3803 }
3804 
3805 
3806 thread_id
3807 _user_fork(void)
3808 {
3809 	return fork_team();
3810 }
3811 
3812 
3813 pid_t
3814 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo)
3815 {
3816 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
3817 		return B_BAD_ADDRESS;
3818 
3819 	siginfo_t info;
3820 	pid_t foundChild = wait_for_child(child, flags, info);
3821 	if (foundChild < 0)
3822 		return syscall_restart_handle_post(foundChild);
3823 
3824 	// copy info back to userland
3825 	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
3826 		return B_BAD_ADDRESS;
3827 
3828 	return foundChild;
3829 }
3830 
3831 
3832 pid_t
3833 _user_process_info(pid_t process, int32 which)
3834 {
3835 	// we only allow returning the parent of the current process
3836 	if (which == PARENT_ID
3837 		&& process != 0 && process != thread_get_current_thread()->team->id)
3838 		return B_BAD_VALUE;
3839 
3840 	pid_t result;
3841 	switch (which) {
3842 		case SESSION_ID:
3843 			result = getsid(process);
3844 			break;
3845 		case GROUP_ID:
3846 			result = getpgid(process);
3847 			break;
3848 		case PARENT_ID:
3849 			result = getppid();
3850 			break;
3851 		default:
3852 			return B_BAD_VALUE;
3853 	}
3854 
3855 	return result >= 0 ? result : errno;
3856 }
3857 
3858 
3859 pid_t
3860 _user_setpgid(pid_t processID, pid_t groupID)
3861 {
3862 	// setpgid() can be called either by the parent of the target process or
3863 	// by the process itself to do one of two things:
3864 	// * Create a new process group with the target process' ID and the target
3865 	//   process as group leader.
3866 	// * Set the target process' process group to an already existing one in the
3867 	//   same session.
3868 
3869 	if (groupID < 0)
3870 		return B_BAD_VALUE;
3871 
3872 	Team* currentTeam = thread_get_current_thread()->team;
3873 	if (processID == 0)
3874 		processID = currentTeam->id;
3875 
3876 	// if the group ID is not specified, use the target process' ID
3877 	if (groupID == 0)
3878 		groupID = processID;
3879 
3880 	// We loop when running into the following race condition: We create a new
3881 	// process group, because there isn't one with that ID yet, but later when
3882 	// trying to publish it, we find that someone else created and published
3883 	// a group with that ID in the meantime. In that case we just restart the
3884 	// whole action.
3885 	while (true) {
3886 		// Look up the process group by ID. If it doesn't exist yet and we are
3887 		// allowed to create a new one, do that.
3888 		ProcessGroup* group = ProcessGroup::Get(groupID);
3889 		bool newGroup = false;
3890 		if (group == NULL) {
3891 			if (groupID != processID)
3892 				return B_NOT_ALLOWED;
3893 
3894 			group = new(std::nothrow) ProcessGroup(groupID);
3895 			if (group == NULL)
3896 				return B_NO_MEMORY;
3897 
3898 			newGroup = true;
3899 		}
3900 		BReference<ProcessGroup> groupReference(group, true);
3901 
3902 		// get the target team
3903 		Team* team = Team::Get(processID);
3904 		if (team == NULL)
3905 			return ESRCH;
3906 		BReference<Team> teamReference(team, true);
3907 
3908 		// lock the new process group and the team's current process group
3909 		while (true) {
3910 			// lock the team's current process group
3911 			team->LockProcessGroup();
3912 
3913 			ProcessGroup* oldGroup = team->group;
3914 			if (oldGroup == group) {
3915 				// it's the same as the target group, so just bail out
3916 				oldGroup->Unlock();
3917 				return group->id;
3918 			}
3919 
3920 			oldGroup->AcquireReference();
3921 
3922 			// lock the target process group if the locking order allows it
3923 			if (newGroup || group->id > oldGroup->id) {
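			// (The convention used here: of two published groups, the one
			// with the lower ID has to be locked first; an unpublished group
			// is not yet visible to anyone else and can be locked directly.)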
3924 				group->Lock();
3925 				break;
3926 			}
3927 
3928 			// blocking here would violate the locking order, so only try to lock
3929 			if (group->TryLock())
3930 				break;
3931 
3932 			// no dice -- unlock the team's current process group and relock in
3933 			// the correct order
3934 			oldGroup->Unlock();
3935 
3936 			group->Lock();
3937 			oldGroup->Lock();
3938 
3939 			// check whether things are still the same
3940 			TeamLocker teamLocker(team);
3941 			if (team->group == oldGroup)
3942 				break;
3943 
3944 			// something changed -- unlock everything and retry
3945 			teamLocker.Unlock();
3946 			oldGroup->Unlock();
3947 			group->Unlock();
3948 			oldGroup->ReleaseReference();
3949 		}
3950 
3951 		// we now have references and locks of both the new and old process groups
3952 		BReference<ProcessGroup> oldGroupReference(team->group, true);
3953 		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
3954 		AutoLocker<ProcessGroup> groupLocker(group, true);
3955 
3956 		// also lock the target team and its parent
3957 		team->LockTeamAndParent(false);
3958 		TeamLocker parentLocker(team->parent, true);
3959 		TeamLocker teamLocker(team, true);
3960 
3961 		// perform the checks
3962 		if (team == currentTeam) {
3963 			// we set our own group
3964 
3965 			// we must not change our process group ID if we're a session leader
3966 			if (is_session_leader(currentTeam))
3967 				return B_NOT_ALLOWED;
3968 		} else {
3969 			// Calling team != target team. The target team must be a child of
3970 			// the calling team and in the same session. (If that's the case it
3971 			// isn't a session leader either.)
3972 			if (team->parent != currentTeam
3973 				|| team->session_id != currentTeam->session_id) {
3974 				return B_NOT_ALLOWED;
3975 			}
3976 
3977 			// The call is also supposed to fail on a child that has already
3978 			// executed exec*() [EACCES].
3979 			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
3980 				return EACCES;
3981 		}
3982 
3983 		// If we created a new process group, publish it now.
3984 		if (newGroup) {
3985 			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
3986 			if (sGroupHash.Lookup(groupID)) {
3987 				// A group with the group ID appeared since we first checked.
3988 				// Back to square one.
3989 				continue;
3990 			}
3991 
3992 			group->PublishLocked(team->group->Session());
3993 		} else if (group->Session()->id != team->session_id) {
3994 			// The existing target process group belongs to a different session.
3995 			// That's not allowed.
3996 			return B_NOT_ALLOWED;
3997 		}
3998 
3999 		// Everything is ready -- set the group.
4000 		remove_team_from_group(team);
4001 		insert_team_into_group(group, team);
4002 
4003 		// Changing the process group might have changed the situation for a
4004 		// parent waiting in wait_for_child(). Hence we notify it.
4005 		team->parent->dead_children.condition_variable.NotifyAll(false);
4006 
4007 		return group->id;
4008 	}
4009 }
4010 
4011 
4012 pid_t
4013 _user_setsid(void)
4014 {
4015 	Team* team = thread_get_current_thread()->team;
4016 
4017 	// create a new process group and session
4018 	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
4019 	if (group == NULL)
4020 		return B_NO_MEMORY;
4021 	BReference<ProcessGroup> groupReference(group, true);
4022 	AutoLocker<ProcessGroup> groupLocker(group);
4023 
4024 	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
4025 	if (session == NULL)
4026 		return B_NO_MEMORY;
4027 	BReference<ProcessSession> sessionReference(session, true);
4028 
4029 	// lock the team's current process group, parent, and the team itself
4030 	team->LockTeamParentAndProcessGroup();
4031 	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
4032 	TeamLocker parentLocker(team->parent, true);
4033 	TeamLocker teamLocker(team, true);
4034 
4035 	// the team must not already be a process group leader
4036 	if (is_process_group_leader(team))
4037 		return B_NOT_ALLOWED;
4038 
4039 	// remove the team from the old and add it to the new process group
4040 	remove_team_from_group(team);
4041 	group->Publish(session);
4042 	insert_team_into_group(group, team);
4043 
4044 	// Changing the process group might have changed the situation for a
4045 	// parent waiting in wait_for_child(). Hence we notify it.
4046 	team->parent->dead_children.condition_variable.NotifyAll(false);
4047 
4048 	return group->id;
4049 }
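
/*	Hedged illustration of why the group-leader check above matters: the
	classic POSIX daemonization sequence forks first precisely so that the
	child cannot be a process group leader, guaranteeing that setsid()
	succeeds:

		if (fork() == 0) {
			if (setsid() < 0)
				exit(1);
			// now leader of a new session and group, with no controlling tty
		}
*/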
4050 
4051 
4052 status_t
4053 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4054 {
4055 	status_t returnCode;
4056 	status_t status;
4057 
4058 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4059 		return B_BAD_ADDRESS;
4060 
4061 	status = wait_for_team(id, &returnCode);
4062 	if (status >= B_OK && _userReturnCode != NULL) {
4063 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4064 				!= B_OK)
4065 			return B_BAD_ADDRESS;
4066 		return B_OK;
4067 	}
4068 
4069 	return syscall_restart_handle_post(status);
4070 }
4071 
4072 
4073 thread_id
4074 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
4075 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
4076 	port_id errorPort, uint32 errorToken)
4077 {
4078 	TRACE(("_user_load_image: argc = %ld\n", argCount));
4079 
4080 	if (argCount < 1)
4081 		return B_BAD_VALUE;
4082 
4083 	// copy and relocate the flat arguments
4084 	char** flatArgs;
4085 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
4086 		argCount, envCount, flatArgs);
4087 	if (error != B_OK)
4088 		return error;
4089 
4090 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
4091 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
4092 		errorToken);
4093 
4094 	free(flatArgs);
4095 		// load_image_internal() unset our variable if it took over ownership
4096 		// load_image_internal() has set flatArgs to NULL if it took over ownership
4097 	return thread;
4098 }
4099 
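
/*	Hedged userland sketch using the public load_image() API from OS.h, which
	reaches this syscall via libroot. The new team starts suspended, hence
	the resume_thread():

		extern char** environ;
		const char* args[] = { "/bin/ls", "-l", NULL };
		thread_id team = load_image(2, args, (const char**)environ);
		if (team >= 0)
			resume_thread(team);
*/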
4100 
4101 void
4102 _user_exit_team(status_t returnValue)
4103 {
4104 	Thread* thread = thread_get_current_thread();
4105 	Team* team = thread->team;
4106 
4107 	// set this thread's exit status
4108 	thread->exit.status = returnValue;
4109 
4110 	// set the team exit status
4111 	TeamLocker teamLocker(team);
4112 
4113 	if (!team->exit.initialized) {
4114 		team->exit.reason = CLD_EXITED;
4115 		team->exit.signal = 0;
4116 		team->exit.signaling_user = 0;
4117 		team->exit.status = returnValue;
4118 		team->exit.initialized = true;
4119 	}
4120 
4121 	teamLocker.Unlock();
4122 
4123 	// Stop the thread if the team is being debugged and that has been
4124 	// requested.
4125 	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
4126 		user_debug_stop_thread();
4127 
4128 	// Send this thread a SIGKILL. This makes sure the thread will not return to
4129 	// userland. The signal handling code forwards the signal to the main
4130 	// thread (if that's not already this one), which will take the team down.
4131 	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
4132 	send_signal_to_thread(thread, signal, 0);
4133 }
4134 
4135 
4136 status_t
4137 _user_kill_team(team_id team)
4138 {
4139 	return kill_team(team);
4140 }
4141 
4142 
4143 status_t
4144 _user_get_team_info(team_id id, team_info* userInfo)
4145 {
4146 	status_t status;
4147 	team_info info;
4148 
4149 	if (!IS_USER_ADDRESS(userInfo))
4150 		return B_BAD_ADDRESS;
4151 
4152 	status = _get_team_info(id, &info, sizeof(team_info));
4153 	if (status == B_OK) {
4154 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4155 			return B_BAD_ADDRESS;
4156 	}
4157 
4158 	return status;
4159 }
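
/*	Hedged userland sketch (public API from OS.h):

		team_info info;
		if (get_team_info(B_CURRENT_TEAM, &info) == B_OK)
			printf("team %d: %s\n", (int)info.team, info.args);
*/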
4160 
4161 
4162 status_t
4163 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
4164 {
4165 	status_t status;
4166 	team_info info;
4167 	int32 cookie;
4168 
4169 	if (!IS_USER_ADDRESS(userCookie)
4170 		|| !IS_USER_ADDRESS(userInfo)
4171 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4172 		return B_BAD_ADDRESS;
4173 
4174 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
4175 	if (status != B_OK)
4176 		return status;
4177 
4178 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4179 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4180 		return B_BAD_ADDRESS;
4181 
4182 	return status;
4183 }
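
/*	Hedged iteration sketch (public API): the cookie must start at 0 and is
	otherwise opaque to the caller:

		int32 cookie = 0;
		team_info info;
		while (get_next_team_info(&cookie, &info) == B_OK)
			printf("%d %s\n", (int)info.team, info.args);
*/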
4184 
4185 
4186 team_id
4187 _user_get_current_team(void)
4188 {
4189 	return team_get_current_team_id();
4190 }
4191 
4192 
4193 status_t
4194 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4195 	size_t size)
4196 {
4197 	if (size != sizeof(team_usage_info))
4198 		return B_BAD_VALUE;
4199 
4200 	team_usage_info info;
4201 	status_t status = common_get_team_usage_info(team, who, &info,
4202 		B_CHECK_PERMISSION);
4203 
4204 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo))
4205 		return B_BAD_ADDRESS;
4206 
4207 	// copy the data back only on success, so that no uninitialized stack
4208 	// data can leak to userland
4209 	if (status == B_OK && user_memcpy(userInfo, &info, size) != B_OK)
		return B_BAD_ADDRESS;

	return status;
4210 }
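
/*	Hedged userland sketch (public API; B_TEAM_USAGE_SELF selects the team's
	own CPU times rather than those of its waited-for children):

		team_usage_info usage;
		if (get_team_usage_info(B_CURRENT_TEAM, B_TEAM_USAGE_SELF, &usage)
				== B_OK) {
			printf("user: %lld us, kernel: %lld us\n",
				(long long)usage.user_time, (long long)usage.kernel_time);
		}
*/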
4211 
4212 
4213 status_t
4214 _user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
4215 	size_t size, size_t* _sizeNeeded)
4216 {
4217 	// check parameters
4218 	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
4219 		|| (buffer == NULL && size > 0)
4220 		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
4221 		return B_BAD_ADDRESS;
4222 	}
4223 
4224 	KMessage info;
4225 
4226 	if ((flags & B_TEAM_INFO_BASIC) != 0) {
4227 		// allocate memory for a copy of the needed team data
4228 		struct ExtendedTeamData {
4229 			team_id	id;
4230 			pid_t	group_id;
4231 			pid_t	session_id;
4232 			uid_t	real_uid;
4233 			gid_t	real_gid;
4234 			uid_t	effective_uid;
4235 			gid_t	effective_gid;
4236 			char	name[B_OS_NAME_LENGTH];
4237 		};
4238 
4239 		ExtendedTeamData* teamClone
4240 			= (ExtendedTeamData*)malloc(sizeof(ExtendedTeamData));
4241 			// It would be nicer to use new, but then we'd have to use
4242 			// ObjectDeleter and declare the structure outside of the function
4243 			// due to template parameter restrictions.
4244 		if (teamClone == NULL)
4245 			return B_NO_MEMORY;
4246 		MemoryDeleter teamCloneDeleter(teamClone);
4247 
4248 		io_context* ioContext;
4249 		{
4250 			// get the team structure
4251 			Team* team = Team::GetAndLock(teamID);
4252 			if (team == NULL)
4253 				return B_BAD_TEAM_ID;
4254 			BReference<Team> teamReference(team, true);
4255 			TeamLocker teamLocker(team, true);
4256 
4257 			// copy the data
4258 			teamClone->id = team->id;
4259 			strlcpy(teamClone->name, team->Name(), sizeof(teamClone->name));
4260 			teamClone->group_id = team->group_id;
4261 			teamClone->session_id = team->session_id;
4262 			teamClone->real_uid = team->real_uid;
4263 			teamClone->real_gid = team->real_gid;
4264 			teamClone->effective_uid = team->effective_uid;
4265 			teamClone->effective_gid = team->effective_gid;
4266 
4267 			// also fetch a reference to the I/O context
4268 			ioContext = team->io_context;
4269 			vfs_get_io_context(ioContext);
4270 		}
4271 		CObjectDeleter<io_context> ioContextPutter(ioContext,
4272 			&vfs_put_io_context);
4273 
4274 		// add the basic data to the info message
4275 		if (info.AddInt32("id", teamClone->id) != B_OK
4276 			|| info.AddString("name", teamClone->name) != B_OK
4277 			|| info.AddInt32("process group", teamClone->group_id) != B_OK
4278 			|| info.AddInt32("session", teamClone->session_id) != B_OK
4279 			|| info.AddInt32("uid", teamClone->real_uid) != B_OK
4280 			|| info.AddInt32("gid", teamClone->real_gid) != B_OK
4281 			|| info.AddInt32("euid", teamClone->effective_uid) != B_OK
4282 			|| info.AddInt32("egid", teamClone->effective_gid) != B_OK) {
4283 			return B_NO_MEMORY;
4284 		}
4285 
4286 		// get the current working directory from the I/O context
4287 		dev_t cwdDevice;
4288 		ino_t cwdDirectory;
4289 		{
4290 			MutexLocker ioContextLocker(ioContext->io_mutex);
4291 			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
4292 		}
4293 
4294 		if (info.AddInt32("cwd device", cwdDevice) != B_OK
4295 			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
4296 			return B_NO_MEMORY;
4297 		}
4298 	}
4299 
4300 	// TODO: Support the other flags!
4301 
4302 	// copy the needed size and, if it fits, the message back to userland
4303 	size_t sizeNeeded = info.ContentSize();
4304 	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
4305 		return B_BAD_ADDRESS;
4306 
4307 	if (sizeNeeded > size)
4308 		return B_BUFFER_OVERFLOW;
4309 
4310 	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
4311 		return B_BAD_ADDRESS;
4312 
4313 	return B_OK;
4314 }
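
/*	Hedged caller sketch for the size-negotiation protocol above: query with a
	NULL buffer first, then retry with a buffer of the reported size. The
	_kern_get_extended_team_info() stub name is assumed from the usual syscall
	naming convention; unflattening the KMessage is only sketched:

		size_t sizeNeeded = 0;
		_kern_get_extended_team_info(teamID, B_TEAM_INFO_BASIC, NULL, 0,
			&sizeNeeded);
		void* buffer = malloc(sizeNeeded);
		if (buffer != NULL && _kern_get_extended_team_info(teamID,
				B_TEAM_INFO_BASIC, buffer, sizeNeeded, &sizeNeeded) == B_OK) {
			// unflatten buffer into a KMessage and read "id", "name", ...
		}
		free(buffer);
*/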
4315