xref: /haiku/src/system/kernel/team.cpp (revision 820dca4df6c7bf955c46e8f6521b9408f50b2900)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*!	Team functions */
12 
13 
14 #include <team.h>
15 
16 #include <errno.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include <string.h>
20 #include <sys/wait.h>
21 
22 #include <OS.h>
23 
24 #include <AutoDeleter.h>
25 #include <FindDirectory.h>
26 
27 #include <extended_system_info_defs.h>
28 
29 #include <boot_device.h>
30 #include <elf.h>
31 #include <file_cache.h>
32 #include <fs/KPath.h>
33 #include <heap.h>
34 #include <int.h>
35 #include <kernel.h>
36 #include <kimage.h>
37 #include <kscheduler.h>
38 #include <ksignal.h>
39 #include <Notifications.h>
40 #include <port.h>
41 #include <posix/realtime_sem.h>
42 #include <posix/xsi_semaphore.h>
43 #include <sem.h>
44 #include <syscall_process_info.h>
45 #include <syscall_restart.h>
46 #include <syscalls.h>
47 #include <tls.h>
48 #include <tracing.h>
49 #include <user_runtime.h>
50 #include <user_thread.h>
51 #include <usergroup.h>
52 #include <vfs.h>
53 #include <vm/vm.h>
54 #include <vm/VMAddressSpace.h>
55 #include <util/AutoLock.h>
56 
57 #include "TeamThreadTables.h"
58 
59 
60 //#define TRACE_TEAM
61 #ifdef TRACE_TEAM
62 #	define TRACE(x) dprintf x
63 #else
64 #	define TRACE(x) ;
65 #endif
66 
67 
68 struct team_key {
69 	team_id id;
70 };
71 
72 struct team_arg {
73 	char	*path;
74 	char	**flat_args;
75 	size_t	flat_args_size;
76 	uint32	arg_count;
77 	uint32	env_count;
78 	mode_t	umask;
79 	port_id	error_port;
80 	uint32	error_token;
81 };
82 
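// Ownership note: create_team_arg() below duplicates `path' and takes over
// the caller-supplied flat arguments buffer; free_team_arg() releases both
// again. Callers therefore reset their pointer (`_flatArgs = NULL') once the
// handoff has succeeded.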
83 
84 namespace {
85 
86 
87 class TeamNotificationService : public DefaultNotificationService {
88 public:
89 							TeamNotificationService();
90 
91 			void			Notify(uint32 eventCode, Team* team);
92 };
93 
94 
95 // #pragma mark - TeamTable
96 
97 
98 typedef BKernel::TeamThreadTable<Team> TeamTable;
99 
100 
101 // #pragma mark - ProcessGroupHashDefinition
102 
103 
104 struct ProcessGroupHashDefinition {
105 	typedef pid_t			KeyType;
106 	typedef	ProcessGroup	ValueType;
107 
108 	size_t HashKey(pid_t key) const
109 	{
110 		return key;
111 	}
112 
113 	size_t Hash(ProcessGroup* value) const
114 	{
115 		return HashKey(value->id);
116 	}
117 
118 	bool Compare(pid_t key, ProcessGroup* value) const
119 	{
120 		return value->id == key;
121 	}
122 
123 	ProcessGroup*& GetLink(ProcessGroup* value) const
124 	{
125 		return value->next;
126 	}
127 };
128 
129 typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
130 
131 
132 }	// unnamed namespace
133 
134 
135 // #pragma mark -
136 
137 
138 // the team_id -> Team hash table and the lock protecting it
139 static TeamTable sTeamHash;
140 static spinlock sTeamHashLock = B_SPINLOCK_INITIALIZER;
141 
142 // the pid_t -> ProcessGroup hash table and the lock protecting it
143 static ProcessGroupHashTable sGroupHash;
144 static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;
145 
146 static Team* sKernelTeam = NULL;
147 
148 // A list of process groups of children of dying session leaders that need to
149 // be signalled if they have become orphaned and contain stopped processes.
150 static ProcessGroupList sOrphanedCheckProcessGroups;
151 static mutex sOrphanedCheckLock
152 	= MUTEX_INITIALIZER("orphaned process group check");
153 
154 // some arbitrarily chosen limits -- should probably depend on the available
155 // memory (the limit is not yet enforced)
156 static int32 sMaxTeams = 2048;
157 static int32 sUsedTeams = 1;
158 
159 static TeamNotificationService sNotificationService;
160 
161 
162 // #pragma mark - TeamListIterator
163 
164 
165 TeamListIterator::TeamListIterator()
166 {
167 	// queue the entry
168 	InterruptsSpinLocker locker(sTeamHashLock);
169 	sTeamHash.InsertIteratorEntry(&fEntry);
170 }
171 
172 
173 TeamListIterator::~TeamListIterator()
174 {
175 	// remove the entry
176 	InterruptsSpinLocker locker(sTeamHashLock);
177 	sTeamHash.RemoveIteratorEntry(&fEntry);
178 }
179 
180 
181 Team*
182 TeamListIterator::Next()
183 {
184 	// get the next team -- if there is one, acquire a reference to it
185 	InterruptsSpinLocker locker(sTeamHashLock);
186 	Team* team = sTeamHash.NextElement(&fEntry);
187 	if (team != NULL)
188 		team->AcquireReference();
189 
190 	return team;
191 }
192 
193 
194 // #pragma mark - Tracing
195 
196 
197 #if TEAM_TRACING
198 namespace TeamTracing {
199 
200 class TeamForked : public AbstractTraceEntry {
201 public:
202 	TeamForked(thread_id forkedThread)
203 		:
204 		fForkedThread(forkedThread)
205 	{
206 		Initialized();
207 	}
208 
209 	virtual void AddDump(TraceOutput& out)
210 	{
211 		out.Print("team forked, new thread %ld", fForkedThread);
212 	}
213 
214 private:
215 	thread_id			fForkedThread;
216 };
217 
218 
219 class ExecTeam : public AbstractTraceEntry {
220 public:
221 	ExecTeam(const char* path, int32 argCount, const char* const* args,
222 			int32 envCount, const char* const* env)
223 		:
224 		fArgCount(argCount),
225 		fArgs(NULL)
226 	{
227 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
228 			false);
229 
230 		// determine the buffer size we need for the args
231 		size_t argBufferSize = 0;
232 		for (int32 i = 0; i < argCount; i++)
233 			argBufferSize += strlen(args[i]) + 1;
234 
235 		// allocate a buffer
236 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
237 		if (fArgs) {
238 			char* buffer = fArgs;
239 			for (int32 i = 0; i < argCount; i++) {
240 				size_t argSize = strlen(args[i]) + 1;
241 				memcpy(buffer, args[i], argSize);
242 				buffer += argSize;
243 			}
244 		}
245 
246 		// ignore env for the time being
247 		(void)envCount;
248 		(void)env;
249 
250 		Initialized();
251 	}
252 
253 	virtual void AddDump(TraceOutput& out)
254 	{
255 		out.Print("team exec, \"%s\", args:", fPath);
256 
257 		if (fArgs != NULL) {
258 			char* args = fArgs;
259 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
260 				out.Print(" \"%s\"", args);
261 				args += strlen(args) + 1;
262 			}
263 		} else
264 			out.Print(" <too long>");
265 	}
266 
267 private:
268 	char*	fPath;
269 	int32	fArgCount;
270 	char*	fArgs;
271 };
272 
273 
274 static const char*
275 job_control_state_name(job_control_state state)
276 {
277 	switch (state) {
278 		case JOB_CONTROL_STATE_NONE:
279 			return "none";
280 		case JOB_CONTROL_STATE_STOPPED:
281 			return "stopped";
282 		case JOB_CONTROL_STATE_CONTINUED:
283 			return "continued";
284 		case JOB_CONTROL_STATE_DEAD:
285 			return "dead";
286 		default:
287 			return "invalid";
288 	}
289 }
290 
291 
292 class SetJobControlState : public AbstractTraceEntry {
293 public:
294 	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
295 		:
296 		fTeam(team),
297 		fNewState(newState),
298 		fSignal(signal != NULL ? signal->Number() : 0)
299 	{
300 		Initialized();
301 	}
302 
303 	virtual void AddDump(TraceOutput& out)
304 	{
305 		out.Print("team set job control state, team %ld, "
306 			"new state: %s, signal: %d",
307 			fTeam, job_control_state_name(fNewState), fSignal);
308 	}
309 
310 private:
311 	team_id				fTeam;
312 	job_control_state	fNewState;
313 	int					fSignal;
314 };
315 
316 
317 class WaitForChild : public AbstractTraceEntry {
318 public:
319 	WaitForChild(pid_t child, uint32 flags)
320 		:
321 		fChild(child),
322 		fFlags(flags)
323 	{
324 		Initialized();
325 	}
326 
327 	virtual void AddDump(TraceOutput& out)
328 	{
329 		out.Print("team wait for child, child: %ld, "
330 			"flags: 0x%lx", fChild, fFlags);
331 	}
332 
333 private:
334 	pid_t	fChild;
335 	uint32	fFlags;
336 };
337 
338 
339 class WaitForChildDone : public AbstractTraceEntry {
340 public:
341 	WaitForChildDone(const job_control_entry& entry)
342 		:
343 		fState(entry.state),
344 		fTeam(entry.thread),
345 		fStatus(entry.status),
346 		fReason(entry.reason),
347 		fSignal(entry.signal)
348 	{
349 		Initialized();
350 	}
351 
352 	WaitForChildDone(status_t error)
353 		:
354 		fTeam(error)
355 	{
356 		Initialized();
357 	}
358 
359 	virtual void AddDump(TraceOutput& out)
360 	{
361 		if (fTeam >= 0) {
362 			out.Print("team wait for child done, team: %ld, "
363 				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
364 				fTeam, job_control_state_name(fState), fStatus, fReason,
365 				fSignal);
366 		} else {
367 			out.Print("team wait for child failed, error: "
368 				"0x%lx, ", fTeam);
369 		}
370 	}
371 
372 private:
373 	job_control_state	fState;
374 	team_id				fTeam;
375 	status_t			fStatus;
376 	uint16				fReason;
377 	uint16				fSignal;
378 };
379 
380 }	// namespace TeamTracing
381 
382 #	define T(x) new(std::nothrow) TeamTracing::x;
383 #else
384 #	define T(x) ;
385 #endif
386 
387 
388 //	#pragma mark - TeamNotificationService
389 
390 
391 TeamNotificationService::TeamNotificationService()
392 	: DefaultNotificationService("teams")
393 {
394 }
395 
396 
397 void
398 TeamNotificationService::Notify(uint32 eventCode, Team* team)
399 {
400 	char eventBuffer[128];
401 	KMessage event;
402 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
403 	event.AddInt32("event", eventCode);
404 	event.AddInt32("team", team->id);
405 	event.AddPointer("teamStruct", team);
406 
407 	DefaultNotificationService::Notify(event, eventCode);
408 }
409 
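// A consumer would subscribe to this service through the notification
// manager. A minimal sketch (the event mask and error handling are
// illustrative assumptions, not taken from this file):
//
//	NotificationManager& manager = NotificationManager::Manager();
//	status_t status = manager.AddListener("teams",
//		TEAM_ADDED | TEAM_REMOVED | TEAM_EXEC, listener);
//	if (status != B_OK)
//		...;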
410 
411 //	#pragma mark - Team
412 
413 
414 Team::Team(team_id id, bool kernel)
415 {
416 	// set the team ID
417 	this->id = id;
418 	visible = true;
419 	serial_number = -1;
420 
421 	// init mutex
422 	if (kernel) {
423 		mutex_init(&fLock, "Team:kernel");
424 	} else {
425 		char lockName[16];
426 		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
427 		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
428 	}
429 
430 	hash_next = siblings_next = children = parent = NULL;
431 	fName[0] = '\0';
432 	fArgs[0] = '\0';
433 	num_threads = 0;
434 	io_context = NULL;
435 	address_space = NULL;
436 	realtime_sem_context = NULL;
437 	xsi_sem_context = NULL;
438 	thread_list = NULL;
439 	main_thread = NULL;
440 	loading_info = NULL;
441 	state = TEAM_STATE_BIRTH;
442 	flags = 0;
443 	death_entry = NULL;
444 	user_data_area = -1;
445 	user_data = 0;
446 	used_user_data = 0;
447 	user_data_size = 0;
448 	free_user_threads = NULL;
449 
450 	supplementary_groups = NULL;
451 	supplementary_group_count = 0;
452 
453 	dead_threads_kernel_time = 0;
454 	dead_threads_user_time = 0;
455 	cpu_clock_offset = 0;
456 
457 	// dead threads
458 	list_init(&dead_threads);
459 	dead_threads_count = 0;
460 
461 	// dead children
462 	dead_children.count = 0;
463 	dead_children.kernel_time = 0;
464 	dead_children.user_time = 0;
465 
466 	// job control entry
467 	job_control_entry = new(nothrow) ::job_control_entry;
468 	if (job_control_entry != NULL) {
469 		job_control_entry->state = JOB_CONTROL_STATE_NONE;
470 		job_control_entry->thread = id;
471 		job_control_entry->team = this;
472 	}
473 
474 	// exit status -- setting initialized to false suffices
475 	exit.initialized = false;
476 
477 	list_init(&sem_list);
478 	list_init(&port_list);
479 	list_init(&image_list);
480 	list_init(&watcher_list);
481 
482 	clear_team_debug_info(&debug_info, true);
483 
484 	// init dead/stopped/continued children condition vars
485 	dead_children.condition_variable.Init(&dead_children, "team children");
486 
487 	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
488 		kernel ? -1 : MAX_QUEUED_SIGNALS);
489 	memset(fSignalActions, 0, sizeof(fSignalActions));
490 
491 	fUserDefinedTimerCount = 0;
492 }
493 
494 
495 Team::~Team()
496 {
497 	// get rid of all associated data
498 	PrepareForDeletion();
499 
500 	vfs_put_io_context(io_context);
501 	delete_owned_ports(this);
502 	sem_delete_owned_sems(this);
503 
504 	DeleteUserTimers(false);
505 
506 	fPendingSignals.Clear();
507 
508 	if (fQueuedSignalsCounter != NULL)
509 		fQueuedSignalsCounter->ReleaseReference();
510 
511 	while (thread_death_entry* threadDeathEntry
512 			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
513 		free(threadDeathEntry);
514 	}
515 
516 	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
517 		delete entry;
518 
519 	while (free_user_thread* entry = free_user_threads) {
520 		free_user_threads = entry->next;
521 		free(entry);
522 	}
523 
524 	malloc_referenced_release(supplementary_groups);
525 
526 	delete job_control_entry;
527 		// usually already NULL and transferred to the parent
528 
529 	mutex_destroy(&fLock);
530 }
531 
532 
533 /*static*/ Team*
534 Team::Create(team_id id, const char* name, bool kernel)
535 {
536 	// create the team object
537 	Team* team = new(std::nothrow) Team(id, kernel);
538 	if (team == NULL)
539 		return NULL;
540 	ObjectDeleter<Team> teamDeleter(team);
541 
542 	if (name != NULL)
543 		team->SetName(name);
544 
545 	// check initialization
546 	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
547 		return NULL;
548 
549 	// finish initialization (arch specifics)
550 	if (arch_team_init_team_struct(team, kernel) != B_OK)
551 		return NULL;
552 
553 	if (!kernel) {
554 		status_t error = user_timer_create_team_timers(team);
555 		if (error != B_OK)
556 			return NULL;
557 	}
558 
559 	// everything went fine
560 	return teamDeleter.Detach();
561 }
562 
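// Typical usage, modeled on load_image_internal() and fork_team() below: the
// caller owns the initial reference and usually hands it straight to a
// BReference (sketch):
//
//	Team* team = Team::Create(mainThread->id, path, false);
//	if (team == NULL)
//		return B_NO_MEMORY;
//	BReference<Team> teamReference(team, true);
//		// the `true' transfers ownership of the initial reference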
563 
564 /*!	\brief Returns the team with the given ID.
565 	Returns a reference to the team.
566 	Team and thread spinlock must not be held.
567 */
568 /*static*/ Team*
569 Team::Get(team_id id)
570 {
571 	if (id == B_CURRENT_TEAM) {
572 		Team* team = thread_get_current_thread()->team;
573 		team->AcquireReference();
574 		return team;
575 	}
576 
577 	InterruptsSpinLocker locker(sTeamHashLock);
578 	Team* team = sTeamHash.Lookup(id);
579 	if (team != NULL)
580 		team->AcquireReference();
581 	return team;
582 }
583 
584 
585 /*!	\brief Returns the team with the given ID in a locked state.
586 	Returns a reference to the team.
587 	Team and thread spinlock must not be held.
588 */
589 /*static*/ Team*
590 Team::GetAndLock(team_id id)
591 {
592 	// get the team
593 	Team* team = Get(id);
594 	if (team == NULL)
595 		return NULL;
596 
597 	// lock it
598 	team->Lock();
599 
600 	// only return the team if it isn't already dying
601 	if (team->state >= TEAM_STATE_SHUTDOWN) {
602 		team->Unlock();
603 		team->ReleaseReference();
604 		return NULL;
605 	}
606 
607 	return team;
608 }
609 
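// Sketch of the intended calling pattern, assuming the AutoLocker-style
// (object, alreadyLocked) constructors used elsewhere in the kernel:
//
//	Team* team = Team::GetAndLock(id);
//	if (team == NULL)
//		return B_BAD_TEAM_ID;
//	BReference<Team> teamReference(team, true);
//	TeamLocker teamLocker(team, true);
//	// ... work with the locked team; lock and reference are released
//	// automatically ...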
610 
611 /*!	Locks the team and its parent team (if any).
612 	The caller must hold a reference to the team or otherwise make sure that
613 	it won't be deleted.
614 	If the team doesn't have a parent, only the team itself is locked. If the
615 	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
616 	only the team itself is locked.
617 
618 	\param dontLockParentIfKernel If \c true, the team's parent team is only
619 		locked if it is not the kernel team.
620 */
621 void
622 Team::LockTeamAndParent(bool dontLockParentIfKernel)
623 {
624 	// The locking order is parent -> child. Since the parent can change as long
625 	// as we don't lock the team, we need to do a trial and error loop.
626 	Lock();
627 
628 	while (true) {
629 		// If the team doesn't have a parent, we're done. Otherwise try to lock
630 		// the parent. This will succeed in most cases, simplifying things.
631 		Team* parent = this->parent;
632 		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
633 			|| parent->TryLock()) {
634 			return;
635 		}
636 
637 		// get a temporary reference to the parent, unlock this team, lock the
638 		// parent, and re-lock this team
639 		BReference<Team> parentReference(parent);
640 
641 		Unlock();
642 		parent->Lock();
643 		Lock();
644 
645 		// If the parent hasn't changed in the meantime, we're done.
646 		if (this->parent == parent)
647 			return;
648 
649 		// The parent has changed -- unlock and retry.
650 		parent->Unlock();
651 	}
652 }
653 
654 
655 /*!	Unlocks the team and its parent team (if any).
656 */
657 void
658 Team::UnlockTeamAndParent()
659 {
660 	if (parent != NULL)
661 		parent->Unlock();
662 
663 	Unlock();
664 }
665 
666 
667 /*!	Locks the team, its parent team (if any), and the team's process group.
668 	The caller must hold a reference to the team or otherwise make sure that
669 	it won't be deleted.
670 	If the team doesn't have a parent, only the team itself is locked.
671 */
672 void
673 Team::LockTeamParentAndProcessGroup()
674 {
675 	LockTeamAndProcessGroup();
676 
677 	// We hold the group's and the team's lock, but not the parent team's lock.
678 	// If we have a parent, try to lock it.
679 	if (this->parent == NULL || this->parent->TryLock())
680 		return;
681 
682 	// No success -- unlock the team and let LockTeamAndParent() do the rest of
683 	// the job.
684 	Unlock();
685 	LockTeamAndParent(false);
686 }
687 
688 
689 /*!	Unlocks the team, its parent team (if any), and the team's process group.
690 */
691 void
692 Team::UnlockTeamParentAndProcessGroup()
693 {
694 	group->Unlock();
695 
696 	if (parent != NULL)
697 		parent->Unlock();
698 
699 	Unlock();
700 }
701 
702 
703 void
704 Team::LockTeamAndProcessGroup()
705 {
706 	// The locking order is process group -> child. Since the process group can
707 	// change as long as we don't lock the team, we need to do a trial and error
708 	// loop.
709 	Lock();
710 
711 	while (true) {
712 		// Try to lock the group. This will succeed in most cases, simplifying
713 		// things.
714 		ProcessGroup* group = this->group;
715 		if (group->TryLock())
716 			return;
717 
718 		// get a temporary reference to the group, unlock this team, lock the
719 		// group, and re-lock this team
720 		BReference<ProcessGroup> groupReference(group);
721 
722 		Unlock();
723 		group->Lock();
724 		Lock();
725 
726 		// If the group hasn't changed in the meantime, we're done.
727 		if (this->group == group)
728 			return;
729 
730 		// The group has changed -- unlock and retry.
731 		group->Unlock();
732 	}
733 }
734 
735 
736 void
737 Team::UnlockTeamAndProcessGroup()
738 {
739 	group->Unlock();
740 	Unlock();
741 }
742 
743 
744 void
745 Team::SetName(const char* name)
746 {
747 	if (const char* lastSlash = strrchr(name, '/'))
748 		name = lastSlash + 1;
749 
750 	strlcpy(fName, name, B_OS_NAME_LENGTH);
751 }
752 
753 
754 void
755 Team::SetArgs(const char* args)
756 {
757 	strlcpy(fArgs, args, sizeof(fArgs));
758 }
759 
760 
761 void
762 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
763 {
764 	fArgs[0] = '\0';
765 	strlcpy(fArgs, path, sizeof(fArgs));
766 	for (int i = 0; i < otherArgCount; i++) {
767 		strlcat(fArgs, " ", sizeof(fArgs));
768 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
769 	}
770 }
771 
772 
773 void
774 Team::ResetSignalsOnExec()
775 {
776 	// We are supposed to keep pending signals. Signal actions shall be reset
777 	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
778 	// (for SIGCHLD it's implementation-defined). Others shall be reset to
779 	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
780 	// flags, but since there aren't any handlers, they make little sense, so
781 	// we clear them.
782 
783 	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
784 		struct sigaction& action = SignalActionFor(i);
785 		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
786 			action.sa_handler = SIG_DFL;
787 
788 		action.sa_mask = 0;
789 		action.sa_flags = 0;
790 		action.sa_userdata = NULL;
791 	}
792 }
793 
794 
795 void
796 Team::InheritSignalActions(Team* parent)
797 {
798 	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
799 }
800 
801 
802 /*!	Adds the given user timer to the team and, if user-defined, assigns it an
803 	ID.
804 
805 	The caller must hold the team's lock.
806 
807 	\param timer The timer to be added. If it doesn't have an ID yet, it is
808 		considered user-defined and will be assigned an ID.
809 	\return \c B_OK, if the timer was added successfully, another error code
810 		otherwise.
811 */
812 status_t
813 Team::AddUserTimer(UserTimer* timer)
814 {
815 	// don't allow addition of timers when already shutting the team down
816 	if (state >= TEAM_STATE_SHUTDOWN)
817 		return B_BAD_TEAM_ID;
818 
819 	// If the timer is user-defined, check timer limit and increment
820 	// user-defined count.
821 	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
822 		return EAGAIN;
823 
824 	fUserTimers.AddTimer(timer);
825 
826 	return B_OK;
827 }
828 
829 
830 /*!	Removes the given user timer from the team.
831 
832 	The caller must hold the team's lock.
833 
834 	\param timer The timer to be removed.
835 
836 */
837 void
838 Team::RemoveUserTimer(UserTimer* timer)
839 {
840 	fUserTimers.RemoveTimer(timer);
841 
842 	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
843 		UserDefinedTimersRemoved(1);
844 }
845 
846 
847 /*!	Deletes all (or all user-defined) user timers of the team.
848 
849 	Timers belonging to the team's threads are not affected.
850 	The caller must hold the team's lock.
851 
852 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
853 		otherwise all timers are deleted.
854 */
855 void
856 Team::DeleteUserTimers(bool userDefinedOnly)
857 {
858 	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
859 	UserDefinedTimersRemoved(count);
860 }
861 
862 
863 /*!	If not at the limit yet, increments the team's user-defined timer count.
864 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
865 */
866 bool
867 Team::CheckAddUserDefinedTimer()
868 {
869 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
870 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
871 		atomic_add(&fUserDefinedTimerCount, -1);
872 		return false;
873 	}
874 
875 	return true;
876 }
877 
878 
879 /*!	Subtracts the given count from the team's user-defined timer count.
880 	\param count The count to subtract.
881 */
882 void
883 Team::UserDefinedTimersRemoved(int32 count)
884 {
885 	atomic_add(&fUserDefinedTimerCount, -count);
886 }
887 
888 
889 void
890 Team::DeactivateCPUTimeUserTimers()
891 {
892 	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
893 		timer->Deactivate();
894 
895 	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
896 		timer->Deactivate();
897 }
898 
899 
900 /*!	Returns the team's current total CPU time (kernel + user + offset).
901 
902 	The caller must hold the scheduler lock.
903 
904 	\param ignoreCurrentRun If \c true and the current thread is one of the
905 		team's threads, don't add the time since the last time \c last_time was
906 		updated. Should be used in "thread unscheduled" scheduler callbacks,
907 		since although the thread is still running at that time, its time has
908 		already been stopped.
909 	\return The team's current total CPU time.
910 */
911 bigtime_t
912 Team::CPUTime(bool ignoreCurrentRun) const
913 {
914 	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
915 		+ dead_threads_user_time;
916 
917 	Thread* currentThread = thread_get_current_thread();
918 	bigtime_t now = system_time();
919 
920 	for (Thread* thread = thread_list; thread != NULL;
921 			thread = thread->team_next) {
922 		SpinLocker threadTimeLocker(thread->time_lock);
923 		time += thread->kernel_time + thread->user_time;
924 
925 		if (thread->IsRunning()) {
926 			if (!ignoreCurrentRun || thread != currentThread)
927 				time += now - thread->last_time;
928 		}
929 	}
930 
931 	return time;
932 }
933 
934 
935 /*!	Returns the team's current user CPU time.
936 
937 	The caller must hold the scheduler lock.
938 
939 	\return The team's current user CPU time.
940 */
941 bigtime_t
942 Team::UserCPUTime() const
943 {
944 	bigtime_t time = dead_threads_user_time;
945 
946 	bigtime_t now = system_time();
947 
948 	for (Thread* thread = thread_list; thread != NULL;
949 			thread = thread->team_next) {
950 		SpinLocker threadTimeLocker(thread->time_lock);
951 		time += thread->user_time;
952 
953 		if (thread->IsRunning() && !thread->in_kernel)
954 			time += now - thread->last_time;
955 	}
956 
957 	return time;
958 }
959 
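// Both CPUTime() and UserCPUTime() expect the scheduler lock to be held, as
// in this sketch (cf. the gSchedulerLock usage in load_image_internal()):
//
//	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
//	bigtime_t total = team->CPUTime(false);
//	bigtime_t user = team->UserCPUTime();
//	schedulerLocker.Unlock();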
960 
961 //	#pragma mark - ProcessGroup
962 
963 
964 ProcessGroup::ProcessGroup(pid_t id)
965 	:
966 	id(id),
967 	teams(NULL),
968 	fSession(NULL),
969 	fInOrphanedCheckList(false)
970 {
971 	char lockName[32];
972 	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
973 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
974 }
975 
976 
977 ProcessGroup::~ProcessGroup()
978 {
979 	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));
980 
981 	// If the group is in the orphaned check list, remove it.
982 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
983 
984 	if (fInOrphanedCheckList)
985 		sOrphanedCheckProcessGroups.Remove(this);
986 
987 	orphanedCheckLocker.Unlock();
988 
989 	// remove group from the hash table and from the session
990 	if (fSession != NULL) {
991 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
992 		sGroupHash.RemoveUnchecked(this);
993 		groupHashLocker.Unlock();
994 
995 		fSession->ReleaseReference();
996 	}
997 
998 	mutex_destroy(&fLock);
999 }
1000 
1001 
1002 /*static*/ ProcessGroup*
1003 ProcessGroup::Get(pid_t id)
1004 {
1005 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1006 	ProcessGroup* group = sGroupHash.Lookup(id);
1007 	if (group != NULL)
1008 		group->AcquireReference();
1009 	return group;
1010 }
1011 
1012 
1013 /*!	Adds the group to the given session and makes it publicly accessible.
1014 	The caller must not hold the process group hash lock.
1015 */
1016 void
1017 ProcessGroup::Publish(ProcessSession* session)
1018 {
1019 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1020 	PublishLocked(session);
1021 }
1022 
1023 
1024 /*!	Adds the group to the given session and makes it publicly accessible.
1025 	The caller must hold the process group hash lock.
1026 */
1027 void
1028 ProcessGroup::PublishLocked(ProcessSession* session)
1029 {
1030 	ASSERT(sGroupHash.Lookup(this->id) == NULL);
1031 
1032 	fSession = session;
1033 	fSession->AcquireReference();
1034 
1035 	sGroupHash.InsertUnchecked(this);
1036 }
1037 
1038 
1039 /*!	Checks whether the process group is orphaned.
1040 	The caller must hold the group's lock.
1041 	\return \c true, if the group is orphaned, \c false otherwise.
1042 */
1043 bool
1044 ProcessGroup::IsOrphaned() const
1045 {
1046 	// Orphaned Process Group: "A process group in which the parent of every
1047 	// member is either itself a member of the group or is not a member of the
1048 	// group's session." (Open Group Base Specs Issue 7)
1049 	bool orphaned = true;
1050 
1051 	Team* team = teams;
1052 	while (orphaned && team != NULL) {
1053 		team->LockTeamAndParent(false);
1054 
1055 		Team* parent = team->parent;
1056 		if (parent != NULL && parent->group_id != id
1057 			&& parent->session_id == fSession->id) {
1058 			orphaned = false;
1059 		}
1060 
1061 		team->UnlockTeamAndParent();
1062 
1063 		team = team->group_next;
1064 	}
1065 
1066 	return orphaned;
1067 }
1068 
1069 
1070 void
1071 ProcessGroup::ScheduleOrphanedCheck()
1072 {
1073 	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1074 
1075 	if (!fInOrphanedCheckList) {
1076 		sOrphanedCheckProcessGroups.Add(this);
1077 		fInOrphanedCheckList = true;
1078 	}
1079 }
1080 
1081 
1082 void
1083 ProcessGroup::UnsetOrphanedCheck()
1084 {
1085 	fInOrphanedCheckList = false;
1086 }
1087 
1088 
1089 //	#pragma mark - ProcessSession
1090 
1091 
1092 ProcessSession::ProcessSession(pid_t id)
1093 	:
1094 	id(id),
1095 	controlling_tty(-1),
1096 	foreground_group(-1)
1097 {
1098 	char lockName[32];
1099 	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
1100 	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
1101 }
1102 
1103 
1104 ProcessSession::~ProcessSession()
1105 {
1106 	mutex_destroy(&fLock);
1107 }
1108 
1109 
1110 //	#pragma mark - KDL functions
1111 
1112 
1113 static void
1114 _dump_team_info(Team* team)
1115 {
1116 	kprintf("TEAM: %p\n", team);
1117 	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
1118 		team->id);
1119 	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
1120 	kprintf("name:             '%s'\n", team->Name());
1121 	kprintf("args:             '%s'\n", team->Args());
1122 	kprintf("hash_next:        %p\n", team->hash_next);
1123 	kprintf("parent:           %p", team->parent);
1124 	if (team->parent != NULL) {
1125 		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
1126 	} else
1127 		kprintf("\n");
1128 
1129 	kprintf("children:         %p\n", team->children);
1130 	kprintf("num_threads:      %d\n", team->num_threads);
1131 	kprintf("state:            %d\n", team->state);
1132 	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
1133 	kprintf("io_context:       %p\n", team->io_context);
1134 	if (team->address_space)
1135 		kprintf("address_space:    %p\n", team->address_space);
1136 	kprintf("user data:        %p (area %" B_PRId32 ")\n",
1137 		(void*)team->user_data, team->user_data_area);
1138 	kprintf("free user thread: %p\n", team->free_user_threads);
1139 	kprintf("main_thread:      %p\n", team->main_thread);
1140 	kprintf("thread_list:      %p\n", team->thread_list);
1141 	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
1142 	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
1143 }
1144 
1145 
1146 static int
1147 dump_team_info(int argc, char** argv)
1148 {
1149 	ulong arg;
1150 	bool found = false;
1151 
1152 	if (argc < 2) {
1153 		Thread* thread = thread_get_current_thread();
1154 		if (thread != NULL && thread->team != NULL)
1155 			_dump_team_info(thread->team);
1156 		else
1157 			kprintf("No current team!\n");
1158 		return 0;
1159 	}
1160 
1161 	arg = strtoul(argv[1], NULL, 0);
1162 	if (IS_KERNEL_ADDRESS(arg)) {
1163 		// semi-hack
1164 		_dump_team_info((Team*)arg);
1165 		return 0;
1166 	}
1167 
1168 	// walk through the team list, trying to match name or id
1169 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1170 		Team* team = it.Next();) {
1171 		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
1172 			|| team->id == (team_id)arg) {
1173 			_dump_team_info(team);
1174 			found = true;
1175 			break;
1176 		}
1177 	}
1178 
1179 	if (!found)
1180 		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
1181 	return 0;
1182 }
1183 
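// Expected invocations from the kernel debugger (sketch; the command is
// registered elsewhere, presumably via add_debugger_command()):
//
//	kdebug> team                 dump the current team
//	kdebug> team 0x80123400      dump by Team pointer (the semi-hack above)
//	kdebug> team 42              dump by team ID
//	kdebug> team app_server      dump by team name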
1184 
1185 static int
1186 dump_teams(int argc, char** argv)
1187 {
1188 	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
1189 		B_PRINTF_POINTER_WIDTH, "parent");
1190 
1191 	for (TeamTable::Iterator it = sTeamHash.GetIterator();
1192 		Team* team = it.Next();) {
1193 		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
1194 	}
1195 
1196 	return 0;
1197 }
1198 
1199 
1200 //	#pragma mark - Private functions
1201 
1202 
1203 /*!	Inserts team \a team into the child list of team \a parent.
1204 
1205 	The caller must hold the lock of both \a parent and \a team.
1206 
1207 	\param parent The parent team.
1208 	\param team The team to be inserted into \a parent's child list.
1209 */
1210 static void
1211 insert_team_into_parent(Team* parent, Team* team)
1212 {
1213 	ASSERT(parent != NULL);
1214 
1215 	team->siblings_next = parent->children;
1216 	parent->children = team;
1217 	team->parent = parent;
1218 }
1219 
1220 
1221 /*!	Removes team \a team from the child list of team \a parent.
1222 
1223 	The caller must hold the lock of both \a parent and \a team.
1224 
1225 	\param parent The parent team.
1226 	\param team The team to be removed from \a parent's child list.
1227 */
1228 static void
1229 remove_team_from_parent(Team* parent, Team* team)
1230 {
1231 	Team* child;
1232 	Team* last = NULL;
1233 
1234 	for (child = parent->children; child != NULL;
1235 			child = child->siblings_next) {
1236 		if (child == team) {
1237 			if (last == NULL)
1238 				parent->children = child->siblings_next;
1239 			else
1240 				last->siblings_next = child->siblings_next;
1241 
1242 			team->parent = NULL;
1243 			break;
1244 		}
1245 		last = child;
1246 	}
1247 }
1248 
1249 
1250 /*!	Returns whether the given team is a session leader.
1251 	The caller must hold the team's lock or its process group's lock.
1252 */
1253 static bool
1254 is_session_leader(Team* team)
1255 {
1256 	return team->session_id == team->id;
1257 }
1258 
1259 
1260 /*!	Returns whether the given team is a process group leader.
1261 	The caller must hold the team's lock or its process group's lock.
1262 */
1263 static bool
1264 is_process_group_leader(Team* team)
1265 {
1266 	return team->group_id == team->id;
1267 }
1268 
1269 
1270 /*!	Inserts the given team into the given process group.
1271 	The caller must hold the process group's lock, the team's lock, and the
1272 	team's parent's lock.
1273 */
1274 static void
1275 insert_team_into_group(ProcessGroup* group, Team* team)
1276 {
1277 	team->group = group;
1278 	team->group_id = group->id;
1279 	team->session_id = group->Session()->id;
1280 
1281 	team->group_next = group->teams;
1282 	group->teams = team;
1283 	group->AcquireReference();
1284 }
1285 
1286 
1287 /*!	Removes the given team from its process group.
1288 
1289 	The caller must hold the process group's lock, the team's lock, and the
1290 	team's parent's lock. Interrupts must be enabled.
1291 
1292 	\param team The team that'll be removed from its process group.
1293 */
1294 static void
1295 remove_team_from_group(Team* team)
1296 {
1297 	ProcessGroup* group = team->group;
1298 	Team* current;
1299 	Team* last = NULL;
1300 
1301 	// the team must be in a process group to let this function have any effect
1302 	if (group == NULL)
1303 		return;
1304 
1305 	for (current = group->teams; current != NULL;
1306 			current = current->group_next) {
1307 		if (current == team) {
1308 			if (last == NULL)
1309 				group->teams = current->group_next;
1310 			else
1311 				last->group_next = current->group_next;
1312 
1313 			team->group = NULL;
1314 			break;
1315 		}
1316 		last = current;
1317 	}
1318 
1319 	team->group = NULL;
1320 	team->group_next = NULL;
1321 
1322 	group->ReleaseReference();
1323 }
1324 
1325 
1326 static status_t
1327 create_team_user_data(Team* team)
1328 {
1329 	void* address;
1330 	size_t size = 4 * B_PAGE_SIZE;
1331 	virtual_address_restrictions virtualRestrictions = {};
1332 	virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
1333 	virtualRestrictions.address_specification = B_BASE_ADDRESS;
1334 	physical_address_restrictions physicalRestrictions = {};
1335 	team->user_data_area = create_area_etc(team->id, "user area", size,
1336 		B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0, &virtualRestrictions,
1337 		&physicalRestrictions, &address);
1338 	if (team->user_data_area < 0)
1339 		return team->user_data_area;
1340 
1341 	team->user_data = (addr_t)address;
1342 	team->used_user_data = 0;
1343 	team->user_data_size = size;
1344 	team->free_user_threads = NULL;
1345 
1346 	return B_OK;
1347 }
1348 
1349 
1350 static void
1351 delete_team_user_data(Team* team)
1352 {
1353 	if (team->user_data_area >= 0) {
1354 		vm_delete_area(team->id, team->user_data_area, true);
1355 		team->user_data = 0;
1356 		team->used_user_data = 0;
1357 		team->user_data_size = 0;
1358 		team->user_data_area = -1;
1359 		while (free_user_thread* entry = team->free_user_threads) {
1360 			team->free_user_threads = entry->next;
1361 			free(entry);
1362 		}
1363 	}
1364 }
1365 
1366 
1367 static status_t
1368 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1369 	int32 argCount, int32 envCount, char**& _flatArgs)
1370 {
1371 	if (argCount < 0 || envCount < 0)
1372 		return B_BAD_VALUE;
1373 
1374 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1375 		return B_TOO_MANY_ARGS;
1376 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1377 		return B_BAD_VALUE;
1378 
1379 	if (!IS_USER_ADDRESS(userFlatArgs))
1380 		return B_BAD_ADDRESS;
1381 
1382 	// allocate kernel memory
1383 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1384 	if (flatArgs == NULL)
1385 		return B_NO_MEMORY;
1386 
1387 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1388 		free(flatArgs);
1389 		return B_BAD_ADDRESS;
1390 	}
1391 
1392 	// check and relocate the array
1393 	status_t error = B_OK;
1394 	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
1395 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1396 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1397 		if (i == argCount || i == argCount + envCount + 1) {
1398 			// check array null termination
1399 			if (flatArgs[i] != NULL) {
1400 				error = B_BAD_VALUE;
1401 				break;
1402 			}
1403 		} else {
1404 			// check string
1405 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1406 			size_t maxLen = stringEnd - arg;
1407 			if (arg < stringBase || arg >= stringEnd
1408 					|| strnlen(arg, maxLen) == maxLen) {
1409 				error = B_BAD_VALUE;
1410 				break;
1411 			}
1412 
1413 			flatArgs[i] = arg;
1414 		}
1415 	}
1416 
1417 	if (error == B_OK)
1418 		_flatArgs = flatArgs;
1419 	else
1420 		free(flatArgs);
1421 
1422 	return error;
1423 }
1424 
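// Layout of the flat arguments buffer validated above, as implied by the
// checks:
//
//	char*	args[argCount + 1];		// argument pointers, NULL-terminated
//	char*	env[envCount + 1];		// environment pointers, NULL-terminated
//	char	strings[];				// string data the pointers refer to
//
// Since strings and pointer arrays are copied as one block, each user pointer
// is relocated into the kernel copy by adding its offset relative to
// `userFlatArgs' -- which is exactly what the loop above does.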
1425 
1426 static void
1427 free_team_arg(struct team_arg* teamArg)
1428 {
1429 	if (teamArg != NULL) {
1430 		free(teamArg->flat_args);
1431 		free(teamArg->path);
1432 		free(teamArg);
1433 	}
1434 }
1435 
1436 
1437 static status_t
1438 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1439 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1440 	port_id port, uint32 token)
1441 {
1442 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1443 	if (teamArg == NULL)
1444 		return B_NO_MEMORY;
1445 
1446 	teamArg->path = strdup(path);
1447 	if (teamArg->path == NULL) {
1448 		free(teamArg);
1449 		return B_NO_MEMORY;
1450 	}
1451 
1452 	// copy the args over
1453 
1454 	teamArg->flat_args = flatArgs;
1455 	teamArg->flat_args_size = flatArgsSize;
1456 	teamArg->arg_count = argCount;
1457 	teamArg->env_count = envCount;
1458 	teamArg->umask = umask;
1459 	teamArg->error_port = port;
1460 	teamArg->error_token = token;
1461 
1462 	*_teamArg = teamArg;
1463 	return B_OK;
1464 }
1465 
1466 
1467 static status_t
1468 team_create_thread_start_internal(void* args)
1469 {
1470 	status_t err;
1471 	Thread* thread;
1472 	Team* team;
1473 	struct team_arg* teamArgs = (struct team_arg*)args;
1474 	const char* path;
1475 	addr_t entry;
1476 	char** userArgs;
1477 	char** userEnv;
1478 	struct user_space_program_args* programArgs;
1479 	uint32 argCount, envCount;
1480 
1481 	thread = thread_get_current_thread();
1482 	team = thread->team;
1483 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
1484 
1485 	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
1486 		thread->id));
1487 
1488 	// Main stack area layout is currently as follows (starting from 0):
1489 	//
1490 	// size								| usage
1491 	// ---------------------------------+--------------------------------
1492 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
1493 	// TLS_SIZE							| TLS data
1494 	// sizeof(user_space_program_args)	| argument structure for the runtime
1495 	//									| loader
1496 	// flat arguments size				| flat process arguments and environment
1497 
1498 	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
1499 	// the heap
1500 	// TODO: we could reserve the whole USER_STACK_REGION upfront...
1501 
1502 	argCount = teamArgs->arg_count;
1503 	envCount = teamArgs->env_count;
1504 
1505 	programArgs = (struct user_space_program_args*)(thread->user_stack_base
1506 		+ thread->user_stack_size + TLS_SIZE);
1507 
1508 	userArgs = (char**)(programArgs + 1);
1509 	userEnv = userArgs + argCount + 1;
1510 	path = teamArgs->path;
1511 
1512 	if (user_strlcpy(programArgs->program_path, path,
1513 				sizeof(programArgs->program_path)) < B_OK
1514 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1515 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
1516 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1517 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
1518 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1519 				sizeof(port_id)) < B_OK
1520 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1521 				sizeof(uint32)) < B_OK
1522 		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
1523 		|| user_memcpy(userArgs, teamArgs->flat_args,
1524 				teamArgs->flat_args_size) < B_OK) {
1525 		// the team deletion process will clean this mess
1526 		free_team_arg(teamArgs);
1527 		return B_BAD_ADDRESS;
1528 	}
1529 
1530 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1531 
1532 	// set team args and update state
1533 	team->Lock();
1534 	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
1535 	team->state = TEAM_STATE_NORMAL;
1536 	team->Unlock();
1537 
1538 	free_team_arg(teamArgs);
1539 		// the arguments are already on the user stack, we no longer need
1540 		// them in this form
1541 
1542 	// NOTE: Normally arch_thread_enter_userspace() never returns, that is,
1543 	// automatic variables with function scope will never be destroyed.
1544 	{
1545 		// find runtime_loader path
1546 		KPath runtimeLoaderPath;
1547 		err = find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
1548 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1549 		if (err < B_OK) {
1550 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1551 				strerror(err)));
1552 			return err;
1553 		}
1554 		runtimeLoaderPath.UnlockBuffer();
1555 		err = runtimeLoaderPath.Append("runtime_loader");
1556 
1557 		if (err == B_OK) {
1558 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
1559 				&entry);
1560 		}
1561 	}
1562 
1563 	if (err < B_OK) {
1564 		// Luckily, we don't have to clean up the mess we created - that's
1565 		// done for us by the normal team deletion process
1566 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1567 			"%s\n", strerror(err)));
1568 		return err;
1569 	}
1570 
1571 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1572 
1573 	// enter userspace -- returns only in case of error
1574 	return thread_enter_userspace_new_team(thread, (addr_t)entry,
1575 		programArgs, NULL);
1576 }
1577 
1578 
1579 static status_t
1580 team_create_thread_start(void* args)
1581 {
1582 	team_create_thread_start_internal(args);
1583 	team_init_exit_info_on_error(thread_get_current_thread()->team);
1584 	thread_exit();
1585 		// does not return
1586 	return B_OK;
1587 }
1588 
1589 
1590 static thread_id
1591 load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1592 	int32 envCount, int32 priority, team_id parentID, uint32 flags,
1593 	port_id errorPort, uint32 errorToken)
1594 {
1595 	char** flatArgs = _flatArgs;
1596 	thread_id thread;
1597 	status_t status;
1598 	struct team_arg* teamArgs;
1599 	struct team_loading_info loadingInfo;
1600 	io_context* parentIOContext = NULL;
1601 	team_id teamID;
1602 
1603 	if (flatArgs == NULL || argCount == 0)
1604 		return B_BAD_VALUE;
1605 
1606 	const char* path = flatArgs[0];
1607 
1608 	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
1609 		"\n", path, flatArgs, argCount));
1610 
1611 	// cut the path from the main thread name
1612 	const char* threadName = strrchr(path, '/');
1613 	if (threadName != NULL)
1614 		threadName++;
1615 	else
1616 		threadName = path;
1617 
1618 	// create the main thread object
1619 	Thread* mainThread;
1620 	status = Thread::Create(threadName, mainThread);
1621 	if (status != B_OK)
1622 		return status;
1623 	BReference<Thread> mainThreadReference(mainThread, true);
1624 
1625 	// create team object
1626 	Team* team = Team::Create(mainThread->id, path, false);
1627 	if (team == NULL)
1628 		return B_NO_MEMORY;
1629 	BReference<Team> teamReference(team, true);
1630 
1631 	if (flags & B_WAIT_TILL_LOADED) {
1632 		loadingInfo.thread = thread_get_current_thread();
1633 		loadingInfo.result = B_ERROR;
1634 		loadingInfo.done = false;
1635 		team->loading_info = &loadingInfo;
1636 	}
1637 
1638 	// get the parent team
1639 	Team* parent = Team::Get(parentID);
1640 	if (parent == NULL)
1641 		return B_BAD_TEAM_ID;
1642 	BReference<Team> parentReference(parent, true);
1643 
1644 	parent->LockTeamAndProcessGroup();
1645 	team->Lock();
1646 
1647 	// inherit the parent's user/group
1648 	inherit_parent_user_and_group(team, parent);
1649 
1650  	InterruptsSpinLocker teamsLocker(sTeamHashLock);
1651 
1652 	sTeamHash.Insert(team);
1653 	sUsedTeams++;
1654 
1655 	teamsLocker.Unlock();
1656 
1657 	insert_team_into_parent(parent, team);
1658 	insert_team_into_group(parent->group, team);
1659 
1660 	// get a reference to the parent's I/O context -- we need it to create ours
1661 	parentIOContext = parent->io_context;
1662 	vfs_get_io_context(parentIOContext);
1663 
1664 	team->Unlock();
1665 	parent->UnlockTeamAndProcessGroup();
1666 
1667 	// notify team listeners
1668 	sNotificationService.Notify(TEAM_ADDED, team);
1669 
1670 	// check the executable's set-user/group-id permission
1671 	update_set_id_user_and_group(team, path);
1672 
1673 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1674 		envCount, (mode_t)-1, errorPort, errorToken);
1675 	if (status != B_OK)
1676 		goto err1;
1677 
1678 	_flatArgs = NULL;
1679 		// args are owned by the team_arg structure now
1680 
1681 	// create a new io_context for this team
1682 	team->io_context = vfs_new_io_context(parentIOContext, true);
1683 	if (!team->io_context) {
1684 		status = B_NO_MEMORY;
1685 		goto err2;
1686 	}
1687 
1688 	// We don't need the parent's I/O context any longer.
1689 	vfs_put_io_context(parentIOContext);
1690 	parentIOContext = NULL;
1691 
1692 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1693 	vfs_exec_io_context(team->io_context);
1694 
1695 	// create an address space for this team
1696 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
1697 		&team->address_space);
1698 	if (status != B_OK)
1699 		goto err3;
1700 
1701 	// create the user data area
1702 	status = create_team_user_data(team);
1703 	if (status != B_OK)
1704 		goto err4;
1705 
1706 	// In case we start the main thread, we shouldn't access the team object
1707 	// afterwards, so cache the team's ID.
1708 	teamID = team->id;
1709 
1710 	// Create a kernel thread, but under the context of the new team
1711 	// The new thread will take over ownership of teamArgs.
1712 	{
1713 		ThreadCreationAttributes threadAttributes(team_create_thread_start,
1714 			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
1715 		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
1716 			+ teamArgs->flat_args_size;
1717 		thread = thread_create_thread(threadAttributes, false);
1718 		if (thread < 0) {
1719 			status = thread;
1720 			goto err5;
1721 		}
1722 	}
1723 
1724 	// The team has been created successfully, so we keep the reference. Or
1725 	// more precisely: It's owned by the team's main thread, now.
1726 	teamReference.Detach();
1727 
1728 	// wait for the loader of the new team to finish its work
1729 	if ((flags & B_WAIT_TILL_LOADED) != 0) {
1730 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1731 
1732 		// resume the team's main thread
1733 		if (mainThread != NULL && mainThread->state == B_THREAD_SUSPENDED)
1734 			scheduler_enqueue_in_run_queue(mainThread);
1735 
1736 		// Now suspend ourselves until loading is finished. We will be woken
1737 		// either by the thread, when it finished or aborted loading, or when
1738 		// the team is going to die (e.g. is killed). In either case the one
1739 		// setting `loadingInfo.done' is responsible for removing the info from
1740 		// the team structure.
1741 		while (!loadingInfo.done) {
1742 			thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
1743 			scheduler_reschedule();
1744 		}
1745 
1746 		schedulerLocker.Unlock();
1747 
1748 		if (loadingInfo.result < B_OK)
1749 			return loadingInfo.result;
1750 	}
1751 
1752 	// notify the debugger
1753 	user_debug_team_created(teamID);
1754 
1755 	return thread;
1756 
1757 err5:
1758 	delete_team_user_data(team);
1759 err4:
1760 	team->address_space->Put();
1761 err3:
1762 	vfs_put_io_context(team->io_context);
1763 err2:
1764 	free_team_arg(teamArgs);
1765 err1:
1766 	if (parentIOContext != NULL)
1767 		vfs_put_io_context(parentIOContext);
1768 
1769 	// Remove the team structure from the process group, the parent team, and
1770 	// the team hash table and delete the team structure.
1771 	parent->LockTeamAndProcessGroup();
1772 	team->Lock();
1773 
1774 	remove_team_from_group(team);
1775 	remove_team_from_parent(team->parent, team);
1776 
1777 	team->Unlock();
1778 	parent->UnlockTeamAndProcessGroup();
1779 
1780 	teamsLocker.Lock();
1781 	sTeamHash.Remove(team);
1782 	teamsLocker.Unlock();
1783 
1784 	sNotificationService.Notify(TEAM_REMOVED, team);
1785 
1786 	return status;
1787 }
1788 
1789 
1790 /*!	Almost shuts down the current team and loads a new image into it.
1791 	If successful, this function does not return and will take over ownership of
1792 	the arguments provided.
1793 	This function may only be called in a userland team (caused by one of the
1794 	exec*() syscalls).
1795 */
1796 static status_t
1797 exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
1798 	int32 argCount, int32 envCount, mode_t umask)
1799 {
1800 	// NOTE: Since this function normally doesn't return, don't use automatic
1801 	// variables that need destruction in the function scope.
1802 	char** flatArgs = _flatArgs;
1803 	Team* team = thread_get_current_thread()->team;
1804 	struct team_arg* teamArgs;
1805 	const char* threadName;
1806 	thread_id nubThreadID = -1;
1807 
1808 	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
1809 		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
1810 		team->id));
1811 
1812 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1813 
1814 	// switching the kernel at run time is probably not a good idea :)
1815 	if (team == team_get_kernel_team())
1816 		return B_NOT_ALLOWED;
1817 
1818 	// we currently need to be single threaded here
1819 	// TODO: maybe we should just kill all other threads and
1820 	//	make the current thread the team's main thread?
1821 	Thread* currentThread = thread_get_current_thread();
1822 	if (currentThread != team->main_thread)
1823 		return B_NOT_ALLOWED;
1824 
1825 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1826 	// We iterate through the thread list to make sure that there's no other
1827 	// thread.
1828 	TeamLocker teamLocker(team);
1829 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
1830 
1831 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1832 		nubThreadID = team->debug_info.nub_thread;
1833 
1834 	debugInfoLocker.Unlock();
1835 
1836 	for (Thread* thread = team->thread_list; thread != NULL;
1837 			thread = thread->team_next) {
1838 		if (thread != team->main_thread && thread->id != nubThreadID)
1839 			return B_NOT_ALLOWED;
1840 	}
1841 
1842 	team->DeleteUserTimers(true);
1843 	team->ResetSignalsOnExec();
1844 
1845 	teamLocker.Unlock();
1846 
1847 	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
1848 		argCount, envCount, umask, -1, 0);
1849 	if (status != B_OK)
1850 		return status;
1851 
1852 	_flatArgs = NULL;
1853 		// args are owned by the team_arg structure now
1854 
1855 	// TODO: remove team resources if there are any left
1856 	// thread_atkernel_exit() might not be called at all
1857 
1858 	thread_reset_for_exec();
1859 
1860 	user_debug_prepare_for_exec();
1861 
1862 	delete_team_user_data(team);
1863 	vm_delete_areas(team->address_space, false);
1864 	xsi_sem_undo(team);
1865 	delete_owned_ports(team);
1866 	sem_delete_owned_sems(team);
1867 	remove_images(team);
1868 	vfs_exec_io_context(team->io_context);
1869 	delete_realtime_sem_context(team->realtime_sem_context);
1870 	team->realtime_sem_context = NULL;
1871 
1872 	status = create_team_user_data(team);
1873 	if (status != B_OK) {
1874 		// creating the user data failed -- we're toast
1875 		// TODO: We should better keep the old user area in the first place.
1876 		free_team_arg(teamArgs);
1877 		exit_thread(status);
1878 		return status;
1879 	}
1880 
1881 	user_debug_finish_after_exec();
1882 
1883 	// rename the team
1884 
1885 	team->Lock();
1886 	team->SetName(path);
1887 	team->Unlock();
1888 
1889 	// cut the path from the team name and rename the main thread, too
1890 	threadName = strrchr(path, '/');
1891 	if (threadName != NULL)
1892 		threadName++;
1893 	else
1894 		threadName = path;
1895 	rename_thread(thread_get_current_thread_id(), threadName);
1896 
1897 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1898 
1899 	// Update user/group according to the executable's set-user/group-id
1900 	// permission.
1901 	update_set_id_user_and_group(team, path);
1902 
1903 	user_debug_team_exec();
1904 
1905 	// notify team listeners
1906 	sNotificationService.Notify(TEAM_EXEC, team);
1907 
1908 	// get a user thread for the thread
1909 	user_thread* userThread = team_allocate_user_thread(team);
1910 		// cannot fail (the allocation for the team would have failed already)
1911 	ThreadLocker currentThreadLocker(currentThread);
1912 	currentThread->user_thread = userThread;
1913 	currentThreadLocker.Unlock();
1914 
1915 	// create the user stack for the thread
1916 	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
1917 		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
1918 	if (status == B_OK) {
1919 		// prepare the stack, load the runtime loader, and enter userspace
1920 		team_create_thread_start(teamArgs);
1921 			// never returns
1922 	} else
1923 		free_team_arg(teamArgs);
1924 
1925 	// Sorry, we have to kill ourselves, there is no way out anymore
1926 	// (without any areas left and all that).
1927 	exit_thread(status);
1928 
1929 	// We return a status here since the signal that is sent by the
1930 	// call above is not immediately handled.
1931 	return B_ERROR;
1932 }
1933 
1934 
1935 static thread_id
1936 fork_team(void)
1937 {
1938 	Thread* parentThread = thread_get_current_thread();
1939 	Team* parentTeam = parentThread->team;
1940 	Team* team;
1941 	arch_fork_arg* forkArgs;
1942 	struct area_info info;
1943 	thread_id threadID;
1944 	status_t status;
1945 	ssize_t areaCookie;
1946 	int32 imageCookie;
1947 
1948 	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
1949 
1950 	if (parentTeam == team_get_kernel_team())
1951 		return B_NOT_ALLOWED;
1952 
1953 	// create a new team
1954 	// TODO: this is very similar to load_image_internal() - maybe we can do
1955 	// something about it :)
1956 
1957 	// create the main thread object
1958 	Thread* thread;
1959 	status = Thread::Create(parentThread->name, thread);
1960 	if (status != B_OK)
1961 		return status;
1962 	BReference<Thread> threadReference(thread, true);
1963 
1964 	// create the team object
1965 	team = Team::Create(thread->id, NULL, false);
1966 	if (team == NULL)
1967 		return B_NO_MEMORY;
1968 
1969 	parentTeam->LockTeamAndProcessGroup();
1970 	team->Lock();
1971 
1972 	team->SetName(parentTeam->Name());
1973 	team->SetArgs(parentTeam->Args());
1974 
1975 	// Inherit the parent's user/group.
1976 	inherit_parent_user_and_group(team, parentTeam);
1977 
1978 	// inherit signal handlers
1979 	team->InheritSignalActions(parentTeam);
1980 
1981 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
1982 
1983 	sTeamHash.Insert(team);
1984 	sUsedTeams++;
1985 
1986 	teamsLocker.Unlock();
1987 
1988 	insert_team_into_parent(parentTeam, team);
1989 	insert_team_into_group(parentTeam->group, team);
1990 
1991 	team->Unlock();
1992 	parentTeam->UnlockTeamAndProcessGroup();
1993 
1994 	// notify team listeners
1995 	sNotificationService.Notify(TEAM_ADDED, team);
1996 
1997 	// inherit some team debug flags
1998 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
1999 		& B_TEAM_DEBUG_INHERITED_FLAGS;
2000 
2001 	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2002 	if (forkArgs == NULL) {
2003 		status = B_NO_MEMORY;
2004 		goto err1;
2005 	}
2006 
2007 	// create a new io_context for this team
2008 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2009 	if (!team->io_context) {
2010 		status = B_NO_MEMORY;
2011 		goto err2;
2012 	}
2013 
2014 	// duplicate the realtime sem context
2015 	if (parentTeam->realtime_sem_context) {
2016 		team->realtime_sem_context = clone_realtime_sem_context(
2017 			parentTeam->realtime_sem_context);
2018 		if (team->realtime_sem_context == NULL) {
2019 			status = B_NO_MEMORY;
2020 			goto err25;
2021 		}
2022 	}
2023 
2024 	// create an address space for this team
2025 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2026 		&team->address_space);
2027 	if (status < B_OK)
2028 		goto err3;
2029 
2030 	// copy all areas of the team
2031 	// TODO: should be able to handle stack areas differently (i.e. don't have
2032 	// them copy-on-write)
2033 
2034 	areaCookie = 0;
2035 	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2036 		if (info.area == parentTeam->user_data_area) {
2037 			// don't clone the user area; just create a new one
2038 			status = create_team_user_data(team);
2039 			if (status != B_OK)
2040 				break;
2041 
2042 			thread->user_thread = team_allocate_user_thread(team);
2043 		} else {
2044 			void* address;
2045 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
2046 				&address, B_CLONE_ADDRESS, info.protection, info.area);
2047 			if (area < B_OK) {
2048 				status = area;
2049 				break;
2050 			}
2051 
2052 			if (info.area == parentThread->user_stack_area)
2053 				thread->user_stack_area = area;
2054 		}
2055 	}
2056 
2057 	if (status < B_OK)
2058 		goto err4;
2059 
2060 	if (thread->user_thread == NULL) {
2061 #if KDEBUG
2062 		panic("user data area not found, parent area is %" B_PRId32,
2063 			parentTeam->user_data_area);
2064 #endif
2065 		status = B_ERROR;
2066 		goto err4;
2067 	}
2068 
2069 	thread->user_stack_base = parentThread->user_stack_base;
2070 	thread->user_stack_size = parentThread->user_stack_size;
2071 	thread->user_local_storage = parentThread->user_local_storage;
2072 	thread->sig_block_mask = parentThread->sig_block_mask;
2073 	thread->signal_stack_base = parentThread->signal_stack_base;
2074 	thread->signal_stack_size = parentThread->signal_stack_size;
2075 	thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2076 
2077 	arch_store_fork_frame(forkArgs);
2078 
2079 	// copy image list
2080 	image_info imageInfo;
2081 	imageCookie = 0;
2082 	while (get_next_image_info(parentTeam->id, &imageCookie, &imageInfo)
2083 			== B_OK) {
2084 		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
2085 		if (image < 0)
2086 			goto err5;
2087 	}
2088 
2089 	// create the main thread
2090 	{
2091 		ThreadCreationAttributes threadCreationAttributes(NULL,
2092 			parentThread->name, parentThread->priority, NULL, team->id, thread);
2093 		threadCreationAttributes.forkArgs = forkArgs;
2094 		threadID = thread_create_thread(threadCreationAttributes, false);
2095 		if (threadID < 0) {
2096 			status = threadID;
2097 			goto err5;
2098 		}
2099 	}
2100 
2101 	// notify the debugger
2102 	user_debug_team_created(team->id);
2103 
2104 	T(TeamForked(threadID));
2105 
2106 	resume_thread(threadID);
2107 	return threadID;
2108 
2109 err5:
2110 	remove_images(team);
2111 err4:
2112 	team->address_space->RemoveAndPut();
2113 err3:
2114 	delete_realtime_sem_context(team->realtime_sem_context);
2115 err25:
2116 	vfs_put_io_context(team->io_context);
2117 err2:
2118 	free(forkArgs);
2119 err1:
2120 	// Remove the team structure from the process group, the parent team, and
2121 	// the team hash table and delete the team structure.
2122 	parentTeam->LockTeamAndProcessGroup();
2123 	team->Lock();
2124 
2125 	remove_team_from_group(team);
2126 	remove_team_from_parent(team->parent, team);
2127 
2128 	team->Unlock();
2129 	parentTeam->UnlockTeamAndProcessGroup();
2130 
2131 	teamsLocker.Lock();
2132 	sTeamHash.Remove(team);
2133 	teamsLocker.Unlock();
2134 
2135 	sNotificationService.Notify(TEAM_REMOVED, team);
2136 
2137 	team->ReleaseReference();
2138 
2139 	return status;
2140 }
2141 
2142 
2143 /*!	Returns whether the specified team \a parent has any children belonging
2144 	to the process group with the specified ID \a groupID.
2145 	The caller must hold \a parent's lock.
2146 */
2147 static bool
2148 has_children_in_group(Team* parent, pid_t groupID)
2149 {
2150 	for (Team* child = parent->children; child != NULL;
2151 			child = child->siblings_next) {
2152 		TeamLocker childLocker(child);
2153 		if (child->group_id == groupID)
2154 			return true;
2155 	}
2156 
2157 	return false;
2158 }
2159 
2160 
2161 /*!	Returns the first job control entry from \a children, which matches \a id.
2162 	\a id can be:
2163 	- \code > 0 \endcode: Matching an entry with that team ID.
2164 	- \code == -1 \endcode: Matching any entry.
2165 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2166 	\c 0 is an invalid value for \a id.
2167 
2168 	The caller must hold the lock of the team that \a children belongs to.
2169 
2170 	\param children The job control entry list to check.
2171 	\param id The match criterion.
2172 	\return The first matching entry or \c NULL, if none matches.
2173 */
2174 static job_control_entry*
2175 get_job_control_entry(team_job_control_children& children, pid_t id)
2176 {
2177 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2178 		 job_control_entry* entry = it.Next();) {
2179 
2180 		if (id > 0) {
2181 			if (entry->thread == id)
2182 				return entry;
2183 		} else if (id == -1) {
2184 			return entry;
2185 		} else {
2186 			pid_t processGroup
2187 				= (entry->team ? entry->team->group_id : entry->group_id);
2188 			if (processGroup == -id)
2189 				return entry;
2190 		}
2191 	}
2192 
2193 	return NULL;
2194 }
2195 
2196 
2197 /*!	Returns the first job control entry from one of the team's dead,
2198 	continued, or stopped children which matches \a id.
2199 	\a id can be:
2200 	- \code > 0 \endcode: Matching an entry with that team ID.
2201 	- \code == -1 \endcode: Matching any entry.
2202 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2203 	\c 0 is an invalid value for \a id.
2204 
2205 	The caller must hold \a team's lock.
2206 
2207 	\param team The team whose dead, stopped, and continued child lists shall be
2208 		checked.
2209 	\param id The match criterion.
2210 	\param flags Specifies which children shall be considered. Dead children
2211 		always are. Stopped children are considered when \a flags contains
2212 		\c WUNTRACED, continued children when \a flags contains
2213 		\c WCONTINUED.
2214 	\return The first matching entry or \c NULL, if none matches.
2215 */
2216 static job_control_entry*
2217 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2218 {
2219 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
2220 
2221 	if (entry == NULL && (flags & WCONTINUED) != 0)
2222 		entry = get_job_control_entry(team->continued_children, id);
2223 
2224 	if (entry == NULL && (flags & WUNTRACED) != 0)
2225 		entry = get_job_control_entry(team->stopped_children, id);
2226 
2227 	return entry;
2228 }
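

/*	Illustrative sketch (not part of the original source): how the \a id
	convention above maps to POSIX waitpid()-style arguments. A hypothetical
	caller holding the team's lock might do:

		// waitpid(-1, ...): any dead child (plus stopped ones with WUNTRACED)
		job_control_entry* any = get_job_control_entry(team, -1, WUNTRACED);

		// waitpid(1234, ...): the child team with ID 1234 only
		job_control_entry* one = get_job_control_entry(team, 1234, 0);

		// waitpid(-42, ...): any child in process group 42
		job_control_entry* inGroup = get_job_control_entry(team, -42, 0);
*/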
2229 
2230 
2231 job_control_entry::job_control_entry()
2232 	:
2233 	has_group_ref(false)
2234 {
2235 }
2236 
2237 
2238 job_control_entry::~job_control_entry()
2239 {
2240 	if (has_group_ref) {
2241 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2242 
2243 		ProcessGroup* group = sGroupHash.Lookup(group_id);
2244 		if (group == NULL) {
2245 			panic("job_control_entry::~job_control_entry(): unknown group "
2246 				"ID: %" B_PRId32, group_id);
2247 			return;
2248 		}
2249 
2250 		groupHashLocker.Unlock();
2251 
2252 		group->ReleaseReference();
2253 	}
2254 }
2255 
2256 
2257 /*!	Invoked when the owning team is dying, initializing the entry according to
2258 	the dead state.
2259 
2260 	The caller must hold the owning team's lock and the scheduler lock.
2261 */
2262 void
2263 job_control_entry::InitDeadState()
2264 {
2265 	if (team != NULL) {
2266 		ASSERT(team->exit.initialized);
2267 
2268 		group_id = team->group_id;
2269 		team->group->AcquireReference();
2270 		has_group_ref = true;
2271 
2272 		thread = team->id;
2273 		status = team->exit.status;
2274 		reason = team->exit.reason;
2275 		signal = team->exit.signal;
2276 		signaling_user = team->exit.signaling_user;
2277 
2278 		team = NULL;
2279 	}
2280 }
2281 
2282 
2283 job_control_entry&
2284 job_control_entry::operator=(const job_control_entry& other)
2285 {
2286 	state = other.state;
2287 	thread = other.thread;
2288 	signal = other.signal;
2289 	has_group_ref = false;
2290 	signaling_user = other.signaling_user;
2291 	team = other.team;
2292 	group_id = other.group_id;
2293 	status = other.status;
2294 	reason = other.reason;
2295 
2296 	return *this;
2297 }
2298 
2299 
2300 /*! This is the kernel backend for waitid().
2301 */
2302 static thread_id
2303 wait_for_child(pid_t child, uint32 flags, siginfo_t& _info)
2304 {
2305 	Thread* thread = thread_get_current_thread();
2306 	Team* team = thread->team;
2307 	struct job_control_entry foundEntry;
2308 	struct job_control_entry* freeDeathEntry = NULL;
2309 	status_t status = B_OK;
2310 
2311 	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRIu32 ")\n",
2312 		child, flags));
2313 
2314 	T(WaitForChild(child, flags));
2315 
2316 	pid_t originalChild = child;
2317 
2318 	bool ignoreFoundEntries = false;
2319 	bool ignoreFoundEntriesChecked = false;
2320 
2321 	while (true) {
2322 		// lock the team
2323 		TeamLocker teamLocker(team);
2324 
2325 		// A 0 child argument means to wait for all children in the process
2326 		// group of the calling team.
2327 		child = originalChild == 0 ? -team->group_id : originalChild;
2328 
2329 		// check whether any condition holds
2330 		job_control_entry* entry = get_job_control_entry(team, child, flags);
2331 
2332 		// If we don't have an entry yet, check whether there are any children
2333 		// complying with the process group specification at all.
2334 		if (entry == NULL) {
2337 			bool childrenExist = false;
2338 			if (child == -1) {
2339 				childrenExist = team->children != NULL;
2340 			} else if (child < -1) {
2341 				childrenExist = has_children_in_group(team, -child);
2342 			} else {
2343 				if (Team* childTeam = Team::Get(child)) {
2344 					BReference<Team> childTeamReference(childTeam, true);
2345 					TeamLocker childTeamLocker(childTeam);
2346 					childrenExist = childTeam->parent == team;
2347 				}
2348 			}
2349 
2350 			if (!childrenExist) {
2351 				// there is no child we could wait for
2352 				status = ECHILD;
2353 			} else {
2354 				// the children we're waiting for are still running
2355 				status = B_WOULD_BLOCK;
2356 			}
2357 		} else {
2358 			// got something
2359 			foundEntry = *entry;
2360 
2361 			// unless WNOWAIT has been specified, "consume" the wait state
2362 			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
2363 				if (entry->state == JOB_CONTROL_STATE_DEAD) {
2364 					// The child is dead. Reap its death entry.
2365 					freeDeathEntry = entry;
2366 					team->dead_children.entries.Remove(entry);
2367 					team->dead_children.count--;
2368 				} else {
2369 					// The child is well. Reset its job control state.
2370 					team_set_job_control_state(entry->team,
2371 						JOB_CONTROL_STATE_NONE, NULL, false);
2372 				}
2373 			}
2374 		}
2375 
2376 		// If we haven't got anything yet, prepare for waiting for the
2377 		// condition variable.
2378 		ConditionVariableEntry deadWaitEntry;
2379 
2380 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
2381 			team->dead_children.condition_variable.Add(&deadWaitEntry);
2382 
2383 		teamLocker.Unlock();
2384 
2385 		// we got our entry and can return to our caller
2386 		if (status == B_OK) {
2387 			if (ignoreFoundEntries) {
2388 				// ... unless we shall ignore found entries
2389 				delete freeDeathEntry;
2390 				freeDeathEntry = NULL;
2391 				continue;
2392 			}
2393 
2394 			break;
2395 		}
2396 
2397 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
2398 			T(WaitForChildDone(status));
2399 			return status;
2400 		}
2401 
2402 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
2403 		if (status == B_INTERRUPTED) {
2404 			T(WaitForChildDone(status));
2405 			return status;
2406 		}
2407 
2408 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
2409 		// all our children are dead and fail with ECHILD. We check the
2410 		// condition at this point.
2411 		if (!ignoreFoundEntriesChecked) {
2412 			teamLocker.Lock();
2413 
2414 			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
2415 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
2416 				|| handler.sa_handler == SIG_IGN) {
2417 				ignoreFoundEntries = true;
2418 			}
2419 
2420 			teamLocker.Unlock();
2421 
2422 			ignoreFoundEntriesChecked = true;
2423 		}
2424 	}
2425 
2426 	delete freeDeathEntry;
2427 
2428 	// When we get here, we have a valid death entry, and it has already been
2429 	// unregistered from the team or group. Fill in the returned info.
2430 	memset(&_info, 0, sizeof(_info));
2431 	_info.si_signo = SIGCHLD;
2432 	_info.si_pid = foundEntry.thread;
2433 	_info.si_uid = foundEntry.signaling_user;
2434 	// TODO: Fill in si_errno?
2435 
2436 	switch (foundEntry.state) {
2437 		case JOB_CONTROL_STATE_DEAD:
2438 			_info.si_code = foundEntry.reason;
2439 			_info.si_status = foundEntry.reason == CLD_EXITED
2440 				? foundEntry.status : foundEntry.signal;
2441 			break;
2442 		case JOB_CONTROL_STATE_STOPPED:
2443 			_info.si_code = CLD_STOPPED;
2444 			_info.si_status = foundEntry.signal;
2445 			break;
2446 		case JOB_CONTROL_STATE_CONTINUED:
2447 			_info.si_code = CLD_CONTINUED;
2448 			_info.si_status = 0;
2449 			break;
2450 		case JOB_CONTROL_STATE_NONE:
2451 			// can't happen
2452 			break;
2453 	}
2454 
2455 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs if no other
2456 	// child status is available.
2457 	TeamLocker teamLocker(team);
2458 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2459 
2460 	if (is_team_signal_blocked(team, SIGCHLD)) {
2461 		if (get_job_control_entry(team, child, flags) == NULL)
2462 			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
2463 	}
2464 
2465 	schedulerLocker.Unlock();
2466 	teamLocker.Unlock();
2467 
2468 	// When the team is dead, the main thread continues to live in the kernel
2469 	// team for a very short time. To avoid surprises for the caller we rather
2470 	// wait until the thread is really gone.
2471 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
2472 		wait_for_thread(foundEntry.thread, NULL);
2473 
2474 	T(WaitForChildDone(foundEntry));
2475 
2476 	return foundEntry.thread;
2477 }
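

/*	Summary of the wait flag semantics implemented above (mirroring the POSIX
	wait flags):
	- \c WNOHANG: don't block; if no child state is ready, \c B_WOULD_BLOCK is
	  returned instead of waiting on the dead children condition variable.
	- \c WNOWAIT: report the child's state without consuming it, i.e. the
	  death entry respectively the job control state is left in place.
	- \c WUNTRACED: also report stopped children.
	- \c WCONTINUED: also report continued children.
*/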
2478 
2479 
2480 /*! Fills the team_info structure with information from the specified team.
2481 	Interrupts must be enabled. The team must not be locked.
2482 */
2483 static status_t
2484 fill_team_info(Team* team, team_info* info, size_t size)
2485 {
2486 	if (size != sizeof(team_info))
2487 		return B_BAD_VALUE;
2488 
2489 	// TODO: Set more information in team_info
2490 	memset(info, 0, size);
2491 
2492 	info->team = team->id;
2493 		// immutable
2494 	info->image_count = count_images(team);
2495 		// protected by sImageMutex
2496 
2497 	TeamLocker teamLocker(team);
2498 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2499 
2500 	info->thread_count = team->num_threads;
2501 	//info->area_count =
2502 	info->debugger_nub_thread = team->debug_info.nub_thread;
2503 	info->debugger_nub_port = team->debug_info.nub_port;
2504 	info->uid = team->effective_uid;
2505 	info->gid = team->effective_gid;
2506 
2507 	strlcpy(info->args, team->Args(), sizeof(info->args));
2508 	info->argc = 1;
2509 
2510 	return B_OK;
2511 }
2512 
2513 
2514 /*!	Returns whether the process group contains stopped processes.
2515 	The caller must hold the process group's lock.
2516 */
2517 static bool
2518 process_group_has_stopped_processes(ProcessGroup* group)
2519 {
2520 	Team* team = group->teams;
2521 	while (team != NULL) {
2522 		// the parent team's lock guards the job control entry -- acquire it
2523 		team->LockTeamAndParent(false);
2524 
2525 		if (team->job_control_entry != NULL
2526 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2527 			team->UnlockTeamAndParent();
2528 			return true;
2529 		}
2530 
2531 		team->UnlockTeamAndParent();
2532 
2533 		team = team->group_next;
2534 	}
2535 
2536 	return false;
2537 }
2538 
2539 
2540 /*!	Iterates through all process groups queued in team_remove_team() and signals
2541 	those that are orphaned and have stopped processes.
2542 	The caller must not hold any team or process group locks.
2543 */
2544 static void
2545 orphaned_process_group_check()
2546 {
2547 	// process as long as there are groups in the list
2548 	while (true) {
2549 		// remove the head from the list
2550 		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
2551 
2552 		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
2553 		if (group == NULL)
2554 			return;
2555 
2556 		group->UnsetOrphanedCheck();
2557 		BReference<ProcessGroup> groupReference(group);
2558 
2559 		orphanedCheckLocker.Unlock();
2560 
2561 		AutoLocker<ProcessGroup> groupLocker(group);
2562 
2563 		// If the group is orphaned and contains stopped processes, we're
2564 		// supposed to send SIGHUP + SIGCONT.
2565 		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
2566 			Thread* currentThread = thread_get_current_thread();
2567 
2568 			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
2569 			send_signal_to_process_group_locked(group, signal, 0);
2570 
2571 			signal.SetNumber(SIGCONT);
2572 			send_signal_to_process_group_locked(group, signal, 0);
2573 		}
2574 	}
2575 }
2576 
2577 
2578 static status_t
2579 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2580 	uint32 flags)
2581 {
2582 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2583 		return B_BAD_VALUE;
2584 
2585 	// get the team
2586 	Team* team = Team::GetAndLock(id);
2587 	if (team == NULL)
2588 		return B_BAD_TEAM_ID;
2589 	BReference<Team> teamReference(team, true);
2590 	TeamLocker teamLocker(team, true);
2591 
2592 	if ((flags & B_CHECK_PERMISSION) != 0) {
2593 		uid_t uid = geteuid();
2594 		if (uid != 0 && uid != team->effective_uid)
2595 			return B_NOT_ALLOWED;
2596 	}
2597 
2598 	bigtime_t kernelTime = 0;
2599 	bigtime_t userTime = 0;
2600 
2601 	switch (who) {
2602 		case B_TEAM_USAGE_SELF:
2603 		{
2604 			Thread* thread = team->thread_list;
2605 
2606 			for (; thread != NULL; thread = thread->team_next) {
2607 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2608 				kernelTime += thread->kernel_time;
2609 				userTime += thread->user_time;
2610 			}
2611 
2612 			kernelTime += team->dead_threads_kernel_time;
2613 			userTime += team->dead_threads_user_time;
2614 			break;
2615 		}
2616 
2617 		case B_TEAM_USAGE_CHILDREN:
2618 		{
2619 			Team* child = team->children;
2620 			for (; child != NULL; child = child->siblings_next) {
2621 				TeamLocker childLocker(child);
2622 
2623 				Thread* thread = child->thread_list;
2624 
2625 				for (; thread != NULL; thread = thread->team_next) {
2626 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2627 					kernelTime += thread->kernel_time;
2628 					userTime += thread->user_time;
2629 				}
2630 
2631 				kernelTime += child->dead_threads_kernel_time;
2632 				userTime += child->dead_threads_user_time;
2633 			}
2634 
2635 			kernelTime += team->dead_children.kernel_time;
2636 			userTime += team->dead_children.user_time;
2637 			break;
2638 		}
2639 	}
2640 
2641 	info->kernel_time = kernelTime;
2642 	info->user_time = userTime;
2643 
2644 	return B_OK;
2645 }
2646 
2647 
2648 //	#pragma mark - Private kernel API
2649 
2650 
2651 status_t
2652 team_init(kernel_args* args)
2653 {
2654 	// create the team hash table
2655 	new(&sTeamHash) TeamTable;
2656 	if (sTeamHash.Init(64) != B_OK)
2657 		panic("Failed to init team hash table!");
2658 
2659 	new(&sGroupHash) ProcessGroupHashTable;
2660 	if (sGroupHash.Init() != B_OK)
2661 		panic("Failed to init process group hash table!");
2662 
2663 	// create initial session and process groups
2664 
2665 	ProcessSession* session = new(std::nothrow) ProcessSession(1);
2666 	if (session == NULL)
2667 		panic("Could not create initial session.\n");
2668 	BReference<ProcessSession> sessionReference(session, true);
2669 
2670 	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
2671 	if (group == NULL)
2672 		panic("Could not create initial process group.\n");
2673 	BReference<ProcessGroup> groupReference(group, true);
2674 
2675 	group->Publish(session);
2676 
2677 	// create the kernel team
2678 	sKernelTeam = Team::Create(1, "kernel_team", true);
2679 	if (sKernelTeam == NULL)
2680 		panic("could not create kernel team!\n");
2681 	sKernelTeam->SetArgs(sKernelTeam->Name());
2682 	sKernelTeam->state = TEAM_STATE_NORMAL;
2683 
2684 	sKernelTeam->saved_set_uid = 0;
2685 	sKernelTeam->real_uid = 0;
2686 	sKernelTeam->effective_uid = 0;
2687 	sKernelTeam->saved_set_gid = 0;
2688 	sKernelTeam->real_gid = 0;
2689 	sKernelTeam->effective_gid = 0;
2690 	sKernelTeam->supplementary_groups = NULL;
2691 	sKernelTeam->supplementary_group_count = 0;
2692 
2693 	insert_team_into_group(group, sKernelTeam);
2694 
2695 	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
2696 	if (sKernelTeam->io_context == NULL)
2697 		panic("could not create io_context for kernel team!\n");
2698 
2699 	// stick it in the team hash
2700 	sTeamHash.Insert(sKernelTeam);
2701 
2702 	add_debugger_command_etc("team", &dump_team_info,
2703 		"Dump info about a particular team",
2704 		"[ <id> | <address> | <name> ]\n"
2705 		"Prints information about the specified team. If no argument is given\n"
2706 		"the current team is selected.\n"
2707 		"  <id>       - The ID of the team.\n"
2708 		"  <address>  - The address of the team structure.\n"
2709 		"  <name>     - The team's name.\n", 0);
2710 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2711 		"\n"
2712 		"Prints a list of all existing teams.\n", 0);
2713 
2714 	new(&sNotificationService) TeamNotificationService();
2715 
2716 	return B_OK;
2717 }
2718 
2719 
2720 int32
2721 team_max_teams(void)
2722 {
2723 	return sMaxTeams;
2724 }
2725 
2726 
2727 int32
2728 team_used_teams(void)
2729 {
2730 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2731 	return sUsedTeams;
2732 }
2733 
2734 
2735 /*! Returns a death entry of a child team specified by ID (if any).
2736 	The caller must hold the team's lock.
2737 
2738 	\param team The team whose dead children list to check.
2739 	\param child The ID of the child whose death entry to retrieve. Must be > 0.
2740 	\param _deleteEntry Return variable, indicating whether the caller needs to
2741 		delete the returned entry.
2742 	\return The death entry of the matching team, or \c NULL, if no death entry
2743 		for the team was found.
2744 */
2745 job_control_entry*
2746 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2747 {
2748 	if (child <= 0)
2749 		return NULL;
2750 
2751 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2752 		child);
2753 	if (entry) {
2754 		// remove the entry only if the caller is the parent of the found team
2755 		if (team_get_current_team_id() == entry->thread) {
2756 			team->dead_children.entries.Remove(entry);
2757 			team->dead_children.count--;
2758 			*_deleteEntry = true;
2759 		} else {
2760 			*_deleteEntry = false;
2761 		}
2762 	}
2763 
2764 	return entry;
2765 }
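

/*	Illustrative caller sketch (hypothetical, not part of the original
	source), showing the intended \a _deleteEntry protocol:

		bool deleteEntry;
		job_control_entry* entry
			= team_get_death_entry(team, childID, &deleteEntry);
		if (entry != NULL) {
			// ... evaluate the entry ...
			if (deleteEntry)
				delete entry;
		}
*/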
2766 
2767 
2768 /*! Quick check to see if we have a valid team ID. */
2769 bool
2770 team_is_valid(team_id id)
2771 {
2772 	if (id <= 0)
2773 		return false;
2774 
2775 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2776 
2777 	return team_get_team_struct_locked(id) != NULL;
2778 }
2779 
2780 
2781 Team*
2782 team_get_team_struct_locked(team_id id)
2783 {
2784 	return sTeamHash.Lookup(id);
2785 }
2786 
2787 
2788 void
2789 team_set_controlling_tty(int32 ttyIndex)
2790 {
2791 	// lock the team, so its session won't change while we're playing with it
2792 	Team* team = thread_get_current_thread()->team;
2793 	TeamLocker teamLocker(team);
2794 
2795 	// get and lock the session
2796 	ProcessSession* session = team->group->Session();
2797 	AutoLocker<ProcessSession> sessionLocker(session);
2798 
2799 	// set the session's fields
2800 	session->controlling_tty = ttyIndex;
2801 	session->foreground_group = -1;
2802 }
2803 
2804 
2805 int32
2806 team_get_controlling_tty()
2807 {
2808 	// lock the team, so its session won't change while we're playing with it
2809 	Team* team = thread_get_current_thread()->team;
2810 	TeamLocker teamLocker(team);
2811 
2812 	// get and lock the session
2813 	ProcessSession* session = team->group->Session();
2814 	AutoLocker<ProcessSession> sessionLocker(session);
2815 
2816 	// get the session's field
2817 	return session->controlling_tty;
2818 }
2819 
2820 
2821 status_t
2822 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2823 {
2824 	// lock the team, so its session won't change while we're playing with it
2825 	Thread* thread = thread_get_current_thread();
2826 	Team* team = thread->team;
2827 	TeamLocker teamLocker(team);
2828 
2829 	// get and lock the session
2830 	ProcessSession* session = team->group->Session();
2831 	AutoLocker<ProcessSession> sessionLocker(session);
2832 
2833 	// check given TTY -- must be the controlling tty of the calling process
2834 	if (session->controlling_tty != ttyIndex)
2835 		return ENOTTY;
2836 
2837 	// check given process group -- must belong to our session
2838 	{
2839 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2840 		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
2841 		if (group == NULL || group->Session() != session)
2842 			return B_BAD_VALUE;
2843 	}
2844 
2845 	// If we are a background group, we can do that unharmed only when we
2846 	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
2847 	if (session->foreground_group != -1
2848 		&& session->foreground_group != team->group_id
2849 		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN) {
2850 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2851 
2852 		if (!is_team_signal_blocked(team, SIGTTOU)) {
2853 			pid_t groupID = team->group_id;
2854 
2855 			schedulerLocker.Unlock();
2856 			sessionLocker.Unlock();
2857 			teamLocker.Unlock();
2858 
2859 			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
2860 			send_signal_to_process_group(groupID, signal, 0);
2861 			return B_INTERRUPTED;
2862 		}
2863 	}
2864 
2865 	session->foreground_group = processGroupID;
2866 
2867 	return B_OK;
2868 }
2869 
2870 
2871 /*!	Removes the specified team from the global team hash, from its process
2872 	group, and from its parent.
2873 	It also moves all of its children to the kernel team.
2874 
2875 	The caller must hold the following locks:
2876 	- \a team's process group's lock,
2877 	- the kernel team's lock,
2878 	- \a team's parent team's lock (might be the kernel team), and
2879 	- \a team's lock.
2880 */
2881 void
2882 team_remove_team(Team* team, pid_t& _signalGroup)
2883 {
2884 	Team* parent = team->parent;
2885 
2886 	// remember how long this team lasted
2887 	parent->dead_children.kernel_time += team->dead_threads_kernel_time
2888 		+ team->dead_children.kernel_time;
2889 	parent->dead_children.user_time += team->dead_threads_user_time
2890 		+ team->dead_children.user_time;
2891 
2892 	// remove the team from the hash table
2893 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
2894 	sTeamHash.Remove(team);
2895 	sUsedTeams--;
2896 	teamsLocker.Unlock();
2897 
2898 	// The team can no longer be accessed by ID. Navigation to it is still
2899 	// possible from its process group and its parent and children, but that
2900 	// will be rectified shortly.
2901 	team->state = TEAM_STATE_DEATH;
2902 
2903 	// If we're a controlling process (i.e. a session leader with controlling
2904 	// terminal), there's a bit of signaling we have to do. We can't do any of
2905 	// the signaling here due to the bunch of locks we're holding, but we need
2906 	// to determine whom to signal.
2907 	_signalGroup = -1;
2908 	bool isSessionLeader = false;
2909 	if (team->session_id == team->id
2910 		&& team->group->Session()->controlling_tty >= 0) {
2911 		isSessionLeader = true;
2912 
2913 		ProcessSession* session = team->group->Session();
2914 
2915 		AutoLocker<ProcessSession> sessionLocker(session);
2916 
2917 		session->controlling_tty = -1;
2918 		_signalGroup = session->foreground_group;
2919 	}
2920 
2921 	// remove us from our process group
2922 	remove_team_from_group(team);
2923 
2924 	// move the team's children to the kernel team
2925 	while (Team* child = team->children) {
2926 		// remove the child from the current team and add it to the kernel team
2927 		TeamLocker childLocker(child);
2928 
2929 		remove_team_from_parent(team, child);
2930 		insert_team_into_parent(sKernelTeam, child);
2931 
2932 		// move job control entries too
2933 		sKernelTeam->stopped_children.entries.MoveFrom(
2934 			&team->stopped_children.entries);
2935 		sKernelTeam->continued_children.entries.MoveFrom(
2936 			&team->continued_children.entries);
2937 
2938 		// If the team was a session leader with controlling terminal,
2939 		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
2940 		// groups with stopped processes. Due to locking complications we can't
2941 		// do that here, so we only check whether we were a reason for the
2942 		// child's process group not being an orphan and, if so, schedule a
2943 		// later check (cf. orphaned_process_group_check()).
2944 		if (isSessionLeader) {
2945 			ProcessGroup* childGroup = child->group;
2946 			if (childGroup->Session()->id == team->session_id
2947 				&& childGroup->id != team->group_id) {
2948 				childGroup->ScheduleOrphanedCheck();
2949 			}
2950 		}
2951 
2952 		// Note, we don't move the dead children entries. Those will be deleted
2953 		// when the team structure is deleted.
2954 	}
2955 
2956 	// remove us from our parent
2957 	remove_team_from_parent(parent, team);
2958 }
2959 
2960 
2961 /*!	Kills all threads but the main thread of the team and shuts down user
2962 	debugging for it.
2963 	To be called on exit of the team's main thread. No locks must be held.
2964 
2965 	\param team The team in question.
2966 	\return The port of the debugger for the team, -1 if none. To be passed to
2967 		team_delete_team().
2968 */
2969 port_id
2970 team_shutdown_team(Team* team)
2971 {
2972 	ASSERT(thread_get_current_thread() == team->main_thread);
2973 
2974 	TeamLocker teamLocker(team);
2975 
2976 	// Make sure debugging changes won't happen anymore.
2977 	port_id debuggerPort = -1;
2978 	while (true) {
2979 		// If a debugger change is in progress for the team, we'll have to
2980 		// wait until it is done.
2981 		ConditionVariableEntry waitForDebuggerEntry;
2982 		bool waitForDebugger = false;
2983 
2984 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2985 
2986 		if (team->debug_info.debugger_changed_condition != NULL) {
2987 			team->debug_info.debugger_changed_condition->Add(
2988 				&waitForDebuggerEntry);
2989 			waitForDebugger = true;
2990 		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2991 			// The team is being debugged. That will stop with the termination
2992 			// of the nub thread. Since we set the team state to death, no one
2993 			// can install a debugger anymore. We fetch the debugger's port to
2994 			// send it a message at the bitter end.
2995 			debuggerPort = team->debug_info.debugger_port;
2996 		}
2997 
2998 		debugInfoLocker.Unlock();
2999 
3000 		if (!waitForDebugger)
3001 			break;
3002 
3003 		// wait for the debugger change to be finished
3004 		teamLocker.Unlock();
3005 
3006 		waitForDebuggerEntry.Wait();
3007 
3008 		teamLocker.Lock();
3009 	}
3010 
3011 	// Mark the team as shutting down. That will prevent new threads from being
3012 	// created and debugger changes from taking place.
3013 	team->state = TEAM_STATE_SHUTDOWN;
3014 
3015 	// delete all timers
3016 	team->DeleteUserTimers(false);
3017 
3018 	// deactivate CPU time user timers for the team
3019 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3020 
3021 	if (team->HasActiveCPUTimeUserTimers())
3022 		team->DeactivateCPUTimeUserTimers();
3023 
3024 	schedulerLocker.Unlock();
3025 
3026 	// kill all threads but the main thread
3027 	team_death_entry deathEntry;
3028 	deathEntry.condition.Init(team, "team death");
3029 
3030 	while (true) {
3031 		team->death_entry = &deathEntry;
3032 		deathEntry.remaining_threads = 0;
3033 
3034 		Thread* thread = team->thread_list;
3035 		while (thread != NULL) {
3036 			if (thread != team->main_thread) {
3037 				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
3038 				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
3039 				deathEntry.remaining_threads++;
3040 			}
3041 
3042 			thread = thread->team_next;
3043 		}
3044 
3045 		if (deathEntry.remaining_threads == 0)
3046 			break;
3047 
3048 		// there are threads to wait for
3049 		ConditionVariableEntry entry;
3050 		deathEntry.condition.Add(&entry);
3051 
3052 		teamLocker.Unlock();
3053 
3054 		entry.Wait();
3055 
3056 		teamLocker.Lock();
3057 	}
3058 
3059 	team->death_entry = NULL;
3060 
3061 	return debuggerPort;
3062 }
3063 
3064 
3065 /*!	Called on team exit to notify threads waiting on the team and free most
3066 	resources associated with it.
3067 	The caller shouldn't hold any locks.
3068 */
3069 void
3070 team_delete_team(Team* team, port_id debuggerPort)
3071 {
3072 	// Not quite in our job description, but work that has been left by
3073 	// team_remove_team() and that can be done now that we're not holding any
3074 	// locks.
3075 	orphaned_process_group_check();
3076 
3077 	team_id teamID = team->id;
3078 
3079 	ASSERT(team->num_threads == 0);
3080 
3081 	// If someone is waiting for this team to be loaded, but it dies
3082 	// unexpectedly before being done, we need to notify the waiting
3083 	// thread now.
3084 
3085 	TeamLocker teamLocker(team);
3086 
3087 	if (team->loading_info) {
3088 		// there's indeed someone waiting
3089 		struct team_loading_info* loadingInfo = team->loading_info;
3090 		team->loading_info = NULL;
3091 
3092 		loadingInfo->result = B_ERROR;
3093 		loadingInfo->done = true;
3094 
3095 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
3096 
3097 		// wake up the waiting thread
3098 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
3099 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
3100 	}
3101 
3102 	// notify team watchers
3103 
3104 	{
3105 		// we're not reachable from anyone anymore at this point, so we
3106 		// can safely access the list without any locking
3107 		struct team_watcher* watcher;
3108 		while ((watcher = (struct team_watcher*)list_remove_head_item(
3109 				&team->watcher_list)) != NULL) {
3110 			watcher->hook(teamID, watcher->data);
3111 			free(watcher);
3112 		}
3113 	}
3114 
3115 	teamLocker.Unlock();
3116 
3117 	sNotificationService.Notify(TEAM_REMOVED, team);
3118 
3119 	// free team resources
3120 
3121 	delete_realtime_sem_context(team->realtime_sem_context);
3122 	xsi_sem_undo(team);
3123 	remove_images(team);
3124 	team->address_space->RemoveAndPut();
3125 
3126 	team->ReleaseReference();
3127 
3128 	// notify the debugger that the team is gone
3129 	user_debug_team_deleted(teamID, debuggerPort);
3130 }
3131 
3132 
3133 Team*
3134 team_get_kernel_team(void)
3135 {
3136 	return sKernelTeam;
3137 }
3138 
3139 
3140 team_id
3141 team_get_kernel_team_id(void)
3142 {
3143 	if (!sKernelTeam)
3144 		return 0;
3145 
3146 	return sKernelTeam->id;
3147 }
3148 
3149 
3150 team_id
3151 team_get_current_team_id(void)
3152 {
3153 	return thread_get_current_thread()->team->id;
3154 }
3155 
3156 
3157 status_t
3158 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3159 {
3160 	if (id == sKernelTeam->id) {
3161 		// we're the kernel team, so we don't have to go through all
3162 		// the hassle (locking and hash lookup)
3163 		*_addressSpace = VMAddressSpace::GetKernel();
3164 		return B_OK;
3165 	}
3166 
3167 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3168 
3169 	Team* team = team_get_team_struct_locked(id);
3170 	if (team == NULL)
3171 		return B_BAD_VALUE;
3172 
3173 	team->address_space->Get();
3174 	*_addressSpace = team->address_space;
3175 	return B_OK;
3176 }
3177 
3178 
3179 /*!	Sets the team's job control state.
3180 	The caller must hold the parent team's lock. Interrupts are allowed to be
3181 	enabled or disabled. In the latter case the scheduler lock may be held as
3182 	well.
3183 	\param team The team whose job control state shall be set.
3184 	\param newState The new state to be set.
3185 	\param signal The signal the new state was caused by. Can be \c NULL, if
3186 		none. Then the caller is responsible for filling in the following
3187 		fields of the entry before releasing the parent team's lock, unless
3188 		the new state is \c JOB_CONTROL_STATE_NONE:
3189 		- \c signal: The number of the signal causing the state change.
3190 		- \c signaling_user: The real UID of the user sending the signal.
3191 	\param schedulerLocked Whether the scheduler lock is being held, too.
3192 */
3193 void
3194 team_set_job_control_state(Team* team, job_control_state newState,
3195 	Signal* signal, bool schedulerLocked)
3196 {
3197 	if (team == NULL || team->job_control_entry == NULL)
3198 		return;
3199 
3200 	// don't touch anything, if the state stays the same or the team is already
3201 	// dead
3202 	job_control_entry* entry = team->job_control_entry;
3203 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
3204 		return;
3205 
3206 	T(SetJobControlState(team->id, newState, signal));
3207 
3208 	// remove from the old list
3209 	switch (entry->state) {
3210 		case JOB_CONTROL_STATE_NONE:
3211 			// entry is in no list ATM
3212 			break;
3213 		case JOB_CONTROL_STATE_DEAD:
3214 			// can't get here
3215 			break;
3216 		case JOB_CONTROL_STATE_STOPPED:
3217 			team->parent->stopped_children.entries.Remove(entry);
3218 			break;
3219 		case JOB_CONTROL_STATE_CONTINUED:
3220 			team->parent->continued_children.entries.Remove(entry);
3221 			break;
3222 	}
3223 
3224 	entry->state = newState;
3225 
3226 	if (signal != NULL) {
3227 		entry->signal = signal->Number();
3228 		entry->signaling_user = signal->SendingUser();
3229 	}
3230 
3231 	// add to new list
3232 	team_job_control_children* childList = NULL;
3233 	switch (entry->state) {
3234 		case JOB_CONTROL_STATE_NONE:
3235 			// entry doesn't get into any list
3236 			break;
3237 		case JOB_CONTROL_STATE_DEAD:
3238 			childList = &team->parent->dead_children;
3239 			team->parent->dead_children.count++;
3240 			break;
3241 		case JOB_CONTROL_STATE_STOPPED:
3242 			childList = &team->parent->stopped_children;
3243 			break;
3244 		case JOB_CONTROL_STATE_CONTINUED:
3245 			childList = &team->parent->continued_children;
3246 			break;
3247 	}
3248 
3249 	if (childList != NULL) {
3250 		childList->entries.Add(entry);
3251 		team->parent->dead_children.condition_variable.NotifyAll(
3252 			schedulerLocked);
3253 	}
3254 }
3255 
3256 
3257 /*!	Inits the given team's exit information, if not yet initialized, to some
3258 	generic "killed" status.
3259 	The caller must not hold the team's lock. Interrupts must be enabled.
3260 
3261 	\param team The team whose exit info shall be initialized.
3262 */
3263 void
3264 team_init_exit_info_on_error(Team* team)
3265 {
3266 	TeamLocker teamLocker(team);
3267 
3268 	if (!team->exit.initialized) {
3269 		team->exit.reason = CLD_KILLED;
3270 		team->exit.signal = SIGKILL;
3271 		team->exit.signaling_user = geteuid();
3272 		team->exit.status = 0;
3273 		team->exit.initialized = true;
3274 	}
3275 }
3276 
3277 
3278 /*! Adds a hook to the team that is called as soon as this team goes away.
3279 	This call might get public in the future.
3280 */
3281 status_t
3282 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3283 {
3284 	if (hook == NULL || teamID < B_OK)
3285 		return B_BAD_VALUE;
3286 
3287 	// create the watcher object
3288 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3289 	if (watcher == NULL)
3290 		return B_NO_MEMORY;
3291 
3292 	watcher->hook = hook;
3293 	watcher->data = data;
3294 
3295 	// add the watcher, if the team isn't already dying
3296 	// get and lock the team
3297 	Team* team = Team::GetAndLock(teamID);
3298 	if (team == NULL) {
3299 		free(watcher);
3300 		return B_BAD_TEAM_ID;
3301 	}
3302 
3303 	list_add_item(&team->watcher_list, watcher);
3304 
3305 	team->UnlockAndReleaseReference();
3306 
3307 	return B_OK;
3308 }
3309 
3310 
3311 status_t
3312 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3313 {
3314 	if (hook == NULL || teamID < 0)
3315 		return B_BAD_VALUE;
3316 
3317 	// get team and remove watcher (if present)
3318 	Team* team = Team::GetAndLock(teamID);
3319 	if (team == NULL)
3320 		return B_BAD_TEAM_ID;
3321 
3322 	// search for watcher
3323 	team_watcher* watcher = NULL;
3324 	while ((watcher = (team_watcher*)list_get_next_item(
3325 			&team->watcher_list, watcher)) != NULL) {
3326 		if (watcher->hook == hook && watcher->data == data) {
3327 			// got it!
3328 			list_remove_item(&team->watcher_list, watcher);
3329 			break;
3330 		}
3331 	}
3332 
3333 	team->UnlockAndReleaseReference();
3334 
3335 	if (watcher == NULL)
3336 		return B_ENTRY_NOT_FOUND;
3337 
3338 	free(watcher);
3339 	return B_OK;
3340 }
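

/*	Illustrative usage sketch for the watcher interface (hypothetical hook,
	not part of the original source):

		static void
		my_team_gone_hook(team_id id, void* data)
		{
			dprintf("team %" B_PRId32 " is gone\n", id);
		}

		// register: start_watching_team(teamID, &my_team_gone_hook, NULL);
		// the same (hook, data) pair unregisters it again:
		// stop_watching_team(teamID, &my_team_gone_hook, NULL);
*/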
3341 
3342 
3343 /*!	Allocates a user_thread structure from the team.
3344 	The team lock must be held, unless the function is called for the team's
3345 	main thread. Interrupts must be enabled.
3346 */
3347 struct user_thread*
3348 team_allocate_user_thread(Team* team)
3349 {
3350 	if (team->user_data == 0)
3351 		return NULL;
3352 
3353 	// take an entry from the free list, if any
3354 	if (struct free_user_thread* entry = team->free_user_threads) {
3355 		user_thread* thread = entry->thread;
3356 		team->free_user_threads = entry->next;
3357 		free(entry);
3358 		return thread;
3359 	}
3360 
3361 	while (true) {
3362 		// enough space left?
3363 		size_t needed = ROUNDUP(sizeof(user_thread), 8);
3364 		if (team->user_data_size - team->used_user_data < needed) {
3365 			// try to resize the area
3366 			if (resize_area(team->user_data_area,
3367 					team->user_data_size + B_PAGE_SIZE) != B_OK) {
3368 				return NULL;
3369 			}
3370 
3371 			// resized user area successfully -- try to allocate the user_thread
3372 			// again
3373 			team->user_data_size += B_PAGE_SIZE;
3374 			continue;
3375 		}
3376 
3377 		// allocate the user_thread
3378 		user_thread* thread
3379 			= (user_thread*)(team->user_data + team->used_user_data);
3380 		team->used_user_data += needed;
3381 
3382 		return thread;
3383 	}
3384 }
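

/*	Note on the scheme above: user_thread structures are carved out of the
	team's user data area with a simple bump allocator (growing the area page
	by page as needed) and are recycled via the team's \c free_user_threads
	list, since individual structures cannot be returned to the area itself.
*/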
3385 
3386 
3387 /*!	Frees the given user_thread structure.
3388 	The team's lock must not be held. Interrupts must be enabled.
3389 	\param team The team the user thread was allocated from.
3390 	\param userThread The user thread to free.
3391 */
3392 void
3393 team_free_user_thread(Team* team, struct user_thread* userThread)
3394 {
3395 	if (userThread == NULL)
3396 		return;
3397 
3398 	// create a free list entry
3399 	free_user_thread* entry
3400 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3401 	if (entry == NULL) {
3402 		// we have to leak the user thread :-/
3403 		return;
3404 	}
3405 
3406 	// add to free list
3407 	TeamLocker teamLocker(team);
3408 
3409 	entry->thread = userThread;
3410 	entry->next = team->free_user_threads;
3411 	team->free_user_threads = entry;
3412 }
3413 
3414 
3415 //	#pragma mark - Associated data interface
3416 
3417 
3418 AssociatedData::AssociatedData()
3419 	:
3420 	fOwner(NULL)
3421 {
3422 }
3423 
3424 
3425 AssociatedData::~AssociatedData()
3426 {
3427 }
3428 
3429 
3430 void
3431 AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
3432 {
3433 }
3434 
3435 
3436 AssociatedDataOwner::AssociatedDataOwner()
3437 {
3438 	mutex_init(&fLock, "associated data owner");
3439 }
3440 
3441 
3442 AssociatedDataOwner::~AssociatedDataOwner()
3443 {
3444 	mutex_destroy(&fLock);
3445 }
3446 
3447 
3448 bool
3449 AssociatedDataOwner::AddData(AssociatedData* data)
3450 {
3451 	MutexLocker locker(fLock);
3452 
3453 	if (data->Owner() != NULL)
3454 		return false;
3455 
3456 	data->AcquireReference();
3457 	fList.Add(data);
3458 	data->SetOwner(this);
3459 
3460 	return true;
3461 }
3462 
3463 
3464 bool
3465 AssociatedDataOwner::RemoveData(AssociatedData* data)
3466 {
3467 	MutexLocker locker(fLock);
3468 
3469 	if (data->Owner() != this)
3470 		return false;
3471 
3472 	data->SetOwner(NULL);
3473 	fList.Remove(data);
3474 
3475 	locker.Unlock();
3476 
3477 	data->ReleaseReference();
3478 
3479 	return true;
3480 }
3481 
3482 
3483 void
3484 AssociatedDataOwner::PrepareForDeletion()
3485 {
3486 	MutexLocker locker(fLock);
3487 
3488 	// move all data to a temporary list and unset the owner
3489 	DataList list;
3490 	list.MoveFrom(&fList);
3491 
3492 	for (DataList::Iterator it = list.GetIterator();
3493 		AssociatedData* data = it.Next();) {
3494 		data->SetOwner(NULL);
3495 	}
3496 
3497 	locker.Unlock();
3498 
3499 	// call the notification hooks and release our references
3500 	while (AssociatedData* data = list.RemoveHead()) {
3501 		data->OwnerDeleted(this);
3502 		data->ReleaseReference();
3503 	}
3504 }
3505 
3506 
3507 /*!	Associates data with the current team.
3508 	When the team is deleted, the data object is notified.
3509 	The team acquires a reference to the object.
3510 
3511 	\param data The data object.
3512 	\return \c true on success, \c false otherwise. Fails only when the supplied
3513 		data object is already associated with another owner.
3514 */
3515 bool
3516 team_associate_data(AssociatedData* data)
3517 {
3518 	return thread_get_current_thread()->team->AddData(data);
3519 }
3520 
3521 
3522 /*!	Dissociates data from the current team.
3523 	Balances an earlier call to team_associate_data().
3524 
3525 	\param data The data object.
3526 	\return \c true on success, \c false otherwise. Fails only when the data
3527 		object is not associated with the current team.
3528 */
3529 bool
3530 team_dissociate_data(AssociatedData* data)
3531 {
3532 	return thread_get_current_thread()->team->RemoveData(data);
3533 }
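

/*	Illustrative sketch (hypothetical subclass, not part of the original
	source): a component that wants to be notified when the current team goes
	away could use the associated data interface like this:

		struct MyTeamData : AssociatedData {
			virtual void OwnerDeleted(AssociatedDataOwner* owner)
			{
				// the team is going away -- release associated resources
			}
		};

		MyTeamData* data = new(std::nothrow) MyTeamData;
		if (data != NULL && !team_associate_data(data))
			data->ReleaseReference();
*/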
3534 
3535 
3536 //	#pragma mark - Public kernel API
3537 
3538 
3539 thread_id
3540 load_image(int32 argCount, const char** args, const char** env)
3541 {
3542 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
3543 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
3544 }
3545 
3546 
3547 thread_id
3548 load_image_etc(int32 argCount, const char* const* args,
3549 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3550 {
3551 	// we need to flatten the args and environment
3552 
3553 	if (args == NULL)
3554 		return B_BAD_VALUE;
3555 
3556 	// determine total needed size
3557 	int32 argSize = 0;
3558 	for (int32 i = 0; i < argCount; i++)
3559 		argSize += strlen(args[i]) + 1;
3560 
3561 	int32 envCount = 0;
3562 	int32 envSize = 0;
3563 	while (env != NULL && env[envCount] != NULL)
3564 		envSize += strlen(env[envCount++]) + 1;
3565 
3566 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3567 	if (size > MAX_PROCESS_ARGS_SIZE)
3568 		return B_TOO_MANY_ARGS;
3569 
3570 	// allocate space
3571 	char** flatArgs = (char**)malloc(size);
3572 	if (flatArgs == NULL)
3573 		return B_NO_MEMORY;
3574 
3575 	char** slot = flatArgs;
3576 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3577 
3578 	// copy arguments and environment
3579 	for (int32 i = 0; i < argCount; i++) {
3580 		int32 argSize = strlen(args[i]) + 1;
3581 		memcpy(stringSpace, args[i], argSize);
3582 		*slot++ = stringSpace;
3583 		stringSpace += argSize;
3584 	}
3585 
3586 	*slot++ = NULL;
3587 
3588 	for (int32 i = 0; i < envCount; i++) {
3589 		int32 envSize = strlen(env[i]) + 1;
3590 		memcpy(stringSpace, env[i], envSize);
3591 		*slot++ = stringSpace;
3592 		stringSpace += envSize;
3593 	}
3594 
3595 	*slot++ = NULL;
3596 
3597 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3598 		priority, parentID, flags, -1, 0);
3599 
3600 	free(flatArgs);
3601 		// load_image_internal() unset our variable if it took over ownership
3602 
3603 	return thread;
3604 }
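

/*	Illustrative usage sketch (not part of the original source): loading a
	userland image from kernel code, assuming "/bin/sh" exists:

		const char* args[] = { "/bin/sh", "-c", "ls" };
		const char* env[] = { "HOME=/boot/home", NULL };
		thread_id team = load_image(3, args, env);
		if (team >= 0)
			resume_thread(team);
*/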
3605 
3606 
3607 status_t
3608 wait_for_team(team_id id, status_t* _returnCode)
3609 {
3610 	// check whether the team exists
3611 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3612 
3613 	Team* team = team_get_team_struct_locked(id);
3614 	if (team == NULL)
3615 		return B_BAD_TEAM_ID;
3616 
3617 	id = team->id;
3618 
3619 	teamsLocker.Unlock();
3620 
3621 	// wait for the main thread (it has the same ID as the team)
3622 	return wait_for_thread(id, _returnCode);
3623 }
3624 
3625 
3626 status_t
3627 kill_team(team_id id)
3628 {
3629 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3630 
3631 	Team* team = team_get_team_struct_locked(id);
3632 	if (team == NULL)
3633 		return B_BAD_TEAM_ID;
3634 
3635 	id = team->id;
3636 
3637 	teamsLocker.Unlock();
3638 
3639 	if (team == sKernelTeam)
3640 		return B_NOT_ALLOWED;
3641 
3642 	// Just kill the team's main thread (it has the same ID as the team). The
3643 	// cleanup code there will take care of the team.
3644 	return kill_thread(id);
3645 }
3646 
3647 
3648 status_t
3649 _get_team_info(team_id id, team_info* info, size_t size)
3650 {
3651 	// get the team
3652 	Team* team = Team::Get(id);
3653 	if (team == NULL)
3654 		return B_BAD_TEAM_ID;
3655 	BReference<Team> teamReference(team, true);
3656 
3657 	// fill in the info
3658 	return fill_team_info(team, info, size);
3659 }
3660 
3661 
3662 status_t
3663 _get_next_team_info(int32* cookie, team_info* info, size_t size)
3664 {
3665 	int32 slot = *cookie;
3666 	if (slot < 1)
3667 		slot = 1;
3668 
3669 	InterruptsSpinLocker locker(sTeamHashLock);
3670 
3671 	team_id lastTeamID = peek_next_thread_id();
3672 		// TODO: This is broken, since the id can wrap around!
3673 
3674 	// get next valid team
3675 	Team* team = NULL;
3676 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3677 		slot++;
3678 
3679 	if (team == NULL)
3680 		return B_BAD_TEAM_ID;
3681 
3682 	// get a reference to the team and unlock
3683 	BReference<Team> teamReference(team);
3684 	locker.Unlock();
3685 
3686 	// fill in the info
3687 	*cookie = ++slot;
3688 	return fill_team_info(team, info, size);
3689 }
3690 
3691 
3692 status_t
3693 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3694 {
3695 	if (size != sizeof(team_usage_info))
3696 		return B_BAD_VALUE;
3697 
3698 	return common_get_team_usage_info(id, who, info, 0);
3699 }
3700 
3701 
3702 pid_t
3703 getpid(void)
3704 {
3705 	return thread_get_current_thread()->team->id;
3706 }
3707 
3708 
3709 pid_t
3710 getppid(void)
3711 {
3712 	Team* team = thread_get_current_thread()->team;
3713 
3714 	TeamLocker teamLocker(team);
3715 
3716 	return team->parent->id;
3717 }
3718 
3719 
3720 pid_t
3721 getpgid(pid_t id)
3722 {
3723 	if (id < 0) {
3724 		errno = EINVAL;
3725 		return -1;
3726 	}
3727 
3728 	if (id == 0) {
3729 		// get process group of the calling process
3730 		Team* team = thread_get_current_thread()->team;
3731 		TeamLocker teamLocker(team);
3732 		return team->group_id;
3733 	}
3734 
3735 	// get the team
3736 	Team* team = Team::GetAndLock(id);
3737 	if (team == NULL) {
3738 		errno = ESRCH;
3739 		return -1;
3740 	}
3741 
3742 	// get the team's process group ID
3743 	pid_t groupID = team->group_id;
3744 
3745 	team->UnlockAndReleaseReference();
3746 
3747 	return groupID;
3748 }
3749 
3750 
3751 pid_t
3752 getsid(pid_t id)
3753 {
3754 	if (id < 0) {
3755 		errno = EINVAL;
3756 		return -1;
3757 	}
3758 
3759 	if (id == 0) {
3760 		// get session of the calling process
3761 		Team* team = thread_get_current_thread()->team;
3762 		TeamLocker teamLocker(team);
3763 		return team->session_id;
3764 	}
3765 
3766 	// get the team
3767 	Team* team = Team::GetAndLock(id);
3768 	if (team == NULL) {
3769 		errno = ESRCH;
3770 		return -1;
3771 	}
3772 
3773 	// get the team's session ID
3774 	pid_t sessionID = team->session_id;
3775 
3776 	team->UnlockAndReleaseReference();
3777 
3778 	return sessionID;
3779 }
3780 
3781 
3782 //	#pragma mark - User syscalls
3783 
3784 
3785 status_t
3786 _user_exec(const char* userPath, const char* const* userFlatArgs,
3787 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
3788 {
3789 	// NOTE: Since this function normally doesn't return, don't use automatic
3790 	// variables that need destruction in the function scope.
3791 	char path[B_PATH_NAME_LENGTH];
3792 
3793 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3794 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3795 		return B_BAD_ADDRESS;
3796 
3797 	// copy and relocate the flat arguments
3798 	char** flatArgs;
3799 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3800 		argCount, envCount, flatArgs);
3801 
3802 	if (error == B_OK) {
3803 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3804 			envCount, umask);
3805 			// this one only returns in case of error
3806 	}
3807 
3808 	free(flatArgs);
3809 	return error;
3810 }
3811 
3812 
3813 thread_id
3814 _user_fork(void)
3815 {
3816 	return fork_team();
3817 }
3818 
3819 
3820 pid_t
3821 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo)
3822 {
3823 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
3824 		return B_BAD_ADDRESS;
3825 
3826 	siginfo_t info;
3827 	pid_t foundChild = wait_for_child(child, flags, info);
3828 	if (foundChild < 0)
3829 		return syscall_restart_handle_post(foundChild);
3830 
3831 	// copy info back to userland
3832 	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
3833 		return B_BAD_ADDRESS;
3834 
3835 	return foundChild;
3836 }
3837 
3838 
3839 pid_t
3840 _user_process_info(pid_t process, int32 which)
3841 {
3842 	// we only allow returning the parent of the current process
3843 	if (which == PARENT_ID
3844 		&& process != 0 && process != thread_get_current_thread()->team->id)
3845 		return B_BAD_VALUE;
3846 
3847 	pid_t result;
3848 	switch (which) {
3849 		case SESSION_ID:
3850 			result = getsid(process);
3851 			break;
3852 		case GROUP_ID:
3853 			result = getpgid(process);
3854 			break;
3855 		case PARENT_ID:
3856 			result = getppid();
3857 			break;
3858 		default:
3859 			return B_BAD_VALUE;
3860 	}
3861 
3862 	return result >= 0 ? result : errno;
3863 }


pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the target
	//   process as group leader.
	// * Set the target process' process group to an already existing one in the
	//   same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

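		// Deadlock avoidance in the loop below: when both groups already
		// exist, they are locked in ascending ID order. A newly created group
		// is not published yet, so it cannot be contended and may be locked
		// at any time. If the convenient order isn't the correct one, we
		// first try a non-blocking TryLock() and only fall back to unlocking
		// and relocking in the proper order (revalidating afterwards) when
		// that fails.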
		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock the target group without blocking
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock in
			// the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child of
			// the calling team and in the same session. (If that's the case it
			// isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child that has already
			// executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different session.
			// That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll(false);

		return group->id;
	}
}
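

/*	Typical userland job-control usage of setpgid() (standard POSIX; this is
	why both the parent and the child may make the call, as handled above --
	command and arguments are hypothetical placeholders):

		pid_t child = fork();
		if (child == 0) {
			setpgid(0, 0);				// child: new group, self as leader
			execvp(command, arguments);
			_exit(127);
		}
		setpgid(child, child);
			// parent: same call -- whichever runs first wins; the EACCES
			// check above catches the case where the child exec()ed already

	A shell would subsequently hand the terminal to the new group with
	tcsetpgrp().
*/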


pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll(false);

	return group->id;
}
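

/*	The classic userland pattern built on setsid() -- detaching a daemon from
	its controlling terminal (standard POSIX):

		pid_t pid = fork();
		if (pid < 0)
			return -1;
		if (pid > 0)
			_exit(0);			// parent exits
		if (setsid() < 0)		// child becomes session and group leader
			return -1;

	The first fork() is what lets the is_process_group_leader() check above
	pass: the freshly forked child inherits its parent's process group, so it
	cannot be a group leader itself.
*/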


status_t
_user_wait_for_team(team_id id, status_t* _userReturnCode)
{
	status_t returnCode;
	status_t status;

	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
		return B_BAD_ADDRESS;

	status = wait_for_team(id, &returnCode);
	if (status >= B_OK && _userReturnCode != NULL) {
		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
				!= B_OK)
			return B_BAD_ADDRESS;
		return B_OK;
	}

	return syscall_restart_handle_post(status);
}
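

/*	Sketch of a userland caller waiting for a whole team to quit, assuming
	the usual _kern_* syscall stub naming:

		status_t returnCode;
		if (_kern_wait_for_team(teamID, &returnCode) == B_OK)
			;	// returnCode now holds the team's return code

	Unlike wait_for_thread(), this waits for the entire team.
*/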


thread_id
_user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, int32 priority, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));

	if (argCount < 1)
		return B_BAD_VALUE;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);
	if (error != B_OK)
		return error;

	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
		errorToken);

	free(flatArgs);
		// load_image_internal() has unset our variable if it took over
		// ownership, making this free() a no-op in that case

	return thread;
}
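

/*	The public userland API backed by this syscall -- the Be Book usage
	pattern:

		extern char** environ;
		const char* arguments[] = { "/bin/ls", "-l" };
		thread_id thread = load_image(2, arguments, (const char**)environ);
		if (thread >= 0) {
			resume_thread(thread);		// the main thread starts suspended
			status_t returnCode;
			wait_for_thread(thread, &returnCode);
		}
*/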


void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status
	TeamLocker teamLocker(team);

	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread if the team is being debugged and that has been
	// requested.
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();

	// Send this thread a SIGKILL. This makes sure the thread will not return to
	// userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}
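

/*	This presumably backs the userland exit() path: after the C library has
	run atexit() handlers and the like, it enters the kernel through this
	syscall with the process return value. Note that no stack unwinding
	happens here -- the SIGKILL above tears the team down instead of ever
	returning to userland.
*/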


status_t
_user_kill_team(team_id team)
{
	return kill_team(team);
}


status_t
_user_get_team_info(team_id id, team_info* userInfo)
{
	status_t status;
	team_info info;

	if (!IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_team_info(id, &info, sizeof(team_info));
	if (status == B_OK) {
		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
			return B_BAD_ADDRESS;
	}

	return status;
}


status_t
_user_get_next_team_info(int32* userCookie, team_info* userInfo)
{
	status_t status;
	team_info info;
	int32 cookie;

	if (!IS_USER_ADDRESS(userCookie)
		|| !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
	if (status != B_OK)
		return status;

	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}
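

/*	Userland usage of the two syscalls above, via their public wrappers --
	the Be Book pattern for iterating over all running teams:

		team_info info;
		int32 cookie = 0;
		while (get_next_team_info(&cookie, &info) == B_OK)
			printf("team %" B_PRId32 ": %s\n", info.team, info.args);

	get_team_info(B_CURRENT_TEAM, &info) retrieves a single team's info
	directly.
*/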


team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}


status_t
_user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
	size_t size)
{
	if (size != sizeof(team_usage_info))
		return B_BAD_VALUE;

	team_usage_info info;
	status_t status = common_get_team_usage_info(team, who, &info,
		B_CHECK_PERMISSION);

	// don't copy a possibly uninitialized info back to userland on error
	if (status != B_OK)
		return status;

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(userInfo, &info, size) != B_OK) {
		return B_BAD_ADDRESS;
	}

	return B_OK;
}
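

/*	Userland usage via the public wrapper (Be Book pattern):

		team_usage_info usage;
		if (get_team_usage_info(B_CURRENT_TEAM, B_TEAM_USAGE_SELF,
				&usage) == B_OK) {
			// usage.user_time and usage.kernel_time are in microseconds
		}
*/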


status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

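	// We assemble the reply in a KMessage whose flattened buffer is copied
	// to userland at the end. The team data itself is cloned under the team
	// lock first, so the message can be built without holding any locks.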
	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// allocate memory for a copy of the needed team data
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		};

		ExtendedTeamData* teamClone
			= (ExtendedTeamData*)malloc(sizeof(ExtendedTeamData));
			// It would be nicer to use new, but then we'd have to use
			// ObjectDeleter and declare the structure outside of the function
			// due to template parameter restrictions.
		if (teamClone == NULL)
			return B_NO_MEMORY;
		MemoryDeleter teamCloneDeleter(teamClone);

		io_context* ioContext;
		{
			// get the team structure
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);

			// copy the data
			teamClone->id = team->id;
			strlcpy(teamClone->name, team->Name(), sizeof(teamClone->name));
			teamClone->group_id = team->group_id;
			teamClone->session_id = team->session_id;
			teamClone->real_uid = team->real_uid;
			teamClone->real_gid = team->real_gid;
			teamClone->effective_uid = team->effective_uid;
			teamClone->effective_gid = team->effective_gid;

			// also fetch a reference to the I/O context
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		CObjectDeleter<io_context> ioContextPutter(ioContext,
			&vfs_put_io_context);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone->id) != B_OK
			|| info.AddString("name", teamClone->name) != B_OK
			|| info.AddInt32("process group", teamClone->group_id) != B_OK
			|| info.AddInt32("session", teamClone->session_id) != B_OK
			|| info.AddInt32("uid", teamClone->real_uid) != B_OK
			|| info.AddInt32("gid", teamClone->real_gid) != B_OK
			|| info.AddInt32("euid", teamClone->effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone->effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			MutexLocker ioContextLocker(ioContext->io_mutex);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
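
/*	The caller-side protocol for this syscall is the usual two-step size
	negotiation -- a sketch only, assuming the standard _kern_* syscall stub
	naming:

		size_t sizeNeeded = 0;
		status_t status = _kern_get_extended_team_info(teamID,
			B_TEAM_INFO_BASIC, NULL, 0, &sizeNeeded);
		if (status == B_BUFFER_OVERFLOW) {
			void* buffer = malloc(sizeNeeded);
			if (buffer != NULL) {
				status = _kern_get_extended_team_info(teamID,
					B_TEAM_INFO_BASIC, buffer, sizeNeeded, &sizeNeeded);
				// on success the buffer holds the flattened KMessage
				// assembled above
				free(buffer);
			}
		}
*/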