xref: /haiku/src/system/kernel/team.cpp (revision 2897df967633aab846ff4917b53e2af7d1e54eeb)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 /*!	Team functions */
13 
14 
15 #include <team.h>
16 
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22 
23 #include <OS.h>
24 
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27 
28 #include <extended_system_info_defs.h>
29 
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <sem.h>
47 #include <syscall_process_info.h>
48 #include <syscall_restart.h>
49 #include <syscalls.h>
50 #include <tls.h>
51 #include <tracing.h>
52 #include <user_runtime.h>
53 #include <user_thread.h>
54 #include <usergroup.h>
55 #include <vfs.h>
56 #include <vm/vm.h>
57 #include <vm/VMAddressSpace.h>
58 #include <util/AutoLock.h>
59 
60 #include "TeamThreadTables.h"
61 
62 
63 //#define TRACE_TEAM
64 #ifdef TRACE_TEAM
65 #	define TRACE(x) dprintf x
66 #else
67 #	define TRACE(x) ;
68 #endif
69 
70 
// Hash key wrapper for team lookups by ID.
// NOTE(review): not referenced in this chunk -- presumably used by hash table
// code elsewhere in the file; confirm before removing.
struct team_key {
	team_id id;
};
74 
// Bundles everything a newly loaded/exec()ed team needs to start: the
// executable path, the flattened argument/environment block, and loader
// bookkeeping. Ownership: the structure and its buffers are heap-allocated;
// presumably freed by the team creation/exec code later in this file.
struct team_arg {
	char	*path;				// path to the executable
	char	**flat_args;		// args + env flattened into one allocation
	size_t	flat_args_size;		// total size of the flat_args allocation
	uint32	arg_count;			// number of entries in args
	uint32	env_count;			// number of entries in env
	mode_t	umask;				// umask to inherit
	uint32	flags;				// TEAM_ARGS_FLAG_* (e.g. NO_ASLR below)
	port_id	error_port;			// port to report early loader errors to
	uint32	error_token;		// token identifying the request on error_port
};
86 
87 #define TEAM_ARGS_FLAG_NO_ASLR	0x01
88 
89 
90 namespace {
91 
92 
// Notification service broadcasting team life-cycle events (creation,
// deletion, exec) to registered listeners; see Notify() below for the
// message layout.
class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};
99 
100 
101 // #pragma mark - TeamTable
102 
103 
104 typedef BKernel::TeamThreadTable<Team> TeamTable;
105 
106 
107 // #pragma mark - ProcessGroupHashDefinition
108 
109 
// Hash table policy for the pid_t -> ProcessGroup table (sGroupHash):
// groups are keyed and hashed by their ID and chained intrusively via
// ProcessGroup::next.
struct ProcessGroupHashDefinition {
	typedef pid_t			KeyType;
	typedef	ProcessGroup	ValueType;

	size_t HashKey(pid_t key) const
	{
		// the ID itself is a good enough hash
		return key;
	}

	size_t Hash(ProcessGroup* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(pid_t key, ProcessGroup* value) const
	{
		return value->id == key;
	}

	ProcessGroup*& GetLink(ProcessGroup* value) const
	{
		// intrusive link -- the group itself stores the chain pointer
		return value->next;
	}
};
134 
135 typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
136 
137 
138 }	// unnamed namespace
139 
140 
141 // #pragma mark -
142 
143 
// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static spinlock sTeamHashLock = B_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

// the kernel's own team -- presumably set during team initialization code
// outside this chunk; used e.g. by LockTeamAndParent() to skip locking it
static Team* sKernelTeam = NULL;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;
	// starts at 1: the kernel team is always there

static TeamNotificationService sNotificationService;

// sizing of the per-team user data area (reserved vs. initially committed)
static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
169 
170 
171 // #pragma mark - TeamListIterator
172 
173 
TeamListIterator::TeamListIterator()
{
	// Register our iterator entry with the team table, so iteration stays
	// valid while teams are concurrently added/removed.
	InterruptsSpinLocker locker(sTeamHashLock);
	sTeamHash.InsertIteratorEntry(&fEntry);
}
180 
181 
TeamListIterator::~TeamListIterator()
{
	// deregister the iterator entry from the team table
	InterruptsSpinLocker locker(sTeamHashLock);
	sTeamHash.RemoveIteratorEntry(&fEntry);
}
188 
189 
190 Team*
191 TeamListIterator::Next()
192 {
193 	// get the next team -- if there is one, get reference for it
194 	InterruptsSpinLocker locker(sTeamHashLock);
195 	Team* team = sTeamHash.NextElement(&fEntry);
196 	if (team != NULL)
197 		team->AcquireReference();
198 
199 	return team;
200 }
201 
202 
203 // #pragma mark - Tracing
204 
205 
206 #if TEAM_TRACING
207 namespace TeamTracing {
208 
// Trace entry recording a fork(): remembers the ID of the main thread of the
// newly forked team.
class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
	}

private:
	thread_id			fForkedThread;
};
226 
227 
228 class ExecTeam : public AbstractTraceEntry {
229 public:
230 	ExecTeam(const char* path, int32 argCount, const char* const* args,
231 			int32 envCount, const char* const* env)
232 		:
233 		fArgCount(argCount),
234 		fArgs(NULL)
235 	{
236 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
237 			false);
238 
239 		// determine the buffer size we need for the args
240 		size_t argBufferSize = 0;
241 		for (int32 i = 0; i < argCount; i++)
242 			argBufferSize += strlen(args[i]) + 1;
243 
244 		// allocate a buffer
245 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
246 		if (fArgs) {
247 			char* buffer = fArgs;
248 			for (int32 i = 0; i < argCount; i++) {
249 				size_t argSize = strlen(args[i]) + 1;
250 				memcpy(buffer, args[i], argSize);
251 				buffer += argSize;
252 			}
253 		}
254 
255 		// ignore env for the time being
256 		(void)envCount;
257 		(void)env;
258 
259 		Initialized();
260 	}
261 
262 	virtual void AddDump(TraceOutput& out)
263 	{
264 		out.Print("team exec, \"%p\", args:", fPath);
265 
266 		if (fArgs != NULL) {
267 			char* args = fArgs;
268 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
269 				out.Print(" \"%s\"", args);
270 				args += strlen(args) + 1;
271 			}
272 		} else
273 			out.Print(" <too long>");
274 	}
275 
276 private:
277 	char*	fPath;
278 	int32	fArgCount;
279 	char*	fArgs;
280 };
281 
282 
/*!	Returns a human-readable name for the given job control state, for use by
	the tracing entries below. Unknown values yield "invalid".
*/
static const char*
job_control_state_name(job_control_state state)
{
	switch (state) {
		case JOB_CONTROL_STATE_NONE:
			return "none";
		case JOB_CONTROL_STATE_STOPPED:
			return "stopped";
		case JOB_CONTROL_STATE_CONTINUED:
			return "continued";
		case JOB_CONTROL_STATE_DEAD:
			return "dead";
		default:
			return "invalid";
	}
}
299 
300 
// Trace entry recording a job control state change of a team, including the
// signal (if any) that caused it.
class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		// 0 means "no signal involved"
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %" B_PRId32 ", "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;
};
324 
325 
// Trace entry recording the start of a wait-for-child operation (waitpid()
// and friends) with the requested child ID and the wait flags.
class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %" B_PRId32 ", "
			"flags: %#" B_PRIx32, fChild, fFlags);
	}

private:
	pid_t	fChild;
	uint32	fFlags;
};
346 
347 
// Trace entry recording the completion of a wait-for-child operation: either
// the resulting job control entry, or -- second constructor -- the error code
// (stored in fTeam; the other members remain uninitialized and AddDump() only
// reads them when fTeam >= 0).
class WaitForChildDone : public AbstractTraceEntry {
public:
	WaitForChildDone(const job_control_entry& entry)
		:
		fState(entry.state),
		fTeam(entry.thread),
		fStatus(entry.status),
		fReason(entry.reason),
		fSignal(entry.signal)
	{
		Initialized();
	}

	WaitForChildDone(status_t error)
		:
		fTeam(error)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		if (fTeam >= 0) {
			out.Print("team wait for child done, team: %" B_PRId32 ", "
				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
				fTeam, job_control_state_name(fState), fStatus, fReason,
				fSignal);
		} else {
			// fTeam < 0: it holds the error code
			out.Print("team wait for child failed, error: "
				"%#" B_PRIx32 ", ", fTeam);
		}
	}

private:
	job_control_state	fState;
	team_id				fTeam;
	status_t			fStatus;
	uint16				fReason;
	uint16				fSignal;
};
388 
389 }	// namespace TeamTracing
390 
391 #	define T(x) new(std::nothrow) TeamTracing::x;
392 #else
393 #	define T(x) ;
394 #endif
395 
396 
397 //	#pragma mark - TeamNotificationService
398 
399 
// Registers the service under the name "teams".
TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}
404 
405 
406 void
407 TeamNotificationService::Notify(uint32 eventCode, Team* team)
408 {
409 	char eventBuffer[128];
410 	KMessage event;
411 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
412 	event.AddInt32("event", eventCode);
413 	event.AddInt32("team", team->id);
414 	event.AddPointer("teamStruct", team);
415 
416 	DefaultNotificationService::Notify(event, eventCode);
417 }
418 
419 
420 //	#pragma mark - Team
421 
422 
/*!	Constructs a team with the given ID.
	Note: the two allocations that can fail here (\c job_control_entry and
	\c fQueuedSignalsCounter) are only null-checked by Team::Create(), so that
	factory must be used instead of constructing a Team directly.
*/
Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;
	serial_number = -1;

	// init mutex
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		// include the team ID in the lock name; MUTEX_FLAG_CLONE_NAME makes
		// the mutex keep its own copy of the stack-local string
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	hash_next = siblings_next = children = parent = NULL;
	fName[0] = '\0';
	fArgs[0] = '\0';
	num_threads = 0;
	io_context = NULL;
	address_space = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	thread_list = NULL;
	main_thread = NULL;
	loading_info = NULL;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	death_entry = NULL;
	user_data_area = -1;
	user_data = 0;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	commpage_address = NULL;

	supplementary_groups = NULL;
	supplementary_group_count = 0;

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;

	// dead threads
	list_init(&dead_threads);
	dead_threads_count = 0;

	// dead children
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// job control entry -- may fail; checked in Team::Create()
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	list_init(&sem_list);
	list_init_etc(&port_list, port_team_link_offset());
	list_init(&image_list);
	list_init(&watcher_list);

	clear_team_debug_info(&debug_info, true);

	// init dead/stopped/continued children condition vars
	dead_children.condition_variable.Init(&dead_children, "team children");

	B_INITIALIZE_SPINLOCK(&time_lock);
	B_INITIALIZE_SPINLOCK(&signal_lock);

	// the kernel team gets an unlimited (-1) queued signal budget;
	// may fail -- checked in Team::Create()
	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));

	fUserDefinedTimerCount = 0;

	fCoreDumpCondition = NULL;
}
509 
510 
/*!	Destroys the team and releases all resources it still owns: the I/O
	context, owned ports and semaphores, user timers, pending/queued signals,
	and the accumulated dead-thread/dead-children bookkeeping.
*/
Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	if (io_context != NULL)
		vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	// delete all user timers (not just the user-defined ones)
	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	// free the death entries of our dead threads
	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	// delete the job control entries of dead children no one waited for
	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	// free the recycled user_thread structures
	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	malloc_referenced_release(supplementary_groups);

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}
548 
549 
550 /*static*/ Team*
551 Team::Create(team_id id, const char* name, bool kernel)
552 {
553 	// create the team object
554 	Team* team = new(std::nothrow) Team(id, kernel);
555 	if (team == NULL)
556 		return NULL;
557 	ObjectDeleter<Team> teamDeleter(team);
558 
559 	if (name != NULL)
560 		team->SetName(name);
561 
562 	// check initialization
563 	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
564 		return NULL;
565 
566 	// finish initialization (arch specifics)
567 	if (arch_team_init_team_struct(team, kernel) != B_OK)
568 		return NULL;
569 
570 	if (!kernel) {
571 		status_t error = user_timer_create_team_timers(team);
572 		if (error != B_OK)
573 			return NULL;
574 	}
575 
576 	// everything went fine
577 	return teamDeleter.Detach();
578 }
579 
580 
581 /*!	\brief Returns the team with the given ID.
582 	Returns a reference to the team.
583 	Team and thread spinlock must not be held.
584 */
585 /*static*/ Team*
586 Team::Get(team_id id)
587 {
588 	if (id == B_CURRENT_TEAM) {
589 		Team* team = thread_get_current_thread()->team;
590 		team->AcquireReference();
591 		return team;
592 	}
593 
594 	InterruptsSpinLocker locker(sTeamHashLock);
595 	Team* team = sTeamHash.Lookup(id);
596 	if (team != NULL)
597 		team->AcquireReference();
598 	return team;
599 }
600 
601 
602 /*!	\brief Returns the team with the given ID in a locked state.
603 	Returns a reference to the team.
604 	Team and thread spinlock must not be held.
605 */
606 /*static*/ Team*
607 Team::GetAndLock(team_id id)
608 {
609 	// get the team
610 	Team* team = Get(id);
611 	if (team == NULL)
612 		return NULL;
613 
614 	// lock it
615 	team->Lock();
616 
617 	// only return the team, when it isn't already dying
618 	if (team->state >= TEAM_STATE_SHUTDOWN) {
619 		team->Unlock();
620 		team->ReleaseReference();
621 		return NULL;
622 	}
623 
624 	return team;
625 }
626 
627 
628 /*!	Locks the team and its parent team (if any).
629 	The caller must hold a reference to the team or otherwise make sure that
630 	it won't be deleted.
631 	If the team doesn't have a parent, only the team itself is locked. If the
632 	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
633 	only the team itself is locked.
634 
635 	\param dontLockParentIfKernel If \c true, the team's parent team is only
636 		locked, if it is not the kernel team.
637 */
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent. This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);

		Unlock();
		parent->Lock();
		Lock();
			// now both locks are held in the correct parent -> child order

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		// (the reference is dropped when parentReference goes out of scope)
		parent->Unlock();
	}
}
670 
671 
672 /*!	Unlocks the team and its parent team (if any).
673 */
void
Team::UnlockTeamAndParent()
{
	// counterpart to LockTeamAndParent(): release in reverse acquisition order
	if (parent != NULL)
		parent->Unlock();

	Unlock();
}
682 
683 
684 /*!	Locks the team, its parent team (if any), and the team's process group.
685 	The caller must hold a reference to the team or otherwise make sure that
686 	it won't be deleted.
687 	If the team doesn't have a parent, only the team itself is locked.
688 */
void
Team::LockTeamParentAndProcessGroup()
{
	// lock order: process group -> parent -> team
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's lock.
	// If we have a parent, try to lock it.
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest of
	// the job. (The group lock stays held throughout.)
	Unlock();
	LockTeamAndParent(false);
}
704 
705 
706 /*!	Unlocks the team, its parent team (if any), and the team's process group.
707 */
void
Team::UnlockTeamParentAndProcessGroup()
{
	// counterpart to LockTeamParentAndProcessGroup(): group, parent, team
	group->Unlock();

	if (parent != NULL)
		parent->Unlock();

	Unlock();
}
718 
719 
void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);

		Unlock();
		group->Lock();
		Lock();
			// now both locks are held in the correct group -> team order

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		// (the reference is dropped when groupReference goes out of scope)
		group->Unlock();
	}
}
751 
752 
void
Team::UnlockTeamAndProcessGroup()
{
	// counterpart to LockTeamAndProcessGroup(): group first, then the team
	group->Unlock();
	Unlock();
}
759 
760 
761 void
762 Team::SetName(const char* name)
763 {
764 	if (const char* lastSlash = strrchr(name, '/'))
765 		name = lastSlash + 1;
766 
767 	strlcpy(fName, name, B_OS_NAME_LENGTH);
768 }
769 
770 
/*!	Sets the team's argument string verbatim (truncated to the fArgs buffer).
*/
void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}
776 
777 
778 void
779 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
780 {
781 	fArgs[0] = '\0';
782 	strlcpy(fArgs, path, sizeof(fArgs));
783 	for (int i = 0; i < otherArgCount; i++) {
784 		strlcat(fArgs, " ", sizeof(fArgs));
785 		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
786 	}
787 }
788 
789 
/*!	Resets the team's signal dispositions for an exec(), per POSIX.
*/
void
Team::ResetSignalsOnExec()
{
	// We are supposed to keep pending signals. Signal actions shall be reset
	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
	// (for SIGCHLD it's implementation-defined). Others shall be reset to
	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
	// flags, but since there aren't any handlers, they make little sense, so
	// we clear them.

	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
		struct sigaction& action = SignalActionFor(i);
		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
			action.sa_handler = SIG_DFL;

		// clear mask, flags (including SA_ONSTACK), and user data
		action.sa_mask = 0;
		action.sa_flags = 0;
		action.sa_userdata = NULL;
	}
}
810 
811 
/*!	Copies all signal actions from \a parent, e.g. when forking.
*/
void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}
817 
818 
819 /*!	Adds the given user timer to the team and, if user-defined, assigns it an
820 	ID.
821 
822 	The caller must hold the team's lock.
823 
824 	\param timer The timer to be added. If it doesn't have an ID yet, it is
825 		considered user-defined and will be assigned an ID.
826 	\return \c B_OK, if the timer was added successfully, another error code
827 		otherwise.
828 */
829 status_t
830 Team::AddUserTimer(UserTimer* timer)
831 {
832 	// don't allow addition of timers when already shutting the team down
833 	if (state >= TEAM_STATE_SHUTDOWN)
834 		return B_BAD_TEAM_ID;
835 
836 	// If the timer is user-defined, check timer limit and increment
837 	// user-defined count.
838 	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
839 		return EAGAIN;
840 
841 	fUserTimers.AddTimer(timer);
842 
843 	return B_OK;
844 }
845 
846 
847 /*!	Removes the given user timer from the team.
848 
849 	The caller must hold the team's lock.
850 
851 	\param timer The timer to be removed.
852 
853 */
854 void
855 Team::RemoveUserTimer(UserTimer* timer)
856 {
857 	fUserTimers.RemoveTimer(timer);
858 
859 	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
860 		UserDefinedTimersRemoved(1);
861 }
862 
863 
864 /*!	Deletes all (or all user-defined) user timers of the team.
865 
866 	Timer's belonging to the team's threads are not affected.
867 	The caller must hold the team's lock.
868 
869 	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
870 		otherwise all timers are deleted.
871 */
void
Team::DeleteUserTimers(bool userDefinedOnly)
{
	// DeleteTimers() returns how many user-defined timers were deleted, so
	// the limit counter can be adjusted accordingly
	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
	UserDefinedTimersRemoved(count);
}
878 
879 
880 /*!	If not at the limit yet, increments the team's user-defined timer count.
881 	\return \c true, if the limit wasn't reached yet, \c false otherwise.
882 */
883 bool
884 Team::CheckAddUserDefinedTimer()
885 {
886 	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
887 	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
888 		atomic_add(&fUserDefinedTimerCount, -1);
889 		return false;
890 	}
891 
892 	return true;
893 }
894 
895 
896 /*!	Subtracts the given count for the team's user-defined timer count.
897 	\param count The count to subtract.
898 */
void
Team::UserDefinedTimersRemoved(int32 count)
{
	// give back \a count slots of the per-team user-defined timer budget
	atomic_add(&fUserDefinedTimerCount, -count);
}
904 
905 
// Deactivates all CPU-time based user timers of the team (both total-CPU and
// user-CPU timers). Deactivate() presumably removes the timer from its list,
// so the Head() loops terminate -- confirm against the timer implementation.
void
Team::DeactivateCPUTimeUserTimers()
{
	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();

	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
		timer->Deactivate();
}
915 
916 
917 /*!	Returns the team's current total CPU time (kernel + user + offset).
918 
919 	The caller must hold \c time_lock.
920 
921 	\param ignoreCurrentRun If \c true and the current thread is one team's
922 		threads, don't add the time since the last time \c last_time was
923 		updated. Should be used in "thread unscheduled" scheduler callbacks,
924 		since although the thread is still running at that time, its time has
925 		already been stopped.
926 	\return The team's current total CPU time.
927 */
bigtime_t
Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
{
	// start with the time accumulated from threads that already died
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		// \a lockedThread's time_lock is already held by the caller -- tell
		// the locker not to acquire it again
		bool alreadyLocked = thread == lockedThread;
		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
		time += thread->kernel_time + thread->user_time;

		// last_time != 0 means the thread is currently scheduled; add the
		// time of its ongoing run, unless the caller asked us not to
		if (thread->last_time != 0) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}

		// Detach() so the locker doesn't release a lock it didn't acquire
		if (alreadyLocked)
			threadTimeLocker.Detach();
	}

	return time;
}
954 
955 
956 /*!	Returns the team's current user CPU time.
957 
958 	The caller must hold \c time_lock.
959 
960 	\return The team's current user CPU time.
961 */
bigtime_t
Team::UserCPUTime() const
{
	// start with the user time accumulated from threads that already died
	bigtime_t time = dead_threads_user_time;

	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->user_time;

		// a scheduled thread currently running in userland contributes its
		// ongoing run as user time
		if (thread->last_time != 0 && !thread->in_kernel)
			time += now - thread->last_time;
	}

	return time;
}
980 
981 
982 //	#pragma mark - ProcessGroup
983 
984 
/*!	Constructs a process group with the given ID.
	The group is not yet part of a session nor publicly accessible; see
	Publish()/PublishLocked() for that.
*/
ProcessGroup::ProcessGroup(pid_t id)
	:
	id(id),
	teams(NULL),
	fSession(NULL),
	fInOrphanedCheckList(false)
{
	// include the group ID in the lock name; MUTEX_FLAG_CLONE_NAME makes the
	// mutex keep its own copy of the stack-local string
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}
996 
997 
ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session
	// (fSession != NULL implies the group was published; cf. PublishLocked())
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		// drop the session reference acquired in PublishLocked()
		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}
1021 
1022 
1023 /*static*/ ProcessGroup*
1024 ProcessGroup::Get(pid_t id)
1025 {
1026 	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1027 	ProcessGroup* group = sGroupHash.Lookup(id);
1028 	if (group != NULL)
1029 		group->AcquireReference();
1030 	return group;
1031 }
1032 
1033 
1034 /*!	Adds the group the given session and makes it publicly accessible.
1035 	The caller must not hold the process group hash lock.
1036 */
void
ProcessGroup::Publish(ProcessSession* session)
{
	// just the locking wrapper around PublishLocked()
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	PublishLocked(session);
}
1043 
1044 
1045 /*!	Adds the group to the given session and makes it publicly accessible.
1046 	The caller must hold the process group hash lock.
1047 */
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	// the ID must not be in use yet
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	// the group holds a reference to its session until destruction
	fSession = session;
	fSession->AcquireReference();

	sGroupHash.InsertUnchecked(this);
}
1058 
1059 
1060 /*!	Checks whether the process group is orphaned.
1061 	The caller must hold the group's lock.
1062 	\return \c true, if the group is orphaned, \c false otherwise.
1063 */
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	// Scan all member teams; one member with a parent in the same session but
	// in a different group suffices to make the group non-orphaned.
	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}
1089 
1090 
// Queues this group for a later orphaned-group check (cf. the
// sOrphanedCheckProcessGroups list above); a no-op if already queued.
void
ProcessGroup::ScheduleOrphanedCheck()
{
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (!fInOrphanedCheckList) {
		sOrphanedCheckProcessGroups.Add(this);
		fInOrphanedCheckList = true;
	}
}
1101 
1102 
// Clears the queued-for-orphaned-check flag. NOTE(review): only resets the
// flag -- presumably the caller has already removed the group from
// sOrphanedCheckProcessGroups while holding sOrphanedCheckLock; confirm at
// the call sites.
void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}
1108 
1109 
1110 //	#pragma mark - ProcessSession
1111 
1112 
/*!	Constructs a session with the given ID, initially without a controlling
	TTY (-1) or foreground process group (-1).
*/
ProcessSession::ProcessSession(pid_t id)
	:
	id(id),
	controlling_tty(-1),
	foreground_group(-1)
{
	// include the session ID in the lock name; MUTEX_FLAG_CLONE_NAME makes
	// the mutex keep its own copy of the stack-local string
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}
1123 
1124 
ProcessSession::~ProcessSession()
{
	mutex_destroy(&fLock);
}
1129 
1130 
1131 //	#pragma mark - KDL functions
1132 
1133 
1134 static void
1135 _dump_team_info(Team* team)
1136 {
1137 	kprintf("TEAM: %p\n", team);
1138 	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
1139 		team->id);
1140 	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
1141 	kprintf("name:             '%s'\n", team->Name());
1142 	kprintf("args:             '%s'\n", team->Args());
1143 	kprintf("hash_next:        %p\n", team->hash_next);
1144 	kprintf("parent:           %p", team->parent);
1145 	if (team->parent != NULL) {
1146 		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
1147 	} else
1148 		kprintf("\n");
1149 
1150 	kprintf("children:         %p\n", team->children);
1151 	kprintf("num_threads:      %d\n", team->num_threads);
1152 	kprintf("state:            %d\n", team->state);
1153 	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
1154 	kprintf("io_context:       %p\n", team->io_context);
1155 	if (team->address_space)
1156 		kprintf("address_space:    %p\n", team->address_space);
1157 	kprintf("user data:        %p (area %" B_PRId32 ")\n",
1158 		(void*)team->user_data, team->user_data_area);
1159 	kprintf("free user thread: %p\n", team->free_user_threads);
1160 	kprintf("main_thread:      %p\n", team->main_thread);
1161 	kprintf("thread_list:      %p\n", team->thread_list);
1162 	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
1163 	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
1164 }
1165 
1166 
/*!	KDL command: dumps info about one team. With no argument, the current
	team is dumped; otherwise the argument is interpreted as a kernel address
	(semi-hack), a team name, or a team ID.
*/
static int
dump_team_info(int argc, char** argv)
{
	ulong arg;
	bool found = false;

	if (argc < 2) {
		// no argument -- dump the current thread's team
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	arg = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(arg)) {
		// semi-hack
		_dump_team_info((Team*)arg);
		return 0;
	}

	// walk through the thread list, trying to match name or id
	// (no locking -- we're in the kernel debugger)
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == (team_id)arg) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found)
		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
	return 0;
}
1204 
1205 
/*!	KDL command: lists all teams (address, ID, parent, name).
	No locking -- we're in the kernel debugger.
*/
static int
dump_teams(int argc, char** argv)
{
	kprintf("%-*s       id  %-*s    name\n", B_PRINTF_POINTER_WIDTH, "team",
		B_PRINTF_POINTER_WIDTH, "parent");

	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent, team->Name());
	}

	return 0;
}
1219 
1220 
1221 //	#pragma mark - Private functions
1222 
1223 
1224 /*!	Inserts team \a team into the child list of team \a parent.
1225 
1226 	The caller must hold the lock of both \a parent and \a team.
1227 
1228 	\param parent The parent team.
1229 	\param team The team to be inserted into \a parent's child list.
1230 */
1231 static void
1232 insert_team_into_parent(Team* parent, Team* team)
1233 {
1234 	ASSERT(parent != NULL);
1235 
1236 	team->siblings_next = parent->children;
1237 	parent->children = team;
1238 	team->parent = parent;
1239 }
1240 
1241 
1242 /*!	Removes team \a team from the child list of team \a parent.
1243 
1244 	The caller must hold the lock of both \a parent and \a team.
1245 
1246 	\param parent The parent team.
1247 	\param team The team to be removed from \a parent's child list.
1248 */
1249 static void
1250 remove_team_from_parent(Team* parent, Team* team)
1251 {
1252 	Team* child;
1253 	Team* last = NULL;
1254 
1255 	for (child = parent->children; child != NULL;
1256 			child = child->siblings_next) {
1257 		if (child == team) {
1258 			if (last == NULL)
1259 				parent->children = child->siblings_next;
1260 			else
1261 				last->siblings_next = child->siblings_next;
1262 
1263 			team->parent = NULL;
1264 			break;
1265 		}
1266 		last = child;
1267 	}
1268 }
1269 
1270 
1271 /*!	Returns whether the given team is a session leader.
1272 	The caller must hold the team's lock or its process group's lock.
1273 */
1274 static bool
1275 is_session_leader(Team* team)
1276 {
1277 	return team->session_id == team->id;
1278 }
1279 
1280 
1281 /*!	Returns whether the given team is a process group leader.
1282 	The caller must hold the team's lock or its process group's lock.
1283 */
1284 static bool
1285 is_process_group_leader(Team* team)
1286 {
1287 	return team->group_id == team->id;
1288 }
1289 
1290 
1291 /*!	Inserts the given team into the given process group.
1292 	The caller must hold the process group's lock, the team's lock, and the
1293 	team's parent's lock.
1294 */
1295 static void
1296 insert_team_into_group(ProcessGroup* group, Team* team)
1297 {
1298 	team->group = group;
1299 	team->group_id = group->id;
1300 	team->session_id = group->Session()->id;
1301 
1302 	team->group_next = group->teams;
1303 	group->teams = team;
1304 	group->AcquireReference();
1305 }
1306 
1307 
1308 /*!	Removes the given team from its process group.
1309 
1310 	The caller must hold the process group's lock, the team's lock, and the
1311 	team's parent's lock. Interrupts must be enabled.
1312 
1313 	\param team The team that'll be removed from its process group.
1314 */
1315 static void
1316 remove_team_from_group(Team* team)
1317 {
1318 	ProcessGroup* group = team->group;
1319 	Team* current;
1320 	Team* last = NULL;
1321 
1322 	// the team must be in a process group to let this function have any effect
1323 	if  (group == NULL)
1324 		return;
1325 
1326 	for (current = group->teams; current != NULL;
1327 			current = current->group_next) {
1328 		if (current == team) {
1329 			if (last == NULL)
1330 				group->teams = current->group_next;
1331 			else
1332 				last->group_next = current->group_next;
1333 
1334 			team->group = NULL;
1335 			break;
1336 		}
1337 		last = current;
1338 	}
1339 
1340 	team->group = NULL;
1341 	team->group_next = NULL;
1342 
1343 	group->ReleaseReference();
1344 }
1345 
1346 
/*!	Creates the team's user data area (used e.g. for the threads'
	user_thread structures) and reserves the address range around it, so the
	area can presumably be resized later without colliding with other areas
	-- TODO confirm against resize code outside this chunk.

	\param team The team to create the user data area for.
	\param exactAddress If not \c NULL, the area is created at exactly this
		address (used by fork, where the child must mirror the parent's
		layout); otherwise a randomized base within the user data region is
		chosen.
	\return \c B_OK on success, an error code (the failed area ID) otherwise.
*/
static status_t
create_team_user_data(Team* team, void* exactAddress = NULL)
{
	void* address;
	uint32 addressSpec;

	// pick the requested exact address, or a randomized base address
	if (exactAddress != NULL) {
		address = exactAddress;
		addressSpec = B_EXACT_ADDRESS;
	} else {
		address = (void*)KERNEL_USER_DATA_BASE;
		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
	}

	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);

	// If the reservation succeeded, create the area at the reserved address.
	// An exact address is used even if the reservation failed (the caller
	// requires that specific location); otherwise fall back to a randomized
	// base without a reservation.
	virtual_address_restrictions virtualRestrictions = {};
	if (result == B_OK || exactAddress != NULL) {
		if (exactAddress != NULL)
			virtualRestrictions.address = exactAddress;
		else
			virtualRestrictions.address = address;
		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	} else {
		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
	}

	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area",
		kTeamUserDataInitialSize, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0,
		&virtualRestrictions, &physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	// initialize the team's user data bookkeeping
	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = kTeamUserDataInitialSize;
	team->free_user_threads = NULL;

	return B_OK;
}
1390 
1391 
1392 static void
1393 delete_team_user_data(Team* team)
1394 {
1395 	if (team->user_data_area >= 0) {
1396 		vm_delete_area(team->id, team->user_data_area, true);
1397 		vm_unreserve_address_range(team->id, (void*)team->user_data,
1398 			kTeamUserDataReservedSize);
1399 
1400 		team->user_data = 0;
1401 		team->used_user_data = 0;
1402 		team->user_data_size = 0;
1403 		team->user_data_area = -1;
1404 		while (free_user_thread* entry = team->free_user_threads) {
1405 			team->free_user_threads = entry->next;
1406 			free(entry);
1407 		}
1408 	}
1409 }
1410 
1411 
1412 static status_t
1413 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1414 	int32 argCount, int32 envCount, char**& _flatArgs)
1415 {
1416 	if (argCount < 0 || envCount < 0)
1417 		return B_BAD_VALUE;
1418 
1419 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1420 		return B_TOO_MANY_ARGS;
1421 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1422 		return B_BAD_VALUE;
1423 
1424 	if (!IS_USER_ADDRESS(userFlatArgs))
1425 		return B_BAD_ADDRESS;
1426 
1427 	// allocate kernel memory
1428 	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1429 	if (flatArgs == NULL)
1430 		return B_NO_MEMORY;
1431 
1432 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1433 		free(flatArgs);
1434 		return B_BAD_ADDRESS;
1435 	}
1436 
1437 	// check and relocate the array
1438 	status_t error = B_OK;
1439 	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
1440 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
1441 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
1442 		if (i == argCount || i == argCount + envCount + 1) {
1443 			// check array null termination
1444 			if (flatArgs[i] != NULL) {
1445 				error = B_BAD_VALUE;
1446 				break;
1447 			}
1448 		} else {
1449 			// check string
1450 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1451 			size_t maxLen = stringEnd - arg;
1452 			if (arg < stringBase || arg >= stringEnd
1453 					|| strnlen(arg, maxLen) == maxLen) {
1454 				error = B_BAD_VALUE;
1455 				break;
1456 			}
1457 
1458 			flatArgs[i] = arg;
1459 		}
1460 	}
1461 
1462 	if (error == B_OK)
1463 		_flatArgs = flatArgs;
1464 	else
1465 		free(flatArgs);
1466 
1467 	return error;
1468 }
1469 
1470 
1471 static void
1472 free_team_arg(struct team_arg* teamArg)
1473 {
1474 	if (teamArg != NULL) {
1475 		free(teamArg->flat_args);
1476 		free(teamArg->path);
1477 		free(teamArg);
1478 	}
1479 }
1480 
1481 
1482 static status_t
1483 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1484 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1485 	port_id port, uint32 token)
1486 {
1487 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1488 	if (teamArg == NULL)
1489 		return B_NO_MEMORY;
1490 
1491 	teamArg->path = strdup(path);
1492 	if (teamArg->path == NULL) {
1493 		free(teamArg);
1494 		return B_NO_MEMORY;
1495 	}
1496 
1497 	// copy the args over
1498 	teamArg->flat_args = flatArgs;
1499 	teamArg->flat_args_size = flatArgsSize;
1500 	teamArg->arg_count = argCount;
1501 	teamArg->env_count = envCount;
1502 	teamArg->flags = 0;
1503 	teamArg->umask = umask;
1504 	teamArg->error_port = port;
1505 	teamArg->error_token = token;
1506 
1507 	// determine the flags from the environment
1508 	const char* const* env = flatArgs + argCount + 1;
1509 	for (int32 i = 0; i < envCount; i++) {
1510 		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
1511 			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
1512 			break;
1513 		}
1514 	}
1515 
1516 	*_teamArg = teamArg;
1517 	return B_OK;
1518 }
1519 
1520 
/*!	Kernel-side setup of a newly created team's main thread: copies the flat
	arguments and the user_space_program_args structure onto the user stack,
	clones and registers the commpage, loads the runtime loader, and enters
	userspace. Runs in the context of the new team and takes over ownership
	of \a args (a struct team_arg), which is freed once the arguments have
	been copied out.

	\param args The team_arg structure describing path, arguments, and
		environment of the team.
	\return Only returns in case of error; on success control is transferred
		to userspace and never comes back.
*/
static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
		thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size								| usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
	// TLS_SIZE							| TLS data
	// sizeof(user_space_program_args)	| argument structure for the runtime
	//									| loader
	// flat arguments size				| flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	// the argument structure lives above the stack and the TLS block
	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	// fill in the program arguments structure and copy the flat arguments
	// (pointer arrays plus string data) right after it
	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
				teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// set team args and update state
	team->Lock();
	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	// Clone commpage area
	area_id commPageArea = clone_commpage_area(team->id,
		&team->commpage_address);
	if (commPageArea  < B_OK) {
		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
			strerror(commPageArea)));
		return commPageArea;
	}

	// Register commpage image
	image_id commPageImage = get_commpage_image();
	extended_image_info imageInfo;
	err = get_image_info(commPageImage, &imageInfo.basic_info);
	if (err != B_OK) {
		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
			strerror(err)));
		return err;
	}
	imageInfo.basic_info.text = team->commpage_address;
	imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
	imageInfo.symbol_table = NULL;
	imageInfo.symbol_hash = NULL;
	imageInfo.string_table = NULL;
	image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
	if (image < 0) {
		TRACE(("team_create_thread_start: register_image() failed: %s\n",
			strerror(image)));
		return image;
	}

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	// enter userspace -- returns only in case of error
	return thread_enter_userspace_new_team(thread, (addr_t)entry,
		programArgs, team->commpage_address);
}
1661 
1662 
/*!	Entry point of a new team's main thread. Wraps
	team_create_thread_start_internal(); if that returns (i.e. loading or
	entering userspace failed), the team's exit info is initialized
	accordingly and the thread exits.
*/
static status_t
team_create_thread_start(void* args)
{
	team_create_thread_start_internal(args);
	// only reached on error -- record the failure as the team's exit info
	team_init_exit_info_on_error(thread_get_current_thread()->team);
	thread_exit();
		// does not return
	return B_OK;
}
1672 
1673 
/*!	Creates and starts a new userland team running the executable at
	\a _flatArgs[0].

	On success ownership of the flat arguments buffer is transferred to the
	new team's main thread (\a _flatArgs is set to \c NULL) and that thread
	runs team_create_thread_start().

	\param _flatArgs Flat process arguments; element 0 is the executable path.
	\param flatArgsSize Size of the flat arguments buffer in bytes.
	\param argCount Number of argument strings.
	\param envCount Number of environment strings.
	\param priority Requested main thread priority.
	\param parentID Team that becomes the new team's parent.
	\param flags If \c B_WAIT_TILL_LOADED is set, waits until the runtime
		loader reported success or failure before returning.
	\param errorPort Port the runtime loader reports errors to.
	\param errorToken Token passed along with such error reports.
	\return The ID of the new team's main thread, or an error code.
*/
static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	thread_id thread;
	status_t status;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	io_context* parentIOContext = NULL;
	team_id teamID;
	bool teamLimitReached = false;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
		"\n", path, flatArgs, argCount));

	// cut the path from the main thread name
	const char* threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the main thread object
	Thread* mainThread;
	status = Thread::Create(threadName, mainThread);
	if (status != B_OK)
		return status;
	BReference<Thread> mainThreadReference(mainThread, true);

	// create team object
	Team* team = Team::Create(mainThread->id, path, false);
	if (team == NULL)
		return B_NO_MEMORY;
	BReference<Team> teamReference(team, true);

	// prepare the synchronization info the loader thread will report through
	if (flags & B_WAIT_TILL_LOADED) {
		loadingInfo.thread = thread_get_current_thread();
		loadingInfo.result = B_ERROR;
		loadingInfo.done = false;
		team->loading_info = &loadingInfo;
	}

	// get the parent team
	Team* parent = Team::Get(parentID);
	if (parent == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> parentReference(parent, true);

	parent->LockTeamAndProcessGroup();
	team->Lock();

	// inherit the parent's user/group
	inherit_parent_user_and_group(team, parent);

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err2;

	// honor a DISABLE_ASLR=1 environment variable
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// insert the team into its parent and the teams hash
	parent->LockTeamAndProcessGroup();
	team->Lock();

	{
		InterruptsSpinLocker teamsLocker(sTeamHashLock);

		sTeamHash.Insert(team);
		teamLimitReached = sUsedTeams >= sMaxTeams;
		if (!teamLimitReached)
			sUsedTeams++;
	}

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err6;
	}

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	// NOTE(review): the \a priority parameter is not used here -- the main
	// thread is created with B_NORMAL_PRIORITY; confirm whether that is
	// intentional.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err6;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		if (mainThread != NULL) {
			// resume the team's main thread
			thread_continue(mainThread);
		}

		// Now suspend ourselves until loading is finished. We will be woken
		// either by the thread, when it finished or aborted loading, or when
		// the team is going to die (e.g. is killed). In either case the one
		// setting `loadingInfo.done' is responsible for removing the info from
		// the team structure.
		while (!loadingInfo.done)
			thread_suspend();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(teamID);

	return thread;

err6:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	{
		InterruptsSpinLocker teamsLocker(sTeamHashLock);
		sTeamHash.Remove(team);
		if (!teamLimitReached)
			sUsedTeams--;
	}

	sNotificationService.Notify(TEAM_REMOVED, team);

	delete_team_user_data(team);
err4:
	team->address_space->Put();
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	return status;
}
1886 
1887 
1888 /*!	Almost shuts down the current team and loads a new image into it.
1889 	If successful, this function does not return and will takeover ownership of
1890 	the arguments provided.
1891 	This function may only be called in a userland team (caused by one of the
1892 	exec*() syscalls).
1893 */
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
		team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	// user timers and exec-reset signals don't survive the exec
	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	// build the team_arg structure before tearing anything down, so a
	// failure here leaves the team intact
	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	// Tear down the team's userland resources: user data area, all other
	// areas, XSI/realtime semaphores, ports, sems, images, and CLOEXEC fds.
	// From here on there is no way back -- on failure we can only exit.
	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	// update ASLR
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
			// does never return
	} else
		free_team_arg(teamArgs);

	// Sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that).
	exit_thread(status);

	// We return a status here since the signal that is sent by the
	// call above is not immediately handled.
	return B_ERROR;
}
2034 
2035 
/*!	Forks the current (userland) team: creates a new team that is a copy of
	the caller's team -- name, arguments, user/group, signal actions, I/O
	context, realtime semaphore context, images, and a copy-on-write clone of
	all areas -- and starts its main thread from the stored fork frame.

	\return The ID of the child's main thread (in the parent), or an error
		code. The child resumes execution via the fork frame.
*/
static thread_id
fork_team(void)
{
	Thread* parentThread = thread_get_current_thread();
	Team* parentTeam = parentThread->team;
	Team* team;
	arch_fork_arg* forkArgs;
	struct area_info info;
	thread_id threadID;
	status_t status;
	ssize_t areaCookie;
	bool teamLimitReached = false;

	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));

	if (parentTeam == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// create a new team
	// TODO: this is very similar to load_image_internal() - maybe we can do
	// something about it :)

	// create the main thread object
	Thread* thread;
	status = Thread::Create(parentThread->name, thread);
	if (status != B_OK)
		return status;
	BReference<Thread> threadReference(thread, true);

	// create the team object
	team = Team::Create(thread->id, NULL, false);
	if (team == NULL)
		return B_NO_MEMORY;

	parentTeam->LockTeamAndProcessGroup();
	team->Lock();

	// the child inherits name, arguments, and commpage address
	team->SetName(parentTeam->Name());
	team->SetArgs(parentTeam->Args());

	team->commpage_address = parentTeam->commpage_address;

	// Inherit the parent's user/group.
	inherit_parent_user_and_group(team, parentTeam);

	// inherit signal handlers
	team->InheritSignalActions(parentTeam);

	team->Unlock();
	parentTeam->UnlockTeamAndProcessGroup();

	// inherit some team debug flags
	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
		& B_TEAM_DEBUG_INHERITED_FLAGS;

	// the fork frame the child's main thread will resume from
	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
	if (forkArgs == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// duplicate the realtime sem context
	if (parentTeam->realtime_sem_context) {
		team->realtime_sem_context = clone_realtime_sem_context(
			parentTeam->realtime_sem_context);
		if (team->realtime_sem_context == NULL) {
			status = B_NO_MEMORY;
			goto err2;
		}
	}

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status < B_OK)
		goto err3;

	// copy all areas of the team
	// TODO: should be able to handle stack areas differently (ie. don't have
	// them copy-on-write)

	areaCookie = 0;
	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
		if (info.area == parentTeam->user_data_area) {
			// don't clone the user area; just create a new one
			status = create_team_user_data(team, info.address);
			if (status != B_OK)
				break;

			thread->user_thread = team_allocate_user_thread(team);
		} else {
			void* address;
			area_id area = vm_copy_area(team->address_space->ID(), info.name,
				&address, B_CLONE_ADDRESS, info.protection, info.area);
			if (area < B_OK) {
				status = area;
				break;
			}

			// remember the clone of the parent thread's stack area
			if (info.area == parentThread->user_stack_area)
				thread->user_stack_area = area;
		}
	}

	if (status < B_OK)
		goto err4;

	if (thread->user_thread == NULL) {
#if KDEBUG
		panic("user data area not found, parent area is %" B_PRId32,
			parentTeam->user_data_area);
#endif
		status = B_ERROR;
		goto err4;
	}

	// the child's main thread mirrors the parent thread's userland state
	thread->user_stack_base = parentThread->user_stack_base;
	thread->user_stack_size = parentThread->user_stack_size;
	thread->user_local_storage = parentThread->user_local_storage;
	thread->sig_block_mask = parentThread->sig_block_mask;
	thread->signal_stack_base = parentThread->signal_stack_base;
	thread->signal_stack_size = parentThread->signal_stack_size;
	thread->signal_stack_enabled = parentThread->signal_stack_enabled;

	arch_store_fork_frame(forkArgs);

	// copy image list
	if (copy_images(parentTeam->id, team) != B_OK)
		goto err5;

	// insert the team into its parent and the teams hash
	parentTeam->LockTeamAndProcessGroup();
	team->Lock();

	{
		InterruptsSpinLocker teamsLocker(sTeamHashLock);

		sTeamHash.Insert(team);
		teamLimitReached = sUsedTeams >= sMaxTeams;
		if (!teamLimitReached)
			sUsedTeams++;
	}

	insert_team_into_parent(parentTeam, team);
	insert_team_into_group(parentTeam->group, team);

	team->Unlock();
	parentTeam->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err6;
	}

	// create the main thread
	{
		ThreadCreationAttributes threadCreationAttributes(NULL,
			parentThread->name, parentThread->priority, NULL, team->id, thread);
		threadCreationAttributes.forkArgs = forkArgs;
		threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS;
		threadID = thread_create_thread(threadCreationAttributes, false);
		if (threadID < 0) {
			status = threadID;
			goto err6;
		}
	}

	// notify the debugger
	user_debug_team_created(team->id);

	T(TeamForked(threadID));

	resume_thread(threadID);
	return threadID;

err6:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parentTeam->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parentTeam->UnlockTeamAndProcessGroup();

	{
		InterruptsSpinLocker teamsLocker(sTeamHashLock);
		sTeamHash.Remove(team);
		if (!teamLimitReached)
			sUsedTeams--;
	}

	sNotificationService.Notify(TEAM_REMOVED, team);
err5:
	remove_images(team);
err4:
	team->address_space->RemoveAndPut();
err3:
	delete_realtime_sem_context(team->realtime_sem_context);
err2:
	free(forkArgs);
err1:
	team->ReleaseReference();

	return status;
}
2254 
2255 
/*!	Returns whether the specified team \a parent has any children belonging to the
2257 	process group with the specified ID \a groupID.
2258 	The caller must hold \a parent's lock.
2259 */
2260 static bool
2261 has_children_in_group(Team* parent, pid_t groupID)
2262 {
2263 	for (Team* child = parent->children; child != NULL;
2264 			child = child->siblings_next) {
2265 		TeamLocker childLocker(child);
2266 		if (child->group_id == groupID)
2267 			return true;
2268 	}
2269 
2270 	return false;
2271 }
2272 
2273 
2274 /*!	Returns the first job control entry from \a children, which matches \a id.
2275 	\a id can be:
2276 	- \code > 0 \endcode: Matching an entry with that team ID.
2277 	- \code == -1 \endcode: Matching any entry.
2278 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2279 	\c 0 is an invalid value for \a id.
2280 
2281 	The caller must hold the lock of the team that \a children belongs to.
2282 
2283 	\param children The job control entry list to check.
2284 	\param id The match criterion.
2285 	\return The first matching entry or \c NULL, if none matches.
2286 */
2287 static job_control_entry*
2288 get_job_control_entry(team_job_control_children& children, pid_t id)
2289 {
2290 	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2291 		 job_control_entry* entry = it.Next();) {
2292 
2293 		if (id > 0) {
2294 			if (entry->thread == id)
2295 				return entry;
2296 		} else if (id == -1) {
2297 			return entry;
2298 		} else {
2299 			pid_t processGroup
2300 				= (entry->team ? entry->team->group_id : entry->group_id);
2301 			if (processGroup == -id)
2302 				return entry;
2303 		}
2304 	}
2305 
2306 	return NULL;
2307 }
2308 
2309 
2310 /*!	Returns the first job control entry from one of team's dead, continued, or
2311 	stopped children which matches \a id.
2312 	\a id can be:
2313 	- \code > 0 \endcode: Matching an entry with that team ID.
2314 	- \code == -1 \endcode: Matching any entry.
2315 	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2316 	\c 0 is an invalid value for \a id.
2317 
2318 	The caller must hold \a team's lock.
2319 
2320 	\param team The team whose dead, stopped, and continued child lists shall be
2321 		checked.
2322 	\param id The match criterion.
2323 	\param flags Specifies which children shall be considered. Dead children
2324 		are considered when \a flags is ORed bitwise with \c WEXITED, stopped
2325 		children are considered when \a flags is ORed bitwise with \c WUNTRACED
2326 		or \c WSTOPPED, continued children when \a flags is ORed bitwise with
2327 		\c WCONTINUED.
2328 	\return The first matching entry or \c NULL, if none matches.
2329 */
2330 static job_control_entry*
2331 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2332 {
2333 	job_control_entry* entry = NULL;
2334 
2335 	if ((flags & WEXITED) != 0)
2336 		entry = get_job_control_entry(team->dead_children, id);
2337 
2338 	if (entry == NULL && (flags & WCONTINUED) != 0)
2339 		entry = get_job_control_entry(team->continued_children, id);
2340 
2341 	if (entry == NULL && (flags & (WUNTRACED | WSTOPPED)) != 0)
2342 		entry = get_job_control_entry(team->stopped_children, id);
2343 
2344 	return entry;
2345 }
2346 
2347 
// Creates an entry that doesn't hold a process group reference yet;
// InitDeadState() acquires one when the owning team dies.
job_control_entry::job_control_entry()
	:
	has_group_ref(false)
{
}
2353 
2354 
2355 job_control_entry::~job_control_entry()
2356 {
2357 	if (has_group_ref) {
2358 		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2359 
2360 		ProcessGroup* group = sGroupHash.Lookup(group_id);
2361 		if (group == NULL) {
2362 			panic("job_control_entry::~job_control_entry(): unknown group "
2363 				"ID: %" B_PRId32, group_id);
2364 			return;
2365 		}
2366 
2367 		groupHashLocker.Unlock();
2368 
2369 		group->ReleaseReference();
2370 	}
2371 }
2372 
2373 
2374 /*!	Invoked when the owning team is dying, initializing the entry according to
2375 	the dead state.
2376 
2377 	The caller must hold the owning team's lock and the scheduler lock.
2378 */
void
job_control_entry::InitDeadState()
{
	if (team != NULL) {
		ASSERT(team->exit.initialized);

		// Acquire a reference to the process group, so it stays valid for
		// the lifetime of this entry (released in the destructor).
		group_id = team->group_id;
		team->group->AcquireReference();
		has_group_ref = true;

		// Snapshot everything a wait*() caller may ask about later.
		thread = team->id;
		status = team->exit.status;
		reason = team->exit.reason;
		signal = team->exit.signal;
		signaling_user = team->exit.signaling_user;
		user_time = team->dead_threads_user_time
			+ team->dead_children.user_time;
		kernel_time = team->dead_threads_kernel_time
			+ team->dead_children.kernel_time;

		// The entry is now self-contained -- detach it from the team.
		team = NULL;
	}
}
2402 
2403 
2404 job_control_entry&
2405 job_control_entry::operator=(const job_control_entry& other)
2406 {
2407 	state = other.state;
2408 	thread = other.thread;
2409 	signal = other.signal;
2410 	has_group_ref = false;
2411 	signaling_user = other.signaling_user;
2412 	team = other.team;
2413 	group_id = other.group_id;
2414 	status = other.status;
2415 	reason = other.reason;
2416 	user_time = other.user_time;
2417 	kernel_time = other.kernel_time;
2418 
2419 	return *this;
2420 }
2421 
2422 
/*!	This is the kernel backend for waitid().

	Waits for a child of the calling team matching \a child to change state
	(as selected by \a flags) and fills in \a _info and \a _usage_info
	accordingly.

	\param child \code > 0 \endcode: wait for that team; \c 0: wait for any
		child in the caller's process group; \c -1: wait for any child;
		\code < -1 \endcode: wait for any child in process group \c -child.
	\param flags \c WEXITED, \c WUNTRACED/\c WSTOPPED, and \c WCONTINUED
		select the state changes of interest (at least one must be given);
		\c WNOHANG makes the call non-blocking; \c WNOWAIT leaves the state
		change "unconsumed" for a later wait.
	\param _info Output: the siginfo_t describing the state change.
	\param _usage_info Output: the dead child's CPU usage (only filled in for
		\c JOB_CONTROL_STATE_DEAD).
	\return The ID of the child whose state change is reported, or an error
		code (\c B_BAD_VALUE, \c ECHILD, \c B_WOULD_BLOCK, \c B_INTERRUPTED).
*/
static thread_id
wait_for_child(pid_t child, uint32 flags, siginfo_t& _info,
	team_usage_info& _usage_info)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	struct job_control_entry foundEntry;
	struct job_control_entry* freeDeathEntry = NULL;
	status_t status = B_OK;

	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
		child, flags));

	T(WaitForChild(child, flags));

	// at least one of the state-selection flags must be given
	if ((flags & (WEXITED | WUNTRACED | WSTOPPED | WCONTINUED)) == 0) {
		T(WaitForChildDone(B_BAD_VALUE));
		return B_BAD_VALUE;
	}

	pid_t originalChild = child;

	bool ignoreFoundEntries = false;
	bool ignoreFoundEntriesChecked = false;

	// Retry loop: each iteration checks for a matching job control entry
	// and, if there is none yet, blocks on the dead-children condition
	// variable until something changes.
	while (true) {
		// lock the team
		TeamLocker teamLocker(team);

		// A 0 child argument means to wait for all children in the process
		// group of the calling team.
		child = originalChild == 0 ? -team->group_id : originalChild;

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		// If we don't have an entry yet, determine whether to fail with
		// ECHILD (no such child) or to block (children still running).
		if (entry == NULL) {
			// No success yet -- check whether there are any children complying
			// to the process group specification at all.
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else if (child != team->id) {
				if (Team* childTeam = Team::Get(child)) {
					BReference<Team> childTeamReference(childTeam, true);
					TeamLocker childTeamLocker(childTeam);
					childrenExist = childTeam->parent == team;
				}
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;

			// unless WNOWAIT has been specified, "consume" the wait state
			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
				if (entry->state == JOB_CONTROL_STATE_DEAD) {
					// The child is dead. Reap its death entry.
					freeDeathEntry = entry;
					team->dead_children.entries.Remove(entry);
					team->dead_children.count--;
				} else {
					// The child is well. Reset its job control state.
					team_set_job_control_state(entry->team,
						JOB_CONTROL_STATE_NONE, NULL);
				}
			}
		}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children.condition_variable.Add(&deadWaitEntry);

		teamLocker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			teamLocker.Lock();

			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			teamLocker.Unlock();

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// When we got here, we have a valid death entry, and already got
	// unregistered from the team or group. Fill in the returned info.
	memset(&_info, 0, sizeof(_info));
	_info.si_signo = SIGCHLD;
	_info.si_pid = foundEntry.thread;
	_info.si_uid = foundEntry.signaling_user;
	// TODO: Fill in si_errno?

	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			_info.si_code = foundEntry.reason;
			_info.si_status = foundEntry.reason == CLD_EXITED
				? foundEntry.status : foundEntry.signal;
			_usage_info.user_time = foundEntry.user_time;
			_usage_info.kernel_time = foundEntry.kernel_time;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			_info.si_code = CLD_STOPPED;
			_info.si_status = foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			_info.si_code = CLD_CONTINUED;
			_info.si_status = 0;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
	// status is available.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker signalLocker(team->signal_lock);
	SpinLocker threadCreationLocker(gThreadCreationLock);

	if (is_team_signal_blocked(team, SIGCHLD)) {
		if (get_job_control_entry(team, child, flags) == NULL)
			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
	}

	threadCreationLocker.Unlock();
	signalLocker.Unlock();
	teamLocker.Unlock();

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}
2611 
2612 
2613 /*! Fills the team_info structure with information from the specified team.
2614 	Interrupts must be enabled. The team must not be locked.
2615 */
2616 static status_t
2617 fill_team_info(Team* team, team_info* info, size_t size)
2618 {
2619 	if (size != sizeof(team_info))
2620 		return B_BAD_VALUE;
2621 
2622 	// TODO: Set more informations for team_info
2623 	memset(info, 0, size);
2624 
2625 	info->team = team->id;
2626 		// immutable
2627 	info->image_count = count_images(team);
2628 		// protected by sImageMutex
2629 
2630 	TeamLocker teamLocker(team);
2631 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2632 
2633 	info->thread_count = team->num_threads;
2634 	//info->area_count =
2635 	info->debugger_nub_thread = team->debug_info.nub_thread;
2636 	info->debugger_nub_port = team->debug_info.nub_port;
2637 	info->uid = team->effective_uid;
2638 	info->gid = team->effective_gid;
2639 
2640 	strlcpy(info->args, team->Args(), sizeof(info->args));
2641 	info->argc = 1;
2642 
2643 	return B_OK;
2644 }
2645 
2646 
2647 /*!	Returns whether the process group contains stopped processes.
2648 	The caller must hold the process group's lock.
2649 */
2650 static bool
2651 process_group_has_stopped_processes(ProcessGroup* group)
2652 {
2653 	Team* team = group->teams;
2654 	while (team != NULL) {
2655 		// the parent team's lock guards the job control entry -- acquire it
2656 		team->LockTeamAndParent(false);
2657 
2658 		if (team->job_control_entry != NULL
2659 			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2660 			team->UnlockTeamAndParent();
2661 			return true;
2662 		}
2663 
2664 		team->UnlockTeamAndParent();
2665 
2666 		team = team->group_next;
2667 	}
2668 
2669 	return false;
2670 }
2671 
2672 
2673 /*!	Iterates through all process groups queued in team_remove_team() and signals
2674 	those that are orphaned and have stopped processes.
2675 	The caller must not hold any team or process group locks.
2676 */
static void
orphaned_process_group_check()
{
	// process as long as there are groups in the list
	while (true) {
		// remove the head from the list
		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
		if (group == NULL)
			return;

		group->UnsetOrphanedCheck();
		BReference<ProcessGroup> groupReference(group);
			// keeps the group alive while we work with it; released when
			// going out of scope

		// drop the list lock before taking the group lock
		orphanedCheckLocker.Unlock();

		AutoLocker<ProcessGroup> groupLocker(group);

		// If the group is orphaned and contains stopped processes, we're
		// supposed to send SIGHUP + SIGCONT.
		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
			Thread* currentThread = thread_get_current_thread();

			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
			send_signal_to_process_group_locked(group, signal, 0);

			signal.SetNumber(SIGCONT);
			send_signal_to_process_group_locked(group, signal, 0);
		}
	}
}
2709 
2710 
2711 static status_t
2712 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2713 	uint32 flags)
2714 {
2715 	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2716 		return B_BAD_VALUE;
2717 
2718 	// get the team
2719 	Team* team = Team::GetAndLock(id);
2720 	if (team == NULL)
2721 		return B_BAD_TEAM_ID;
2722 	BReference<Team> teamReference(team, true);
2723 	TeamLocker teamLocker(team, true);
2724 
2725 	if ((flags & B_CHECK_PERMISSION) != 0) {
2726 		uid_t uid = geteuid();
2727 		if (uid != 0 && uid != team->effective_uid)
2728 			return B_NOT_ALLOWED;
2729 	}
2730 
2731 	bigtime_t kernelTime = 0;
2732 	bigtime_t userTime = 0;
2733 
2734 	switch (who) {
2735 		case B_TEAM_USAGE_SELF:
2736 		{
2737 			Thread* thread = team->thread_list;
2738 
2739 			for (; thread != NULL; thread = thread->team_next) {
2740 				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2741 				kernelTime += thread->kernel_time;
2742 				userTime += thread->user_time;
2743 			}
2744 
2745 			kernelTime += team->dead_threads_kernel_time;
2746 			userTime += team->dead_threads_user_time;
2747 			break;
2748 		}
2749 
2750 		case B_TEAM_USAGE_CHILDREN:
2751 		{
2752 			Team* child = team->children;
2753 			for (; child != NULL; child = child->siblings_next) {
2754 				TeamLocker childLocker(child);
2755 
2756 				Thread* thread = team->thread_list;
2757 
2758 				for (; thread != NULL; thread = thread->team_next) {
2759 					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2760 					kernelTime += thread->kernel_time;
2761 					userTime += thread->user_time;
2762 				}
2763 
2764 				kernelTime += child->dead_threads_kernel_time;
2765 				userTime += child->dead_threads_user_time;
2766 			}
2767 
2768 			kernelTime += team->dead_children.kernel_time;
2769 			userTime += team->dead_children.user_time;
2770 			break;
2771 		}
2772 	}
2773 
2774 	info->kernel_time = kernelTime;
2775 	info->user_time = userTime;
2776 
2777 	return B_OK;
2778 }
2779 
2780 
2781 //	#pragma mark - Private kernel API
2782 
2783 
/*!	Initializes the team subsystem: sets up the team and process group hash
	tables, the initial session and process group, and the kernel team
	itself, and registers the "team"/"teams" KDL commands.
	Called once during kernel startup.
*/
status_t
team_init(kernel_args* args)
{
	// create the team hash table
	new(&sTeamHash) TeamTable;
	if (sTeamHash.Init(64) != B_OK)
		panic("Failed to init team hash table!");

	new(&sGroupHash) ProcessGroupHashTable;
	if (sGroupHash.Init() != B_OK)
		panic("Failed to init process group hash table!");

	// create initial session and process groups

	ProcessSession* session = new(std::nothrow) ProcessSession(1);
	if (session == NULL)
		panic("Could not create initial session.\n");
	BReference<ProcessSession> sessionReference(session, true);

	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");
	BReference<ProcessGroup> groupReference(group, true);

	group->Publish(session);

	// create the kernel team
	sKernelTeam = Team::Create(1, "kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");
	sKernelTeam->SetArgs(sKernelTeam->Name());
	sKernelTeam->state = TEAM_STATE_NORMAL;

	// the kernel team runs as root without supplementary groups
	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;
	sKernelTeam->supplementary_group_count = 0;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
		dprintf("Failed to resize FD table for kernel team!\n");

	// stick it in the team hash
	sTeamHash.Insert(sKernelTeam);

	// register the kernel debugger commands
	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	sNotificationService.Register();

	return B_OK;
}
2856 
2857 
/*!	Returns the maximum number of teams the system allows (\c sMaxTeams). */
int32
team_max_teams(void)
{
	return sMaxTeams;
}
2863 
2864 
/*!	Returns the number of teams currently in existence. */
int32
team_used_teams(void)
{
	// sUsedTeams is guarded by the team hash lock
	InterruptsSpinLocker teamsLocker(sTeamHashLock);
	return sUsedTeams;
}
2871 
2872 
2873 /*! Returns a death entry of a child team specified by ID (if any).
2874 	The caller must hold the team's lock.
2875 
2876 	\param team The team whose dead children list to check.
	\param child The ID of the child whose death entry to look up. Must be > 0.
2878 	\param _deleteEntry Return variable, indicating whether the caller needs to
2879 		delete the returned entry.
2880 	\return The death entry of the matching team, or \c NULL, if no death entry
2881 		for the team was found.
2882 */
2883 job_control_entry*
2884 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2885 {
2886 	if (child <= 0)
2887 		return NULL;
2888 
2889 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2890 		child);
2891 	if (entry) {
2892 		// remove the entry only, if the caller is the parent of the found team
2893 		if (team_get_current_team_id() == entry->thread) {
2894 			team->dead_children.entries.Remove(entry);
2895 			team->dead_children.count--;
2896 			*_deleteEntry = true;
2897 		} else {
2898 			*_deleteEntry = false;
2899 		}
2900 	}
2901 
2902 	return entry;
2903 }
2904 
2905 
2906 /*! Quick check to see if we have a valid team ID. */
bool
team_is_valid(team_id id)
{
	// team IDs are always positive
	if (id <= 0)
		return false;

	InterruptsSpinLocker teamsLocker(sTeamHashLock);

	return team_get_team_struct_locked(id) != NULL;
}
2917 
2918 
/*!	Returns the Team structure for the team with the given \a id, or \c NULL
	if there is none. The caller must hold \c sTeamHashLock.
*/
Team*
team_get_team_struct_locked(team_id id)
{
	return sTeamHash.Lookup(id);
}
2924 
2925 
/*!	Sets the controlling TTY of the calling team's session to \a ttyIndex and
	resets the session's foreground process group.
*/
void
team_set_controlling_tty(int32 ttyIndex)
{
	// lock the team, so its session won't change while we're playing with it
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// set the session's fields
	session->controlling_tty = ttyIndex;
	session->foreground_group = -1;
		// no foreground group until one is set explicitly
}
2941 
2942 
/*!	Returns the index of the controlling TTY of the calling team's session
	(cf. team_set_controlling_tty()).
*/
int32
team_get_controlling_tty()
{
	// lock the team, so its session won't change while we're playing with it
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// get the session's field
	return session->controlling_tty;
}
2957 
2958 
/*!	Sets the foreground process group of the calling team's session, provided
	\a ttyIndex is the session's controlling TTY.
	If the caller is in a background process group and neither ignores nor
	blocks \c SIGTTOU, \c SIGTTOU is sent to the caller's process group
	instead and \c B_INTERRUPTED is returned.

	\param ttyIndex The index of the session's controlling TTY.
	\param processGroupID The ID of the new foreground process group. It must
		belong to the caller's session.
	\return \c B_OK on success, \c ENOTTY if \a ttyIndex is not the
		controlling TTY, \c B_BAD_VALUE if \a processGroupID doesn't name a
		group of the session, \c B_INTERRUPTED if \c SIGTTOU was sent.
*/
status_t
team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
{
	// lock the team, so its session won't change while we're playing with it
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// check given TTY -- must be the controlling tty of the calling process
	if (session->controlling_tty != ttyIndex)
		return ENOTTY;

	// check given process group -- must belong to our session
	{
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
		if (group == NULL || group->Session() != session)
			return B_BAD_VALUE;
	}

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
		InterruptsSpinLocker signalLocker(team->signal_lock);

		if (!is_team_signal_blocked(team, SIGTTOU)) {
			pid_t groupID = team->group_id;

			// All locks must be dropped before sending the signal, since
			// that reacquires team/group locks.
			signalLocker.Unlock();
			sessionLocker.Unlock();
			teamLocker.Unlock();

			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
			send_signal_to_process_group(groupID, signal, 0);
			return B_INTERRUPTED;
		}
	}

	session->foreground_group = processGroupID;

	return B_OK;
}
3008 
3009 
3010 /*!	Removes the specified team from the global team hash, from its process
3011 	group, and from its parent.
3012 	It also moves all of its children to the kernel team.
3013 
3014 	The caller must hold the following locks:
3015 	- \a team's process group's lock,
3016 	- the kernel team's lock,
3017 	- \a team's parent team's lock (might be the kernel team), and
3018 	- \a team's lock.
3019 */
void
team_remove_team(Team* team, pid_t& _signalGroup)
{
	Team* parent = team->parent;

	// remember how long this team lasted
	parent->dead_children.kernel_time += team->dead_threads_kernel_time
		+ team->dead_children.kernel_time;
	parent->dead_children.user_time += team->dead_threads_user_time
		+ team->dead_children.user_time;

	// remove the team from the hash table
	InterruptsSpinLocker teamsLocker(sTeamHashLock);
	sTeamHash.Remove(team);
	sUsedTeams--;
	teamsLocker.Unlock();

	// The team can no longer be accessed by ID. Navigation to it is still
	// possible from its process group and its parent and children, but that
	// will be rectified shortly.
	team->state = TEAM_STATE_DEATH;

	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signalling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine, whom to signal.
	_signalGroup = -1;
	bool isSessionLeader = false;
	if (team->session_id == team->id
		&& team->group->Session()->controlling_tty >= 0) {
		isSessionLeader = true;

		ProcessSession* session = team->group->Session();

		AutoLocker<ProcessSession> sessionLocker(session);

		// detach the terminal; the caller signals the former foreground group
		session->controlling_tty = -1;
		_signalGroup = session->foreground_group;
	}

	// remove us from our process group
	remove_team_from_group(team);

	// move the team's children to the kernel team
	while (Team* child = team->children) {
		// remove the child from the current team and add it to the kernel team
		TeamLocker childLocker(child);

		remove_team_from_parent(team, child);
		insert_team_into_parent(sKernelTeam, child);

		// move job control entries too
		// NOTE(review): MoveFrom() presumably transfers all entries at once,
		// so subsequent loop iterations find the source lists empty -- confirm
		sKernelTeam->stopped_children.entries.MoveFrom(
			&team->stopped_children.entries);
		sKernelTeam->continued_children.entries.MoveFrom(
			&team->continued_children.entries);

		// If the team was a session leader with controlling terminal,
		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
		// groups with stopped processes. Due to locking complications we can't
		// do that here, so we only check whether we were a reason for the
		// child's process group not being an orphan and, if so, schedule a
		// later check (cf. orphaned_process_group_check()).
		if (isSessionLeader) {
			ProcessGroup* childGroup = child->group;
			if (childGroup->Session()->id == team->session_id
				&& childGroup->id != team->group_id) {
				childGroup->ScheduleOrphanedCheck();
			}
		}

		// Note, we don't move the dead children entries. Those will be deleted
		// when the team structure is deleted.
	}

	// remove us from our parent
	remove_team_from_parent(parent, team);
}
3098 
3099 
3100 /*!	Kills all threads but the main thread of the team and shuts down user
3101 	debugging for it.
3102 	To be called on exit of the team's main thread. No locks must be held.
3103 
3104 	\param team The team in question.
3105 	\return The port of the debugger for the team, -1 if none. To be passed to
3106 		team_delete_team().
3107 */
port_id
team_shutdown_team(Team* team)
{
	// may only be called by the team's main thread on its way out
	ASSERT(thread_get_current_thread() == team->main_thread);

	TeamLocker teamLocker(team);

	// Make sure debugging changes won't happen anymore.
	port_id debuggerPort = -1;
	while (true) {
		// If a debugger change is in progress for the team, we'll have to
		// wait until it is done.
		ConditionVariableEntry waitForDebuggerEntry;
		bool waitForDebugger = false;

		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition != NULL) {
			team->debug_info.debugger_changed_condition->Add(
				&waitForDebuggerEntry);
			waitForDebugger = true;
		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
			// The team is being debugged. That will stop with the termination
			// of the nub thread. Since we set the team state to death, no one
			// can install a debugger anymore. We fetch the debugger's port to
			// send it a message at the bitter end.
			debuggerPort = team->debug_info.debugger_port;
		}

		debugInfoLocker.Unlock();

		if (!waitForDebugger)
			break;

		// wait for the debugger change to be finished
		teamLocker.Unlock();

		waitForDebuggerEntry.Wait();

		teamLocker.Lock();
	}

	// Mark the team as shutting down. That will prevent new threads from being
	// created and debugger changes from taking place.
	team->state = TEAM_STATE_SHUTDOWN;

	// delete all timers
	team->DeleteUserTimers(false);

	// deactivate CPU time user timers for the team
	InterruptsSpinLocker timeLocker(team->time_lock);

	if (team->HasActiveCPUTimeUserTimers())
		team->DeactivateCPUTimeUserTimers();

	timeLocker.Unlock();

	// Kill all threads but the main thread: send SIGKILLTHR to each of them
	// and wait on the death condition variable; repeat until none are left.
	team_death_entry deathEntry;
	deathEntry.condition.Init(team, "team death");

	while (true) {
		team->death_entry = &deathEntry;
		deathEntry.remaining_threads = 0;

		Thread* thread = team->thread_list;
		while (thread != NULL) {
			if (thread != team->main_thread) {
				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
				deathEntry.remaining_threads++;
			}

			thread = thread->team_next;
		}

		if (deathEntry.remaining_threads == 0)
			break;

		// there are threads to wait for
		ConditionVariableEntry entry;
		deathEntry.condition.Add(&entry);

		teamLocker.Unlock();

		entry.Wait();

		teamLocker.Lock();
	}

	team->death_entry = NULL;

	return debuggerPort;
}
3202 
3203 
3204 /*!	Called on team exit to notify threads waiting on the team and free most
3205 	resources associated with it.
3206 	The caller shouldn't hold any locks.
3207 */
void
team_delete_team(Team* team, port_id debuggerPort)
{
	// Not quite in our job description, but work that has been left by
	// team_remove_team() and that can be done now that we're not holding any
	// locks.
	orphaned_process_group_check();

	team_id teamID = team->id;

	// all threads must be gone by now
	ASSERT(team->num_threads == 0);

	// If someone is waiting for this team to be loaded, but it dies
	// unexpectedly before being done, we need to notify the waiting
	// thread now.

	TeamLocker teamLocker(team);

	if (team->loading_info) {
		// there's indeed someone waiting
		struct team_loading_info* loadingInfo = team->loading_info;
		team->loading_info = NULL;

		loadingInfo->result = B_ERROR;
		loadingInfo->done = true;

		// wake up the waiting thread
		thread_continue(loadingInfo->thread);
	}

	// notify team watchers

	{
		// we're not reachable from anyone anymore at this point, so we
		// can safely access the list without any locking
		struct team_watcher* watcher;
		while ((watcher = (struct team_watcher*)list_remove_head_item(
				&team->watcher_list)) != NULL) {
			watcher->hook(teamID, watcher->data);
			free(watcher);
		}
	}

	teamLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	// free team resources

	delete_realtime_sem_context(team->realtime_sem_context);
	xsi_sem_undo(team);
	remove_images(team);
	team->address_space->RemoveAndPut();

	// give up our own reference; the team structure is deleted with the last
	// reference
	team->ReleaseReference();

	// notify the debugger, that the team is gone
	user_debug_team_deleted(teamID, debuggerPort);
}
3267 
3268 
3269 Team*
3270 team_get_kernel_team(void)
3271 {
3272 	return sKernelTeam;
3273 }
3274 
3275 
3276 team_id
3277 team_get_kernel_team_id(void)
3278 {
3279 	if (!sKernelTeam)
3280 		return 0;
3281 
3282 	return sKernelTeam->id;
3283 }
3284 
3285 
3286 team_id
3287 team_get_current_team_id(void)
3288 {
3289 	return thread_get_current_thread()->team->id;
3290 }
3291 
3292 
/*!	Looks up the address space of the team with the given ID and returns it
	in \a _addressSpace with a reference acquired (via Get(); for the kernel
	team presumably GetKernel() does the equivalent -- the caller is expected
	to put the reference when done).
*/
status_t
team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
{
	if (id == sKernelTeam->id) {
		// we're the kernel team, so we don't have to go through all
		// the hassle (locking and hash lookup)
		*_addressSpace = VMAddressSpace::GetKernel();
		return B_OK;
	}

	InterruptsSpinLocker teamsLocker(sTeamHashLock);

	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return B_BAD_VALUE;

	// acquire the reference while still holding the hash lock, so the team
	// (and its address space) can't go away under us
	team->address_space->Get();
	*_addressSpace = team->address_space;
	return B_OK;
}
3313 
3314 
/*!	Sets the team's job control state.
	The caller must hold the parent team's lock. Interrupts are allowed to be
	enabled or disabled.
	\a team The team whose job control state shall be set.
	\a newState The new state to be set.
	\a signal The signal the new state was caused by. Can be \c NULL, if none.
		Then the caller is responsible for filling in the following fields of
		the entry before releasing the parent team's lock, unless the new state
		is \c JOB_CONTROL_STATE_NONE:
		- \c signal: The number of the signal causing the state change.
		- \c signaling_user: The real UID of the user sending the signal.
*/
void
team_set_job_control_state(Team* team, job_control_state newState,
	Signal* signal)
{
	if (team == NULL || team->job_control_entry == NULL)
		return;

	// don't touch anything, if the state stays the same or the team is already
	// dead
	job_control_entry* entry = team->job_control_entry;
	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
		return;

	T(SetJobControlState(team->id, newState, signal));

	// remove from the old list
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry is in no list ATM
			break;
		case JOB_CONTROL_STATE_DEAD:
			// can't get here
			break;
		case JOB_CONTROL_STATE_STOPPED:
			team->parent->stopped_children.entries.Remove(entry);
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			team->parent->continued_children.entries.Remove(entry);
			break;
	}

	entry->state = newState;

	// record which signal caused the transition, if one was supplied
	if (signal != NULL) {
		entry->signal = signal->Number();
		entry->signaling_user = signal->SendingUser();
	}

	// add to new list
	team_job_control_children* childList = NULL;
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry doesn't get into any list
			break;
		case JOB_CONTROL_STATE_DEAD:
			childList = &team->parent->dead_children;
			team->parent->dead_children.count++;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			childList = &team->parent->stopped_children;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			childList = &team->parent->continued_children;
			break;
	}

	if (childList != NULL) {
		childList->entries.Add(entry);
		// a parent blocked in wait_for_child() waits on this condition
		// variable, regardless of which state list the entry went into
		team->parent->dead_children.condition_variable.NotifyAll();
	}
}
3388 
3389 
3390 /*!	Inits the given team's exit information, if not yet initialized, to some
3391 	generic "killed" status.
3392 	The caller must not hold the team's lock. Interrupts must be enabled.
3393 
3394 	\param team The team whose exit info shall be initialized.
3395 */
3396 void
3397 team_init_exit_info_on_error(Team* team)
3398 {
3399 	TeamLocker teamLocker(team);
3400 
3401 	if (!team->exit.initialized) {
3402 		team->exit.reason = CLD_KILLED;
3403 		team->exit.signal = SIGKILL;
3404 		team->exit.signaling_user = geteuid();
3405 		team->exit.status = 0;
3406 		team->exit.initialized = true;
3407 	}
3408 }
3409 
3410 
3411 /*! Adds a hook to the team that is called as soon as this team goes away.
3412 	This call might get public in the future.
3413 */
3414 status_t
3415 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3416 {
3417 	if (hook == NULL || teamID < B_OK)
3418 		return B_BAD_VALUE;
3419 
3420 	// create the watcher object
3421 	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3422 	if (watcher == NULL)
3423 		return B_NO_MEMORY;
3424 
3425 	watcher->hook = hook;
3426 	watcher->data = data;
3427 
3428 	// add watcher, if the team isn't already dying
3429 	// get the team
3430 	Team* team = Team::GetAndLock(teamID);
3431 	if (team == NULL) {
3432 		free(watcher);
3433 		return B_BAD_TEAM_ID;
3434 	}
3435 
3436 	list_add_item(&team->watcher_list, watcher);
3437 
3438 	team->UnlockAndReleaseReference();
3439 
3440 	return B_OK;
3441 }
3442 
3443 
3444 status_t
3445 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3446 {
3447 	if (hook == NULL || teamID < 0)
3448 		return B_BAD_VALUE;
3449 
3450 	// get team and remove watcher (if present)
3451 	Team* team = Team::GetAndLock(teamID);
3452 	if (team == NULL)
3453 		return B_BAD_TEAM_ID;
3454 
3455 	// search for watcher
3456 	team_watcher* watcher = NULL;
3457 	while ((watcher = (team_watcher*)list_get_next_item(
3458 			&team->watcher_list, watcher)) != NULL) {
3459 		if (watcher->hook == hook && watcher->data == data) {
3460 			// got it!
3461 			list_remove_item(&team->watcher_list, watcher);
3462 			break;
3463 		}
3464 	}
3465 
3466 	team->UnlockAndReleaseReference();
3467 
3468 	if (watcher == NULL)
3469 		return B_ENTRY_NOT_FOUND;
3470 
3471 	free(watcher);
3472 	return B_OK;
3473 }
3474 
3475 
/*!	Allocates a user_thread structure from the team.
	The team lock must be held, unless the function is called for the team's
	main thread. Interrupts must be enabled.
	\return A pointer into the team's user data area, or \c NULL if the team
		has no user data area or it could not be grown.
*/
struct user_thread*
team_allocate_user_thread(Team* team)
{
	// no user data area set up for this team
	if (team->user_data == 0)
		return NULL;

	// take an entry from the free list, if any
	if (struct free_user_thread* entry = team->free_user_threads) {
		user_thread* thread = entry->thread;
		team->free_user_threads = entry->next;
		free(entry);
		return thread;
	}

	while (true) {
		// enough space left?
		// allocations are cache-line aligned to avoid false sharing between
		// the structures of different threads
		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
		if (team->user_data_size - team->used_user_data < needed) {
			// try to resize the area
			if (resize_area(team->user_data_area,
					team->user_data_size + B_PAGE_SIZE) != B_OK) {
				return NULL;
			}

			// resized user area successfully -- try to allocate the user_thread
			// again
			team->user_data_size += B_PAGE_SIZE;
			continue;
		}

		// allocate the user_thread by bumping the used-bytes watermark
		user_thread* thread
			= (user_thread*)(team->user_data + team->used_user_data);
		team->used_user_data += needed;

		return thread;
	}
}
3518 
3519 
3520 /*!	Frees the given user_thread structure.
3521 	The team's lock must not be held. Interrupts must be enabled.
3522 	\param team The team the user thread was allocated from.
3523 	\param userThread The user thread to free.
3524 */
3525 void
3526 team_free_user_thread(Team* team, struct user_thread* userThread)
3527 {
3528 	if (userThread == NULL)
3529 		return;
3530 
3531 	// create a free list entry
3532 	free_user_thread* entry
3533 		= (free_user_thread*)malloc(sizeof(free_user_thread));
3534 	if (entry == NULL) {
3535 		// we have to leak the user thread :-/
3536 		return;
3537 	}
3538 
3539 	// add to free list
3540 	TeamLocker teamLocker(team);
3541 
3542 	entry->thread = userThread;
3543 	entry->next = team->free_user_threads;
3544 	team->free_user_threads = entry;
3545 }
3546 
3547 
3548 //	#pragma mark - Associated data interface
3549 
3550 
// Creates associated data without an owner; an owner is assigned later via
// AssociatedDataOwner::AddData().
AssociatedData::AssociatedData()
	:
	fOwner(NULL)
{
}
3556 
3557 
// Empty destructor -- AssociatedData itself holds no resources.
AssociatedData::~AssociatedData()
{
}
3561 
3562 
/*!	Hook called (without the owner's lock held) when the owning
	AssociatedDataOwner is deleted; see
	AssociatedDataOwner::PrepareForDeletion(). The default implementation
	does nothing.
*/
void
AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
{
}
3567 
3568 
// Initializes the mutex guarding the owner's associated data list.
AssociatedDataOwner::AssociatedDataOwner()
{
	mutex_init(&fLock, "associated data owner");
}
3573 
3574 
// Destroys the list lock.
AssociatedDataOwner::~AssociatedDataOwner()
{
	mutex_destroy(&fLock);
}
3579 
3580 
3581 bool
3582 AssociatedDataOwner::AddData(AssociatedData* data)
3583 {
3584 	MutexLocker locker(fLock);
3585 
3586 	if (data->Owner() != NULL)
3587 		return false;
3588 
3589 	data->AcquireReference();
3590 	fList.Add(data);
3591 	data->SetOwner(this);
3592 
3593 	return true;
3594 }
3595 
3596 
3597 bool
3598 AssociatedDataOwner::RemoveData(AssociatedData* data)
3599 {
3600 	MutexLocker locker(fLock);
3601 
3602 	if (data->Owner() != this)
3603 		return false;
3604 
3605 	data->SetOwner(NULL);
3606 	fList.Remove(data);
3607 
3608 	locker.Unlock();
3609 
3610 	data->ReleaseReference();
3611 
3612 	return true;
3613 }
3614 
3615 
/*!	Detaches all associated data from this owner, calling each object's
	OwnerDeleted() hook and releasing the owner's references. To be called
	before the owner goes away.
*/
void
AssociatedDataOwner::PrepareForDeletion()
{
	MutexLocker locker(fLock);

	// move all data to a temporary list and unset the owner
	DataList list;
	list.MoveFrom(&fList);

	for (DataList::Iterator it = list.GetIterator();
		AssociatedData* data = it.Next();) {
		data->SetOwner(NULL);
	}

	locker.Unlock();

	// call the notification hooks and release our references
	// (deliberately without holding fLock, so hooks may call back into us)
	while (AssociatedData* data = list.RemoveHead()) {
		data->OwnerDeleted(this);
		data->ReleaseReference();
	}
}
3638 
3639 
3640 /*!	Associates data with the current team.
3641 	When the team is deleted, the data object is notified.
3642 	The team acquires a reference to the object.
3643 
3644 	\param data The data object.
3645 	\return \c true on success, \c false otherwise. Fails only when the supplied
3646 		data object is already associated with another owner.
3647 */
3648 bool
3649 team_associate_data(AssociatedData* data)
3650 {
3651 	return thread_get_current_thread()->team->AddData(data);
3652 }
3653 
3654 
3655 /*!	Dissociates data from the current team.
3656 	Balances an earlier call to team_associate_data().
3657 
3658 	\param data The data object.
3659 	\return \c true on success, \c false otherwise. Fails only when the data
3660 		object is not associated with the current team.
3661 */
3662 bool
3663 team_dissociate_data(AssociatedData* data)
3664 {
3665 	return thread_get_current_thread()->team->RemoveData(data);
3666 }
3667 
3668 
3669 //	#pragma mark - Public kernel API
3670 
3671 
3672 thread_id
3673 load_image(int32 argCount, const char** args, const char** env)
3674 {
3675 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
3676 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
3677 }
3678 
3679 
3680 thread_id
3681 load_image_etc(int32 argCount, const char* const* args,
3682 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
3683 {
3684 	// we need to flatten the args and environment
3685 
3686 	if (args == NULL)
3687 		return B_BAD_VALUE;
3688 
3689 	// determine total needed size
3690 	int32 argSize = 0;
3691 	for (int32 i = 0; i < argCount; i++)
3692 		argSize += strlen(args[i]) + 1;
3693 
3694 	int32 envCount = 0;
3695 	int32 envSize = 0;
3696 	while (env != NULL && env[envCount] != NULL)
3697 		envSize += strlen(env[envCount++]) + 1;
3698 
3699 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3700 	if (size > MAX_PROCESS_ARGS_SIZE)
3701 		return B_TOO_MANY_ARGS;
3702 
3703 	// allocate space
3704 	char** flatArgs = (char**)malloc(size);
3705 	if (flatArgs == NULL)
3706 		return B_NO_MEMORY;
3707 
3708 	char** slot = flatArgs;
3709 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3710 
3711 	// copy arguments and environment
3712 	for (int32 i = 0; i < argCount; i++) {
3713 		int32 argSize = strlen(args[i]) + 1;
3714 		memcpy(stringSpace, args[i], argSize);
3715 		*slot++ = stringSpace;
3716 		stringSpace += argSize;
3717 	}
3718 
3719 	*slot++ = NULL;
3720 
3721 	for (int32 i = 0; i < envCount; i++) {
3722 		int32 envSize = strlen(env[i]) + 1;
3723 		memcpy(stringSpace, env[i], envSize);
3724 		*slot++ = stringSpace;
3725 		stringSpace += envSize;
3726 	}
3727 
3728 	*slot++ = NULL;
3729 
3730 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3731 		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
3732 
3733 	free(flatArgs);
3734 		// load_image_internal() unset our variable if it took over ownership
3735 
3736 	return thread;
3737 }
3738 
3739 
3740 status_t
3741 wait_for_team(team_id id, status_t* _returnCode)
3742 {
3743 	// check whether the team exists
3744 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3745 
3746 	Team* team = team_get_team_struct_locked(id);
3747 	if (team == NULL)
3748 		return B_BAD_TEAM_ID;
3749 
3750 	id = team->id;
3751 
3752 	teamsLocker.Unlock();
3753 
3754 	// wait for the main thread (it has the same ID as the team)
3755 	return wait_for_thread(id, _returnCode);
3756 }
3757 
3758 
3759 status_t
3760 kill_team(team_id id)
3761 {
3762 	InterruptsSpinLocker teamsLocker(sTeamHashLock);
3763 
3764 	Team* team = team_get_team_struct_locked(id);
3765 	if (team == NULL)
3766 		return B_BAD_TEAM_ID;
3767 
3768 	id = team->id;
3769 
3770 	teamsLocker.Unlock();
3771 
3772 	if (team == sKernelTeam)
3773 		return B_NOT_ALLOWED;
3774 
3775 	// Just kill the team's main thread (it has same ID as the team). The
3776 	// cleanup code there will take care of the team.
3777 	return kill_thread(id);
3778 }
3779 
3780 
3781 status_t
3782 _get_team_info(team_id id, team_info* info, size_t size)
3783 {
3784 	// get the team
3785 	Team* team = Team::Get(id);
3786 	if (team == NULL)
3787 		return B_BAD_TEAM_ID;
3788 	BReference<Team> teamReference(team, true);
3789 
3790 	// fill in the info
3791 	return fill_team_info(team, info, size);
3792 }
3793 
3794 
3795 status_t
3796 _get_next_team_info(int32* cookie, team_info* info, size_t size)
3797 {
3798 	int32 slot = *cookie;
3799 	if (slot < 1)
3800 		slot = 1;
3801 
3802 	InterruptsSpinLocker locker(sTeamHashLock);
3803 
3804 	team_id lastTeamID = peek_next_thread_id();
3805 		// TODO: This is broken, since the id can wrap around!
3806 
3807 	// get next valid team
3808 	Team* team = NULL;
3809 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3810 		slot++;
3811 
3812 	if (team == NULL)
3813 		return B_BAD_TEAM_ID;
3814 
3815 	// get a reference to the team and unlock
3816 	BReference<Team> teamReference(team);
3817 	locker.Unlock();
3818 
3819 	// fill in the info
3820 	*cookie = ++slot;
3821 	return fill_team_info(team, info, size);
3822 }
3823 
3824 
3825 status_t
3826 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3827 {
3828 	if (size != sizeof(team_usage_info))
3829 		return B_BAD_VALUE;
3830 
3831 	return common_get_team_usage_info(id, who, info, 0);
3832 }
3833 
3834 
3835 pid_t
3836 getpid(void)
3837 {
3838 	return thread_get_current_thread()->team->id;
3839 }
3840 
3841 
3842 pid_t
3843 getppid(void)
3844 {
3845 	Team* team = thread_get_current_thread()->team;
3846 
3847 	TeamLocker teamLocker(team);
3848 
3849 	return team->parent->id;
3850 }
3851 
3852 
3853 pid_t
3854 getpgid(pid_t id)
3855 {
3856 	if (id < 0) {
3857 		errno = EINVAL;
3858 		return -1;
3859 	}
3860 
3861 	if (id == 0) {
3862 		// get process group of the calling process
3863 		Team* team = thread_get_current_thread()->team;
3864 		TeamLocker teamLocker(team);
3865 		return team->group_id;
3866 	}
3867 
3868 	// get the team
3869 	Team* team = Team::GetAndLock(id);
3870 	if (team == NULL) {
3871 		errno = ESRCH;
3872 		return -1;
3873 	}
3874 
3875 	// get the team's process group ID
3876 	pid_t groupID = team->group_id;
3877 
3878 	team->UnlockAndReleaseReference();
3879 
3880 	return groupID;
3881 }
3882 
3883 
3884 pid_t
3885 getsid(pid_t id)
3886 {
3887 	if (id < 0) {
3888 		errno = EINVAL;
3889 		return -1;
3890 	}
3891 
3892 	if (id == 0) {
3893 		// get session of the calling process
3894 		Team* team = thread_get_current_thread()->team;
3895 		TeamLocker teamLocker(team);
3896 		return team->session_id;
3897 	}
3898 
3899 	// get the team
3900 	Team* team = Team::GetAndLock(id);
3901 	if (team == NULL) {
3902 		errno = ESRCH;
3903 		return -1;
3904 	}
3905 
3906 	// get the team's session ID
3907 	pid_t sessionID = team->session_id;
3908 
3909 	team->UnlockAndReleaseReference();
3910 
3911 	return sessionID;
3912 }
3913 
3914 
3915 //	#pragma mark - User syscalls
3916 
3917 
3918 status_t
3919 _user_exec(const char* userPath, const char* const* userFlatArgs,
3920 	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
3921 {
3922 	// NOTE: Since this function normally doesn't return, don't use automatic
3923 	// variables that need destruction in the function scope.
3924 	char path[B_PATH_NAME_LENGTH];
3925 
3926 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3927 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3928 		return B_BAD_ADDRESS;
3929 
3930 	// copy and relocate the flat arguments
3931 	char** flatArgs;
3932 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3933 		argCount, envCount, flatArgs);
3934 
3935 	if (error == B_OK) {
3936 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3937 			envCount, umask);
3938 			// this one only returns in case of error
3939 	}
3940 
3941 	free(flatArgs);
3942 	return error;
3943 }
3944 
3945 
3946 thread_id
3947 _user_fork(void)
3948 {
3949 	return fork_team();
3950 }
3951 
3952 
3953 pid_t
3954 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo,
3955 	team_usage_info* usageInfo)
3956 {
3957 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
3958 		return B_BAD_ADDRESS;
3959 	if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo))
3960 		return B_BAD_ADDRESS;
3961 
3962 	siginfo_t info;
3963 	team_usage_info usage_info;
3964 	pid_t foundChild = wait_for_child(child, flags, info, usage_info);
3965 	if (foundChild < 0)
3966 		return syscall_restart_handle_post(foundChild);
3967 
3968 	// copy info back to userland
3969 	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
3970 		return B_BAD_ADDRESS;
3971 	// copy usage_info back to userland
3972 	if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info,
3973 		sizeof(usage_info)) != B_OK) {
3974 		return B_BAD_ADDRESS;
3975 	}
3976 
3977 	return foundChild;
3978 }
3979 
3980 
3981 pid_t
3982 _user_process_info(pid_t process, int32 which)
3983 {
3984 	// we only allow to return the parent of the current process
3985 	if (which == PARENT_ID
3986 		&& process != 0 && process != thread_get_current_thread()->team->id)
3987 		return B_BAD_VALUE;
3988 
3989 	pid_t result;
3990 	switch (which) {
3991 		case SESSION_ID:
3992 			result = getsid(process);
3993 			break;
3994 		case GROUP_ID:
3995 			result = getpgid(process);
3996 			break;
3997 		case PARENT_ID:
3998 			result = getppid();
3999 			break;
4000 		default:
4001 			return B_BAD_VALUE;
4002 	}
4003 
4004 	return result >= 0 ? result : errno;
4005 }
4006 
4007 
/*!	setpgid() syscall backend.
	Moves the target process into the given process group, creating the group
	if necessary. See the inline comments for the constraints POSIX imposes.
*/
pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the target
	//   process as group leader.
	// * Set the target process' process group to an already existing one in the
	//   same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			// (an unpublished new group can't be locked by anyone else, and
			// otherwise groups are locked in ascending ID order)
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock in
			// the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		// -- transfer them to RAII objects for the rest of the function
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child of
			// the calling team and in the same session. (If that's the case it
			// isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child, when the child has
			// already executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different session.
			// That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll();

		return group->id;
	}
}
4159 
4160 
/*!	setsid() syscall backend.
	Makes the calling team the leader of a new session and new process group.
	Fails if the team is already a process group leader.
	\return The new group's ID (== the team's ID), or an error code.
*/
pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll();

	return group->id;
}
4200 
4201 
4202 status_t
4203 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4204 {
4205 	status_t returnCode;
4206 	status_t status;
4207 
4208 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4209 		return B_BAD_ADDRESS;
4210 
4211 	status = wait_for_team(id, &returnCode);
4212 	if (status >= B_OK && _userReturnCode != NULL) {
4213 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4214 				!= B_OK)
4215 			return B_BAD_ADDRESS;
4216 		return B_OK;
4217 	}
4218 
4219 	return syscall_restart_handle_post(status);
4220 }
4221 
4222 
/*!	load_image() syscall backend: copies the flattened argument/environment
	block from userland and starts a new team from it.
	\return The main thread ID of the new team, or an error code.
*/
thread_id
_user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, int32 priority, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));

	// at least the program path (args[0]) must be given
	if (argCount < 1)
		return B_BAD_VALUE;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);
	if (error != B_OK)
		return error;

	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
		errorToken);

	free(flatArgs);
		// load_image_internal() unset our variable if it took over ownership
		// (presumably flatArgs is passed by reference -- so this free() is a
		// no-op in the success case)

	return thread;
}
4249 
4250 
/*!	exit()/exit_team() syscall backend.
	Records the exit status and takes the team down by sending the calling
	thread a SIGKILL. Does not return to userland.
*/
void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status
	TeamLocker teamLocker(team);

	// only the first exit status sticks (a later caller must not overwrite it)
	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();

	// Send this thread a SIGKILL. This makes sure the thread will not return to
	// userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}
4284 
4285 
4286 status_t
4287 _user_kill_team(team_id team)
4288 {
4289 	return kill_team(team);
4290 }
4291 
4292 
4293 status_t
4294 _user_get_team_info(team_id id, team_info* userInfo)
4295 {
4296 	status_t status;
4297 	team_info info;
4298 
4299 	if (!IS_USER_ADDRESS(userInfo))
4300 		return B_BAD_ADDRESS;
4301 
4302 	status = _get_team_info(id, &info, sizeof(team_info));
4303 	if (status == B_OK) {
4304 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4305 			return B_BAD_ADDRESS;
4306 	}
4307 
4308 	return status;
4309 }
4310 
4311 
4312 status_t
4313 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
4314 {
4315 	status_t status;
4316 	team_info info;
4317 	int32 cookie;
4318 
4319 	if (!IS_USER_ADDRESS(userCookie)
4320 		|| !IS_USER_ADDRESS(userInfo)
4321 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4322 		return B_BAD_ADDRESS;
4323 
4324 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
4325 	if (status != B_OK)
4326 		return status;
4327 
4328 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4329 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
4330 		return B_BAD_ADDRESS;
4331 
4332 	return status;
4333 }
4334 
4335 
4336 team_id
4337 _user_get_current_team(void)
4338 {
4339 	return team_get_current_team_id();
4340 }
4341 
4342 
4343 status_t
4344 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4345 	size_t size)
4346 {
4347 	if (size != sizeof(team_usage_info))
4348 		return B_BAD_VALUE;
4349 
4350 	team_usage_info info;
4351 	status_t status = common_get_team_usage_info(team, who, &info,
4352 		B_CHECK_PERMISSION);
4353 
4354 	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4355 		|| user_memcpy(userInfo, &info, size) != B_OK) {
4356 		return B_BAD_ADDRESS;
4357 	}
4358 
4359 	return status;
4360 }
4361 
4362 
/*!	Syscall entry point for get_extended_team_info(): collects extended
	information about team \a teamID into a flattened KMessage and copies
	it to userland.
	\param teamID The team to query.
	\param flags Bit mask selecting what to collect. Currently only
		\c B_TEAM_INFO_BASIC is honored (see the TODO below).
	\param buffer Userland buffer receiving the flattened message. May be
		\c NULL only if \a size is 0.
	\param size Size of \a buffer in bytes.
	\param _sizeNeeded Userland pointer; the required buffer size is
		written here before the size check, so the caller can retry with
		a large enough buffer after \c B_BUFFER_OVERFLOW.
	\return \c B_OK on success, \c B_BAD_ADDRESS for invalid user
		addresses or failed copies, \c B_BAD_TEAM_ID if the team doesn't
		exist, \c B_NO_MEMORY on allocation failure, \c B_BUFFER_OVERFLOW
		if \a buffer is too small.
*/
status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// allocate memory for a copy of the needed team data
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		};

		ExtendedTeamData* teamClone
			= (ExtendedTeamData*)malloc(sizeof(ExtendedTeamData));
			// It would be nicer to use new, but then we'd have to use
			// ObjectDeleter and declare the structure outside of the function
			// due to template parameter restrictions.
		if (teamClone == NULL)
			return B_NO_MEMORY;
		MemoryDeleter teamCloneDeleter(teamClone);

		io_context* ioContext;
		{
			// get the team structure
			// (GetAndLock() returns the team locked and referenced; both the
			// reference and the lock are handed to the RAII objects below)
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);

			// copy the data
			// (snapshot all fields while holding the team lock, so the
			// KMessage can be built after the lock is released)
			teamClone->id = team->id;
			strlcpy(teamClone->name, team->Name(), sizeof(teamClone->name));
			teamClone->group_id = team->group_id;
			teamClone->session_id = team->session_id;
			teamClone->real_uid = team->real_uid;
			teamClone->real_gid = team->real_gid;
			teamClone->effective_uid = team->effective_uid;
			teamClone->effective_gid = team->effective_gid;

			// also fetch a reference to the I/O context
			// (keeps it alive after the team lock/reference are dropped at
			// the end of this scope)
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		CObjectDeleter<io_context> ioContextPutter(ioContext,
			&vfs_put_io_context);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone->id) != B_OK
			|| info.AddString("name", teamClone->name) != B_OK
			|| info.AddInt32("process group", teamClone->group_id) != B_OK
			|| info.AddInt32("session", teamClone->session_id) != B_OK
			|| info.AddInt32("uid", teamClone->real_uid) != B_OK
			|| info.AddInt32("gid", teamClone->real_gid) != B_OK
			|| info.AddInt32("euid", teamClone->effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone->effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			// the cwd vnode is only stable while io_mutex is held
			MutexLocker ioContextLocker(ioContext->io_mutex);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
4465