xref: /haiku/src/system/kernel/team.cpp (revision 37c7d5d83a2372a6971e383411d5bacbeef0ebdc)
1 /*
2  * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*!	Team functions */
12 
13 
14 #include <team.h>
15 
16 #include <stdio.h>
17 #include <stdlib.h>
18 #include <string.h>
19 #include <sys/wait.h>
20 
21 #include <OS.h>
22 
23 #include <AutoDeleter.h>
24 #include <FindDirectory.h>
25 
26 #include <boot_device.h>
27 #include <elf.h>
28 #include <file_cache.h>
29 #include <fs/KPath.h>
30 #include <heap.h>
31 #include <int.h>
32 #include <kernel.h>
33 #include <kimage.h>
34 #include <kscheduler.h>
35 #include <ksignal.h>
36 #include <Notifications.h>
37 #include <port.h>
38 #include <posix/realtime_sem.h>
39 #include <posix/xsi_semaphore.h>
40 #include <sem.h>
41 #include <syscall_process_info.h>
42 #include <syscall_restart.h>
43 #include <syscalls.h>
44 #include <tls.h>
45 #include <tracing.h>
46 #include <user_runtime.h>
47 #include <user_thread.h>
48 #include <usergroup.h>
49 #include <vfs.h>
50 #include <vm/vm.h>
51 #include <vm/VMAddressSpace.h>
52 #include <util/AutoLock.h>
53 #include <util/khash.h>
54 
55 //#define TRACE_TEAM
56 #ifdef TRACE_TEAM
57 #	define TRACE(x) dprintf x
58 #else
59 #	define TRACE(x) ;
60 #endif
61 
62 
63 struct team_key {
64 	team_id id;
65 };
66 
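// Parameters passed to the main thread of a newly loaded team: the program
// path, the flattened argument/environment block, and the error port/token
// that are forwarded to the runtime loader. Ownership passes to
// team_create_thread_start(), which releases it via free_team_arg().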
67 struct team_arg {
68 	char	*path;
69 	char	**flat_args;
70 	size_t	flat_args_size;
71 	uint32	arg_count;
72 	uint32	env_count;
73 	port_id	error_port;
74 	uint32	error_token;
75 };
76 
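// State handed from fork_team() to the child's main thread: the parent
// thread's user stack, TLS pointer, signal settings, and the saved
// architecture-specific frame. fork_team_thread_start() copies all of this
// into the new thread and then restores the frame, so that the child returns
// to userland as if from fork().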
77 struct fork_arg {
78 	area_id				user_stack_area;
79 	addr_t				user_stack_base;
80 	size_t				user_stack_size;
81 	addr_t				user_local_storage;
82 	sigset_t			sig_block_mask;
83 	struct sigaction	sig_action[32];
84 	addr_t				signal_stack_base;
85 	size_t				signal_stack_size;
86 	bool				signal_stack_enabled;
87 
88 	struct user_thread* user_thread;
89 
90 	struct arch_fork_arg arch_info;
91 };
92 
93 class TeamNotificationService : public DefaultNotificationService {
94 public:
95 							TeamNotificationService();
96 
97 			void			Notify(uint32 eventCode, struct team* team);
98 };
99 
100 
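// The team hash table (team ID -> team), the process group hash table
// (group ID -> process_group), and the kernel's own team. The hash tables are
// protected by gTeamSpinlock.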
101 static hash_table* sTeamHash = NULL;
102 static hash_table* sGroupHash = NULL;
103 static struct team* sKernelTeam = NULL;
104 
105 // some arbitrarily chosen limits - should probably depend on the available
106 // memory (the limit is not yet enforced)
107 static int32 sMaxTeams = 2048;
108 static int32 sUsedTeams = 1;
109 
110 static TeamNotificationService sNotificationService;
111 
112 spinlock gTeamSpinlock = B_SPINLOCK_INITIALIZER;
113 
114 
115 // #pragma mark - Tracing
116 
117 
118 #if TEAM_TRACING
119 namespace TeamTracing {
120 
121 class TeamForked : public AbstractTraceEntry {
122 public:
123 	TeamForked(thread_id forkedThread)
124 		:
125 		fForkedThread(forkedThread)
126 	{
127 		Initialized();
128 	}
129 
130 	virtual void AddDump(TraceOutput& out)
131 	{
132 		out.Print("team forked, new thread %ld", fForkedThread);
133 	}
134 
135 private:
136 	thread_id			fForkedThread;
137 };
138 
139 
140 class ExecTeam : public AbstractTraceEntry {
141 public:
142 	ExecTeam(const char* path, int32 argCount, const char* const* args,
143 			int32 envCount, const char* const* env)
144 		:
145 		fArgCount(argCount),
146 		fArgs(NULL)
147 	{
148 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
149 			false);
150 
151 		// determine the buffer size we need for the args
152 		size_t argBufferSize = 0;
153 		for (int32 i = 0; i < argCount; i++)
154 			argBufferSize += strlen(args[i]) + 1;
155 
156 		// allocate a buffer
157 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
158 		if (fArgs) {
159 			char* buffer = fArgs;
160 			for (int32 i = 0; i < argCount; i++) {
161 				size_t argSize = strlen(args[i]) + 1;
162 				memcpy(buffer, args[i], argSize);
163 				buffer += argSize;
164 			}
165 		}
166 
167 		// ignore env for the time being
168 		(void)envCount;
169 		(void)env;
170 
171 		Initialized();
172 	}
173 
174 	virtual void AddDump(TraceOutput& out)
175 	{
176 		out.Print("team exec, \"%s\", args:", fPath);
177 
178 		if (fArgs != NULL) {
179 			char* args = fArgs;
180 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
181 				out.Print(" \"%s\"", args);
182 				args += strlen(args) + 1;
183 			}
184 		} else
185 			out.Print(" <too long>");
186 	}
187 
188 private:
189 	char*	fPath;
190 	int32	fArgCount;
191 	char*	fArgs;
192 };
193 
194 
195 static const char*
196 job_control_state_name(job_control_state state)
197 {
198 	switch (state) {
199 		case JOB_CONTROL_STATE_NONE:
200 			return "none";
201 		case JOB_CONTROL_STATE_STOPPED:
202 			return "stopped";
203 		case JOB_CONTROL_STATE_CONTINUED:
204 			return "continued";
205 		case JOB_CONTROL_STATE_DEAD:
206 			return "dead";
207 		default:
208 			return "invalid";
209 	}
210 }
211 
212 
213 class SetJobControlState : public AbstractTraceEntry {
214 public:
215 	SetJobControlState(team_id team, job_control_state newState, int signal)
216 		:
217 		fTeam(team),
218 		fNewState(newState),
219 		fSignal(signal)
220 	{
221 		Initialized();
222 	}
223 
224 	virtual void AddDump(TraceOutput& out)
225 	{
226 		out.Print("team set job control state, team %ld, "
227 			"new state: %s, signal: %d",
228 			fTeam, job_control_state_name(fNewState), fSignal);
229 	}
230 
231 private:
232 	team_id				fTeam;
233 	job_control_state	fNewState;
234 	int					fSignal;
235 };
236 
237 
238 class WaitForChild : public AbstractTraceEntry {
239 public:
240 	WaitForChild(pid_t child, uint32 flags)
241 		:
242 		fChild(child),
243 		fFlags(flags)
244 	{
245 		Initialized();
246 	}
247 
248 	virtual void AddDump(TraceOutput& out)
249 	{
250 		out.Print("team wait for child, child: %ld, "
251 			"flags: 0x%lx", fChild, fFlags);
252 	}
253 
254 private:
255 	pid_t	fChild;
256 	uint32	fFlags;
257 };
258 
259 
260 class WaitForChildDone : public AbstractTraceEntry {
261 public:
262 	WaitForChildDone(const job_control_entry& entry)
263 		:
264 		fState(entry.state),
265 		fTeam(entry.thread),
266 		fStatus(entry.status),
267 		fReason(entry.reason),
268 		fSignal(entry.signal)
269 	{
270 		Initialized();
271 	}
272 
273 	WaitForChildDone(status_t error)
274 		:
275 		fTeam(error)
276 	{
277 		Initialized();
278 	}
279 
280 	virtual void AddDump(TraceOutput& out)
281 	{
282 		if (fTeam >= 0) {
283 			out.Print("team wait for child done, team: %ld, "
284 				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
285 				fTeam, job_control_state_name(fState), fStatus, fReason,
286 				fSignal);
287 		} else {
288 			out.Print("team wait for child failed, error: "
289 				"0x%lx, ", fTeam);
290 		}
291 	}
292 
293 private:
294 	job_control_state	fState;
295 	team_id				fTeam;
296 	status_t			fStatus;
297 	uint16				fReason;
298 	uint16				fSignal;
299 };
300 
301 }	// namespace TeamTracing
302 
303 #	define T(x) new(std::nothrow) TeamTracing::x;
304 #else
305 #	define T(x) ;
306 #endif
307 
308 
309 //	#pragma mark - TeamNotificationService
310 
311 
312 TeamNotificationService::TeamNotificationService()
313 	: DefaultNotificationService("teams")
314 {
315 }
316 
317 
318 void
319 TeamNotificationService::Notify(uint32 eventCode, struct team* team)
320 {
321 	char eventBuffer[128];
322 	KMessage event;
323 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
324 	event.AddInt32("event", eventCode);
325 	event.AddInt32("team", team->id);
326 	event.AddPointer("teamStruct", team);
327 
328 	DefaultNotificationService::Notify(event, eventCode);
329 }
330 
331 
332 //	#pragma mark - Private functions
333 
334 
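// Kernel debugger helpers: print detailed information about a single team,
// or a one-line summary of every team in the system.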
335 static void
336 _dump_team_info(struct team* team)
337 {
338 	kprintf("TEAM: %p\n", team);
339 	kprintf("id:               %ld (%#lx)\n", team->id, team->id);
340 	kprintf("name:             '%s'\n", team->name);
341 	kprintf("args:             '%s'\n", team->args);
342 	kprintf("next:             %p\n", team->next);
343 	kprintf("parent:           %p", team->parent);
344 	if (team->parent != NULL) {
345 		kprintf(" (id = %ld)\n", team->parent->id);
346 	} else
347 		kprintf("\n");
348 
349 	kprintf("children:         %p\n", team->children);
350 	kprintf("num_threads:      %d\n", team->num_threads);
351 	kprintf("state:            %d\n", team->state);
352 	kprintf("flags:            0x%lx\n", team->flags);
353 	kprintf("io_context:       %p\n", team->io_context);
354 	if (team->address_space)
355 		kprintf("address_space:    %p\n", team->address_space);
356 	kprintf("user data:        %p (area %ld)\n", (void*)team->user_data,
357 		team->user_data_area);
358 	kprintf("free user thread: %p\n", team->free_user_threads);
359 	kprintf("main_thread:      %p\n", team->main_thread);
360 	kprintf("thread_list:      %p\n", team->thread_list);
361 	kprintf("group_id:         %ld\n", team->group_id);
362 	kprintf("session_id:       %ld\n", team->session_id);
363 }
364 
365 
366 static int
367 dump_team_info(int argc, char** argv)
368 {
369 	struct hash_iterator iterator;
370 	struct team* team;
371 	team_id id = -1;
372 	bool found = false;
373 
374 	if (argc < 2) {
375 		struct thread* thread = thread_get_current_thread();
376 		if (thread != NULL && thread->team != NULL)
377 			_dump_team_info(thread->team);
378 		else
379 			kprintf("No current team!\n");
380 		return 0;
381 	}
382 
383 	id = strtoul(argv[1], NULL, 0);
384 	if (IS_KERNEL_ADDRESS(id)) {
385 		// semi-hack
386 		_dump_team_info((struct team*)id);
387 		return 0;
388 	}
389 
390 	// walk through the team list, trying to match name or id
391 	hash_open(sTeamHash, &iterator);
392 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
393 		if ((team->name && strcmp(argv[1], team->name) == 0)
394 			|| team->id == id) {
395 			_dump_team_info(team);
396 			found = true;
397 			break;
398 		}
399 	}
400 	hash_close(sTeamHash, &iterator, false);
401 
402 	if (!found)
403 		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
404 	return 0;
405 }
406 
407 
408 static int
409 dump_teams(int argc, char** argv)
410 {
411 	struct hash_iterator iterator;
412 	struct team* team;
413 
414 	kprintf("team           id  parent      name\n");
415 	hash_open(sTeamHash, &iterator);
416 
417 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
418 		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->name);
419 	}
420 
421 	hash_close(sTeamHash, &iterator, false);
422 	return 0;
423 }
424 
425 
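// Hash table callbacks for sTeamHash and sGroupHash. The hash functions are
// called either with an actual element (team/process_group) or, for key
// lookups, with a NULL element and a team_key - hence the NULL checks.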
426 static int
427 team_struct_compare(void* _p, const void* _key)
428 {
429 	struct team* p = (struct team*)_p;
430 	const struct team_key* key = (const struct team_key*)_key;
431 
432 	if (p->id == key->id)
433 		return 0;
434 
435 	return 1;
436 }
437 
438 
439 static uint32
440 team_struct_hash(void* _p, const void* _key, uint32 range)
441 {
442 	struct team* p = (struct team*)_p;
443 	const struct team_key* key = (const struct team_key*)_key;
444 
445 	if (p != NULL)
446 		return p->id % range;
447 
448 	return (uint32)key->id % range;
449 }
450 
451 
452 static int
453 process_group_compare(void* _group, const void* _key)
454 {
455 	struct process_group* group = (struct process_group*)_group;
456 	const struct team_key* key = (const struct team_key*)_key;
457 
458 	if (group->id == key->id)
459 		return 0;
460 
461 	return 1;
462 }
463 
464 
465 static uint32
466 process_group_hash(void* _group, const void* _key, uint32 range)
467 {
468 	struct process_group* group = (struct process_group*)_group;
469 	const struct team_key* key = (const struct team_key*)_key;
470 
471 	if (group != NULL)
472 		return group->id % range;
473 
474 	return (uint32)key->id % range;
475 }
476 
477 
478 static void
479 insert_team_into_parent(struct team* parent, struct team* team)
480 {
481 	ASSERT(parent != NULL);
482 
483 	team->siblings_next = parent->children;
484 	parent->children = team;
485 	team->parent = parent;
486 }
487 
488 
489 /*!	Note: must have team lock held */
490 static void
491 remove_team_from_parent(struct team* parent, struct team* team)
492 {
493 	struct team* child;
494 	struct team* last = NULL;
495 
496 	for (child = parent->children; child != NULL;
497 			child = child->siblings_next) {
498 		if (child == team) {
499 			if (last == NULL)
500 				parent->children = child->siblings_next;
501 			else
502 				last->siblings_next = child->siblings_next;
503 
504 			team->parent = NULL;
505 			break;
506 		}
507 		last = child;
508 	}
509 }
510 
511 
512 /*!	Reparent each of our children
513 	Note: must have team lock held
514 */
515 static void
516 reparent_children(struct team* team)
517 {
518 	struct team* child;
519 
520 	while ((child = team->children) != NULL) {
521 		// remove the child from the current proc and add to the parent
522 		remove_team_from_parent(team, child);
523 		insert_team_into_parent(sKernelTeam, child);
524 	}
525 
526 	// move job control entries too
527 	sKernelTeam->stopped_children->entries.MoveFrom(
528 		&team->stopped_children->entries);
529 	sKernelTeam->continued_children->entries.MoveFrom(
530 		&team->continued_children->entries);
531 
532 	// Note, we don't move the dead children entries. Those will be deleted
533 	// when the team structure is deleted.
534 }
535 
536 
537 static bool
538 is_session_leader(struct team* team)
539 {
540 	return team->session_id == team->id;
541 }
542 
543 
544 static bool
545 is_process_group_leader(struct team* team)
546 {
547 	return team->group_id == team->id;
548 }
549 
550 
551 static void
552 deferred_delete_process_group(struct process_group* group)
553 {
554 	if (group == NULL)
555 		return;
556 
557 	// remove_group_from_session() keeps this pointer around
558 	// only if the session can be freed as well
559 	if (group->session) {
560 		TRACE(("deferred_delete_process_group(): frees session %ld\n",
561 			group->session->id));
562 		deferred_free(group->session);
563 	}
564 
565 	deferred_free(group);
566 }
567 
568 
569 /*!	Removes a group from its session. If it was the session's last group,
570 	the session link is kept, so that the session gets freed together with
571 	the group. You must hold the team lock when calling this function.
572 */
573 static void
574 remove_group_from_session(struct process_group* group)
575 {
576 	struct process_session* session = group->session;
577 
578 	// the group must be in a session for this function to have any effect
579 	if (session == NULL)
580 		return;
581 
582 	hash_remove(sGroupHash, group);
583 
584 	// We cannot free the session here; if this was its last group, we keep
585 	// the session link around so deferred_delete_process_group() frees it
586 	if (--session->group_count > 0)
587 		group->session = NULL;
588 }
589 
590 
591 /*!	Team lock must be held.
592 */
593 static void
594 acquire_process_group_ref(pid_t groupID)
595 {
596 	process_group* group = team_get_process_group_locked(NULL, groupID);
597 	if (group == NULL) {
598 		panic("acquire_process_group_ref(): unknown group ID: %ld", groupID);
599 		return;
600 	}
601 
602 	group->refs++;
603 }
604 
605 
606 /*!	Team lock must be held.
607 */
608 static void
609 release_process_group_ref(pid_t groupID)
610 {
611 	process_group* group = team_get_process_group_locked(NULL, groupID);
612 	if (group == NULL) {
613 		panic("release_process_group_ref(): unknown group ID: %ld", groupID);
614 		return;
615 	}
616 
617 	if (group->refs <= 0) {
618 		panic("release_process_group_ref(%ld): ref count already 0", groupID);
619 		return;
620 	}
621 
622 	if (--group->refs > 0)
623 		return;
624 
625 	// group is no longer used
626 
627 	remove_group_from_session(group);
628 	deferred_delete_process_group(group);
629 }
630 
631 
632 /*!	You must hold the team lock when calling this function. */
633 static void
634 insert_group_into_session(struct process_session* session,
635 	struct process_group* group)
636 {
637 	if (group == NULL)
638 		return;
639 
640 	group->session = session;
641 	hash_insert(sGroupHash, group);
642 	session->group_count++;
643 }
644 
645 
646 /*!	You must hold the team lock when calling this function. */
647 static void
648 insert_team_into_group(struct process_group* group, struct team* team)
649 {
650 	team->group = group;
651 	team->group_id = group->id;
652 	team->session_id = group->session->id;
653 
654 	team->group_next = group->teams;
655 	group->teams = team;
656 	acquire_process_group_ref(group->id);
657 }
658 
659 
660 /*!	Removes the team from the group.
661 
662 	\param team the team that'll be removed from its group
663 */
664 static void
665 remove_team_from_group(struct team* team)
666 {
667 	struct process_group* group = team->group;
668 	struct team* current;
669 	struct team* last = NULL;
670 
671 	// the team must be in a group for this function to have any effect
672 	if  (group == NULL)
673 		return;
674 
675 	for (current = group->teams; current != NULL;
676 			current = current->group_next) {
677 		if (current == team) {
678 			if (last == NULL)
679 				group->teams = current->group_next;
680 			else
681 				last->group_next = current->group_next;
682 
683 			team->group = NULL;
684 			break;
685 		}
686 		last = current;
687 	}
688 
689 	team->group = NULL;
690 	team->group_next = NULL;
691 
692 	release_process_group_ref(group->id);
693 }
694 
695 
696 static struct process_group*
697 create_process_group(pid_t id)
698 {
699 	struct process_group* group
700 		= (struct process_group*)malloc(sizeof(struct process_group));
701 	if (group == NULL)
702 		return NULL;
703 
704 	group->id = id;
705 	group->refs = 0;
706 	group->session = NULL;
707 	group->teams = NULL;
708 	group->orphaned = true;
709 	return group;
710 }
711 
712 
713 static struct process_session*
714 create_process_session(pid_t id)
715 {
716 	struct process_session* session
717 		= (struct process_session*)malloc(sizeof(struct process_session));
718 	if (session == NULL)
719 		return NULL;
720 
721 	session->id = id;
722 	session->group_count = 0;
723 	session->controlling_tty = -1;
724 	session->foreground_group = -1;
725 
726 	return session;
727 }
728 
729 
730 static void
731 set_team_name(struct team* team, const char* name)
732 {
733 	if (const char* lastSlash = strrchr(name, '/'))
734 		name = lastSlash + 1;
735 
736 	strlcpy(team->name, name, B_OS_NAME_LENGTH);
737 }
738 
739 
740 static struct team*
741 create_team_struct(const char* name, bool kernel)
742 {
743 	struct team* team = (struct team*)malloc(sizeof(struct team));
744 	if (team == NULL)
745 		return NULL;
746 	MemoryDeleter teamDeleter(team);
747 
748 	team->next = team->siblings_next = team->children = team->parent = NULL;
749 	team->id = allocate_thread_id();
750 	set_team_name(team, name);
751 	team->args[0] = '\0';
752 	team->num_threads = 0;
753 	team->io_context = NULL;
754 	team->address_space = NULL;
755 	team->realtime_sem_context = NULL;
756 	team->xsi_sem_context = NULL;
757 	team->thread_list = NULL;
758 	team->main_thread = NULL;
759 	team->loading_info = NULL;
760 	team->state = TEAM_STATE_BIRTH;
761 	team->flags = 0;
762 	team->death_entry = NULL;
763 	team->user_data_area = -1;
764 	team->user_data = 0;
765 	team->used_user_data = 0;
766 	team->user_data_size = 0;
767 	team->free_user_threads = NULL;
768 
769 	team->supplementary_groups = NULL;
770 	team->supplementary_group_count = 0;
771 
772 	team->dead_threads_kernel_time = 0;
773 	team->dead_threads_user_time = 0;
774 
775 	// dead threads
776 	list_init(&team->dead_threads);
777 	team->dead_threads_count = 0;
778 
779 	// dead children
780 	team->dead_children = new(nothrow) team_dead_children;
781 	if (team->dead_children == NULL)
782 		return NULL;
783 	ObjectDeleter<team_dead_children> deadChildrenDeleter(team->dead_children);
784 
785 	team->dead_children->count = 0;
786 	team->dead_children->kernel_time = 0;
787 	team->dead_children->user_time = 0;
788 
789 	// stopped children
790 	team->stopped_children = new(nothrow) team_job_control_children;
791 	if (team->stopped_children == NULL)
792 		return NULL;
793 	ObjectDeleter<team_job_control_children> stoppedChildrenDeleter(
794 		team->stopped_children);
795 
796 	// continued children
797 	team->continued_children = new(nothrow) team_job_control_children;
798 	if (team->continued_children == NULL)
799 		return NULL;
800 	ObjectDeleter<team_job_control_children> continuedChildrenDeleter(
801 		team->continued_children);
802 
803 	// job control entry
804 	team->job_control_entry = new(nothrow) job_control_entry;
805 	if (team->job_control_entry == NULL)
806 		return NULL;
807 	ObjectDeleter<job_control_entry> jobControlEntryDeleter(
808 		team->job_control_entry);
809 	team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
810 	team->job_control_entry->thread = team->id;
811 	team->job_control_entry->team = team;
812 
813 	list_init(&team->sem_list);
814 	list_init(&team->port_list);
815 	list_init(&team->image_list);
816 	list_init(&team->watcher_list);
817 
818 	clear_team_debug_info(&team->debug_info, true);
819 
820 	if (arch_team_init_team_struct(team, kernel) < 0)
821 		return NULL;
822 
823 	// publish dead/stopped/continued children condition vars
824 	team->dead_children->condition_variable.Init(team->dead_children,
825 		"team children");
826 
827 	// keep all allocated structures
828 	jobControlEntryDeleter.Detach();
829 	continuedChildrenDeleter.Detach();
830 	stoppedChildrenDeleter.Detach();
831 	deadChildrenDeleter.Detach();
832 	teamDeleter.Detach();
833 
834 	return team;
835 }
836 
837 
838 static void
839 delete_team_struct(struct team* team)
840 {
841 	while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
842 			&team->dead_threads)) {
843 		free(threadDeathEntry);
844 	}
845 
846 	while (job_control_entry* entry = team->dead_children->entries.RemoveHead())
847 		delete entry;
848 
849 	while (free_user_thread* entry = team->free_user_threads) {
850 		team->free_user_threads = entry->next;
851 		free(entry);
852 	}
853 
854 	malloc_referenced_release(team->supplementary_groups);
855 
856 	delete team->job_control_entry;
857 		// usually already NULL and transferred to the parent
858 	delete team->continued_children;
859 	delete team->stopped_children;
860 	delete team->dead_children;
861 	free(team);
862 }
863 
864 
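/*!	Creates the team's "user area": a small fully locked area in the team's
	address space from which the per-thread user_thread structures are
	allocated (see team_allocate_user_thread()).
*/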
865 static status_t
866 create_team_user_data(struct team* team)
867 {
868 	void* address = (void*)KERNEL_USER_DATA_BASE;
869 	size_t size = 4 * B_PAGE_SIZE;
870 	team->user_data_area = create_area_etc(team->id, "user area", &address,
871 		B_BASE_ADDRESS, size, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0);
872 	if (team->user_data_area < 0)
873 		return team->user_data_area;
874 
875 	team->user_data = (addr_t)address;
876 	team->used_user_data = 0;
877 	team->user_data_size = size;
878 	team->free_user_threads = NULL;
879 
880 	return B_OK;
881 }
882 
883 
884 static void
885 delete_team_user_data(struct team* team)
886 {
887 	if (team->user_data_area >= 0) {
888 		vm_delete_area(team->id, team->user_data_area, true);
889 		team->user_data = 0;
890 		team->used_user_data = 0;
891 		team->user_data_size = 0;
892 		team->user_data_area = -1;
893 		while (free_user_thread* entry = team->free_user_threads) {
894 			team->free_user_threads = entry->next;
895 			free(entry);
896 		}
897 	}
898 }
899 
900 
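/*!	Copies the flat argument/environment block from userland into kernel
	memory, verifies that all embedded string pointers lie within the block,
	and relocates them so that they point into the kernel copy. On success the
	caller owns the returned buffer and is responsible for free()ing it.
*/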
901 static status_t
902 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
903 	int32 argCount, int32 envCount, char**& _flatArgs)
904 {
905 	if (argCount < 0 || envCount < 0)
906 		return B_BAD_VALUE;
907 
908 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
909 		return B_TOO_MANY_ARGS;
910 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
911 		return B_BAD_VALUE;
912 
913 	if (!IS_USER_ADDRESS(userFlatArgs))
914 		return B_BAD_ADDRESS;
915 
916 	// allocate kernel memory
917 	char** flatArgs = (char**)malloc(flatArgsSize);
918 	if (flatArgs == NULL)
919 		return B_NO_MEMORY;
920 
921 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
922 		free(flatArgs);
923 		return B_BAD_ADDRESS;
924 	}
925 
926 	// check and relocate the array
927 	status_t error = B_OK;
928 	const char* stringBase = (const char*)(flatArgs + argCount + envCount + 2);
929 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
930 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
931 		if (i == argCount || i == argCount + envCount + 1) {
932 			// check array null termination
933 			if (flatArgs[i] != NULL) {
934 				error = B_BAD_VALUE;
935 				break;
936 			}
937 		} else {
938 			// check string
939 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
940 			size_t maxLen = stringEnd - arg;
941 			if (arg < stringBase || arg >= stringEnd
942 					|| strnlen(arg, maxLen) == maxLen) {
943 				error = B_BAD_VALUE;
944 				break;
945 			}
946 
947 			flatArgs[i] = arg;
948 		}
949 	}
950 
951 	if (error == B_OK)
952 		_flatArgs = flatArgs;
953 	else
954 		free(flatArgs);
955 
956 	return error;
957 }
958 
959 
960 static void
961 free_team_arg(struct team_arg* teamArg)
962 {
963 	if (teamArg != NULL) {
964 		free(teamArg->flat_args);
965 		free(teamArg->path);
966 		free(teamArg);
967 	}
968 }
969 
970 
971 static status_t
972 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
973 	size_t flatArgsSize, int32 argCount, int32 envCount, port_id port,
974 	uint32 token)
975 {
976 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
977 	if (teamArg == NULL)
978 		return B_NO_MEMORY;
979 
980 	teamArg->path = strdup(path);
981 	if (teamArg->path == NULL) {
982 		free(teamArg);
983 		return B_NO_MEMORY;
984 	}
985 
986 	// copy the args over
987 
988 	teamArg->flat_args = flatArgs;
989 	teamArg->flat_args_size = flatArgsSize;
990 	teamArg->arg_count = argCount;
991 	teamArg->env_count = envCount;
992 	teamArg->error_port = port;
993 	teamArg->error_token = token;
994 
995 	*_teamArg = teamArg;
996 	return B_OK;
997 }
998 
999 
1000 static int32
1001 team_create_thread_start(void* args)
1002 {
1003 	status_t err;
1004 	struct thread* thread;
1005 	struct team* team;
1006 	struct team_arg* teamArgs = (struct team_arg*)args;
1007 	const char* path;
1008 	addr_t entry;
1009 	char userStackName[128];
1010 	uint32 sizeLeft;
1011 	char** userArgs;
1012 	char** userEnv;
1013 	struct user_space_program_args* programArgs;
1014 	uint32 argCount, envCount, i;
1015 
1016 	thread = thread_get_current_thread();
1017 	team = thread->team;
1018 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
1019 
1020 	TRACE(("team_create_thread_start: entry thread %ld\n", thread->id));
1021 
1022 	// get a user thread for the main thread
1023 	thread->user_thread = team_allocate_user_thread(team);
1024 
1025 	// create an initial primary stack area
1026 
1027 	// Main stack area layout is currently as follows (starting from 0):
1028 	//
1029 	// size								| usage
1030 	// ---------------------------------+--------------------------------
1031 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
1032 	// TLS_SIZE							| TLS data
1033 	// sizeof(user_space_program_args)	| argument structure for the runtime
1034 	//									| loader
1035 	// flat arguments size				| flat process arguments and environment
1036 
1037 	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
1038 	// the heap
1039 	// TODO: we could reserve the whole USER_STACK_REGION upfront...
1040 
1041 	sizeLeft = PAGE_ALIGN(USER_MAIN_THREAD_STACK_SIZE
1042 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE + TLS_SIZE
1043 		+ sizeof(struct user_space_program_args) + teamArgs->flat_args_size);
1044 	thread->user_stack_base
1045 		= USER_STACK_REGION + USER_STACK_REGION_SIZE - sizeLeft;
1046 	thread->user_stack_size = USER_MAIN_THREAD_STACK_SIZE
1047 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
1048 		// the exact location at the end of the user stack area
1049 
1050 	sprintf(userStackName, "%s_main_stack", team->name);
1051 	thread->user_stack_area = create_area_etc(team->id, userStackName,
1052 		(void**)&thread->user_stack_base, B_EXACT_ADDRESS, sizeLeft, B_NO_LOCK,
1053 		B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0, 0);
1054 	if (thread->user_stack_area < 0) {
1055 		dprintf("team_create_thread_start: could not create default user stack "
1056 			"region: %s\n", strerror(thread->user_stack_area));
1057 
1058 		free_team_arg(teamArgs);
1059 		return thread->user_stack_area;
1060 	}
1061 
1062 	// now that the TLS area is allocated, initialize TLS
1063 	arch_thread_init_tls(thread);
1064 
1065 	argCount = teamArgs->arg_count;
1066 	envCount = teamArgs->env_count;
1067 
1068 	programArgs = (struct user_space_program_args*)(thread->user_stack_base
1069 		+ thread->user_stack_size + TLS_SIZE);
1070 
1071 	userArgs = (char**)(programArgs + 1);
1072 	userEnv = userArgs + argCount + 1;
1073 	path = teamArgs->path;
1074 
1075 	if (user_strlcpy(programArgs->program_path, path,
1076 				sizeof(programArgs->program_path)) < B_OK
1077 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1078 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
1079 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1080 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
1081 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1082 				sizeof(port_id)) < B_OK
1083 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1084 				sizeof(uint32)) < B_OK
1085 		|| user_memcpy(userArgs, teamArgs->flat_args,
1086 				teamArgs->flat_args_size) < B_OK) {
1087 		// the team deletion process will clean this mess
1088 		return B_BAD_ADDRESS;
1089 	}
1090 
1091 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1092 
1093 	// add args to info member
1094 	team->args[0] = 0;
1095 	strlcpy(team->args, path, sizeof(team->args));
1096 	for (i = 1; i < argCount; i++) {
1097 		strlcat(team->args, " ", sizeof(team->args));
1098 		strlcat(team->args, teamArgs->flat_args[i], sizeof(team->args));
1099 	}
1100 
1101 	free_team_arg(teamArgs);
1102 		// the arguments are already on the user stack, we no longer need
1103 		// them in this form
1104 
1105 	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
1106 	// automatic variables with function scope will never be destroyed.
1107 	{
1108 		// find runtime_loader path
1109 		KPath runtimeLoaderPath;
1110 		err = find_directory(B_BEOS_SYSTEM_DIRECTORY, gBootDevice, false,
1111 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1112 		if (err < B_OK) {
1113 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1114 				strerror(err)));
1115 			return err;
1116 		}
1117 		runtimeLoaderPath.UnlockBuffer();
1118 		err = runtimeLoaderPath.Append("runtime_loader");
1119 
1120 		if (err == B_OK) {
1121 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
1122 				&entry);
1123 		}
1124 	}
1125 
1126 	if (err < B_OK) {
1127 		// Luckily, we don't have to clean up the mess we created - that's
1128 		// done for us by the normal team deletion process
1129 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1130 			"%s\n", strerror(err)));
1131 		return err;
1132 	}
1133 
1134 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1135 
1136 	team->state = TEAM_STATE_NORMAL;
1137 
1138 	// jump to the entry point in user space
1139 	return arch_thread_enter_userspace(thread, entry, programArgs, NULL);
1140 		// only returns in case of error
1141 }
1142 
1143 
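/*!	Creates a new team running the executable given by \a _flatArgs[0]: sets
	up the team structure, I/O context, address space, and user data area,
	then spawns a kernel thread executing team_create_thread_start(), which
	loads the runtime loader and enters userland. If \a flags contains
	B_WAIT_TILL_LOADED, the call blocks until loading has finished.
	On success the ID of the new team's main thread is returned.
*/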
1144 static thread_id
1145 load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1146 	int32 envCount, int32 priority, team_id parentID, uint32 flags,
1147 	port_id errorPort, uint32 errorToken)
1148 {
1149 	char** flatArgs = _flatArgs;
1150 	struct team* team;
1151 	const char* threadName;
1152 	thread_id thread;
1153 	status_t status;
1154 	cpu_status state;
1155 	struct team_arg* teamArgs;
1156 	struct team_loading_info loadingInfo;
1157 	io_context* parentIOContext = NULL;
1158 
1159 	if (flatArgs == NULL || argCount == 0)
1160 		return B_BAD_VALUE;
1161 
1162 	const char* path = flatArgs[0];
1163 
1164 	TRACE(("load_image_internal: name '%s', args = %p, argCount = %ld\n",
1165 		path, flatArgs, argCount));
1166 
1167 	team = create_team_struct(path, false);
1168 	if (team == NULL)
1169 		return B_NO_MEMORY;
1170 
1171 	if (flags & B_WAIT_TILL_LOADED) {
1172 		loadingInfo.thread = thread_get_current_thread();
1173 		loadingInfo.result = B_ERROR;
1174 		loadingInfo.done = false;
1175 		team->loading_info = &loadingInfo;
1176 	}
1177 
1178  	InterruptsSpinLocker teamLocker(gTeamSpinlock);
1179 
1180 	// get the parent team
1181 	struct team* parent;
1182 
1183 	if (parentID == B_CURRENT_TEAM)
1184 		parent = thread_get_current_thread()->team;
1185 	else
1186 		parent = team_get_team_struct_locked(parentID);
1187 
1188 	if (parent == NULL) {
1189 		teamLocker.Unlock();
1190 		status = B_BAD_TEAM_ID;
1191 		goto err0;
1192 	}
1193 
1194 	// inherit the parent's user/group
1195 	inherit_parent_user_and_group_locked(team, parent);
1196 
1197 	hash_insert(sTeamHash, team);
1198 	insert_team_into_parent(parent, team);
1199 	insert_team_into_group(parent->group, team);
1200 	sUsedTeams++;
1201 
1202 	// get a reference to the parent's I/O context -- we need it to create ours
1203 	parentIOContext = parent->io_context;
1204 	vfs_get_io_context(parentIOContext);
1205 
1206 	teamLocker.Unlock();
1207 
1208 	// check the executable's set-user/group-id permission
1209 	update_set_id_user_and_group(team, path);
1210 
1211 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1212 		envCount, errorPort, errorToken);
1213 
1214 	if (status != B_OK)
1215 		goto err1;
1216 
1217 	_flatArgs = NULL;
1218 		// args are owned by the team_arg structure now
1219 
1220 	// create a new io_context for this team
1221 	team->io_context = vfs_new_io_context(parentIOContext, true);
1222 	if (!team->io_context) {
1223 		status = B_NO_MEMORY;
1224 		goto err2;
1225 	}
1226 
1227 	// We don't need the parent's I/O context any longer.
1228 	vfs_put_io_context(parentIOContext);
1229 	parentIOContext = NULL;
1230 
1231 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1232 	vfs_exec_io_context(team->io_context);
1233 
1234 	// create an address space for this team
1235 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
1236 		&team->address_space);
1237 	if (status != B_OK)
1238 		goto err3;
1239 
1240 	// cut the path from the main thread name
1241 	threadName = strrchr(path, '/');
1242 	if (threadName != NULL)
1243 		threadName++;
1244 	else
1245 		threadName = path;
1246 
1247 	// create the user data area
1248 	status = create_team_user_data(team);
1249 	if (status != B_OK)
1250 		goto err4;
1251 
1252 	// notify team listeners
1253 	sNotificationService.Notify(TEAM_ADDED, team);
1254 
1255 	// Create a kernel thread, but under the context of the new team
1256 	// The new thread will take over ownership of teamArgs
1257 	thread = spawn_kernel_thread_etc(team_create_thread_start, threadName,
1258 		B_NORMAL_PRIORITY, teamArgs, team->id, team->id);
1259 	if (thread < 0) {
1260 		status = thread;
1261 		goto err5;
1262 	}
1263 
1264 	// wait for the loader of the new team to finish its work
1265 	if ((flags & B_WAIT_TILL_LOADED) != 0) {
1266 		struct thread* mainThread;
1267 
1268 		state = disable_interrupts();
1269 		GRAB_THREAD_LOCK();
1270 
1271 		mainThread = thread_get_thread_struct_locked(thread);
1272 		if (mainThread) {
1273 			// resume the team's main thread
1274 			if (mainThread->state == B_THREAD_SUSPENDED)
1275 				scheduler_enqueue_in_run_queue(mainThread);
1276 
1277 			// Now suspend ourselves until loading is finished.
1278 			// We will be woken either by the thread, when it finished or
1279 			// aborted loading, or when the team is going to die (e.g. is
1280 			// killed). In either case the one setting `loadingInfo.done' is
1281 			// responsible for removing the info from the team structure.
1282 			while (!loadingInfo.done) {
1283 				thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
1284 				scheduler_reschedule();
1285 			}
1286 		} else {
1287 			// Impressive! Someone managed to kill the thread in this short
1288 			// time.
1289 		}
1290 
1291 		RELEASE_THREAD_LOCK();
1292 		restore_interrupts(state);
1293 
1294 		if (loadingInfo.result < B_OK)
1295 			return loadingInfo.result;
1296 	}
1297 
1298 	// notify the debugger
1299 	user_debug_team_created(team->id);
1300 
1301 	return thread;
1302 
1303 err5:
1304 	sNotificationService.Notify(TEAM_REMOVED, team);
1305 	delete_team_user_data(team);
1306 err4:
1307 	team->address_space->Put();
1308 err3:
1309 	vfs_put_io_context(team->io_context);
1310 err2:
1311 	free_team_arg(teamArgs);
1312 err1:
1313 	if (parentIOContext != NULL)
1314 		vfs_put_io_context(parentIOContext);
1315 
1316 	// Remove the team structure from the team hash table and delete the team
1317 	// structure
1318 	state = disable_interrupts();
1319 	GRAB_TEAM_LOCK();
1320 
1321 	remove_team_from_group(team);
1322 	remove_team_from_parent(team->parent, team);
1323 	hash_remove(sTeamHash, team);
1324 
1325 	RELEASE_TEAM_LOCK();
1326 	restore_interrupts(state);
1327 
1328 err0:
1329 	delete_team_struct(team);
1330 
1331 	return status;
1332 }
1333 
1334 
1335 /*!	Almost shuts down the current team and loads a new image into it.
1336 	If successful, this function does not return and will take over ownership of
1337 	the arguments provided.
1338 	This function may only be called from user space.
1339 */
1340 static status_t
1341 exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
1342 	int32 argCount, int32 envCount)
1343 {
1344 	// NOTE: Since this function normally doesn't return, don't use automatic
1345 	// variables that need destruction in the function scope.
1346 	char** flatArgs = _flatArgs;
1347 	struct team* team = thread_get_current_thread()->team;
1348 	struct team_arg* teamArgs;
1349 	const char* threadName;
1350 	status_t status = B_OK;
1351 	cpu_status state;
1352 	struct thread* thread;
1353 	thread_id nubThreadID = -1;
1354 
1355 	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
1356 		path, argCount, envCount, team->id));
1357 
1358 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1359 
1360 	// switching the kernel at run time is probably not a good idea :)
1361 	if (team == team_get_kernel_team())
1362 		return B_NOT_ALLOWED;
1363 
1364 	// we currently need to be single threaded here
1365 	// ToDo: maybe we should just kill all other threads and
1366 	//	make the current thread the team's main thread?
1367 	if (team->main_thread != thread_get_current_thread())
1368 		return B_NOT_ALLOWED;
1369 
1370 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1371 	// We iterate through the thread list to make sure that there's no other
1372 	// thread.
1373 	state = disable_interrupts();
1374 	GRAB_TEAM_LOCK();
1375 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1376 
1377 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1378 		nubThreadID = team->debug_info.nub_thread;
1379 
1380 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1381 
1382 	for (thread = team->thread_list; thread; thread = thread->team_next) {
1383 		if (thread != team->main_thread && thread->id != nubThreadID) {
1384 			status = B_NOT_ALLOWED;
1385 			break;
1386 		}
1387 	}
1388 
1389 	RELEASE_TEAM_LOCK();
1390 	restore_interrupts(state);
1391 
1392 	if (status != B_OK)
1393 		return status;
1394 
1395 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1396 		envCount, -1, 0);
1397 
1398 	if (status != B_OK)
1399 		return status;
1400 
1401 	_flatArgs = NULL;
1402 		// args are owned by the team_arg structure now
1403 
1404 	// ToDo: remove team resources if there are any left
1405 	// thread_atkernel_exit() might not be called at all
1406 
1407 	thread_reset_for_exec();
1408 
1409 	user_debug_prepare_for_exec();
1410 
1411 	delete_team_user_data(team);
1412 	vm_delete_areas(team->address_space, false);
1413 	xsi_sem_undo(team);
1414 	delete_owned_ports(team);
1415 	sem_delete_owned_sems(team);
1416 	remove_images(team);
1417 	vfs_exec_io_context(team->io_context);
1418 	delete_realtime_sem_context(team->realtime_sem_context);
1419 	team->realtime_sem_context = NULL;
1420 
1421 	status = create_team_user_data(team);
1422 	if (status != B_OK) {
1423 		// creating the user data failed -- we're toast
1424 		// TODO: We should better keep the old user area in the first place.
1425 		exit_thread(status);
1426 		return status;
1427 	}
1428 
1429 	user_debug_finish_after_exec();
1430 
1431 	// rename the team
1432 
1433 	set_team_name(team, path);
1434 
1435 	// cut the path from the team name and rename the main thread, too
1436 	threadName = strrchr(path, '/');
1437 	if (threadName != NULL)
1438 		threadName++;
1439 	else
1440 		threadName = path;
1441 	rename_thread(thread_get_current_thread_id(), threadName);
1442 
1443 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1444 
1445 	// Update user/group according to the executable's set-user/group-id
1446 	// permission.
1447 	update_set_id_user_and_group(team, path);
1448 
1449 	user_debug_team_exec();
1450 
1451 	// notify team listeners
1452 	sNotificationService.Notify(TEAM_EXEC, team);
1453 
1454 	status = team_create_thread_start(teamArgs);
1455 		// this one usually doesn't return...
1456 
1457 	// Sorry, we have to kill ourselves - there is no way out anymore
1458 	// (without any areas left and all that)
1459 	exit_thread(status);
1460 
1461 	// we return a status here since the signal that is sent by the
1462 	// call above is not immediately handled
1463 	return B_ERROR;
1464 }
1465 
1466 
1467 /*! This is the first function to be called from the newly created
1468 	main child thread.
1469 	It fills in everything that's left to do from fork_arg, and then returns
1470 	to userland in the child, as if coming back from the fork() syscall.
1471 */
1472 static int32
1473 fork_team_thread_start(void* _args)
1474 {
1475 	struct thread* thread = thread_get_current_thread();
1476 	struct fork_arg* forkArgs = (struct fork_arg*)_args;
1477 
1478 	struct arch_fork_arg archArgs = forkArgs->arch_info;
1479 		// we need a local copy of the arch dependent part
1480 
1481 	thread->user_stack_area = forkArgs->user_stack_area;
1482 	thread->user_stack_base = forkArgs->user_stack_base;
1483 	thread->user_stack_size = forkArgs->user_stack_size;
1484 	thread->user_local_storage = forkArgs->user_local_storage;
1485 	thread->sig_block_mask = forkArgs->sig_block_mask;
1486 	thread->user_thread = forkArgs->user_thread;
1487 	memcpy(thread->sig_action, forkArgs->sig_action,
1488 		sizeof(forkArgs->sig_action));
1489 	thread->signal_stack_base = forkArgs->signal_stack_base;
1490 	thread->signal_stack_size = forkArgs->signal_stack_size;
1491 	thread->signal_stack_enabled = forkArgs->signal_stack_enabled;
1492 
1493 	arch_thread_init_tls(thread);
1494 
1495 	free(forkArgs);
1496 
1497 	// set frame of the parent thread to this one, too
1498 
1499 	arch_restore_fork_frame(&archArgs);
1500 		// This one won't return here
1501 
1502 	return 0;
1503 }
1504 
1505 
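/*!	Kernel backend for fork(): duplicates the current team. The parent's
	user/group, I/O context, realtime semaphore context, and image list are
	inherited or cloned, all areas are copied via vm_copy_area() (except the
	user data area, which is created anew), and the child's main thread is
	started in fork_team_thread_start(). Returns the child's main thread ID.
*/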
1506 static thread_id
1507 fork_team(void)
1508 {
1509 	struct thread* parentThread = thread_get_current_thread();
1510 	struct team* parentTeam = parentThread->team;
1511 	struct team* team;
1512 	struct fork_arg* forkArgs;
1513 	struct area_info info;
1514 	thread_id threadID;
1515 	status_t status;
1516 	int32 cookie;
1517 
1518 	TRACE(("fork_team(): team %ld\n", parentTeam->id));
1519 
1520 	if (parentTeam == team_get_kernel_team())
1521 		return B_NOT_ALLOWED;
1522 
1523 	// create a new team
1524 	// TODO: this is very similar to load_image_internal() - maybe we can do
1525 	// something about it :)
1526 
1527 	team = create_team_struct(parentTeam->name, false);
1528 	if (team == NULL)
1529 		return B_NO_MEMORY;
1530 
1531 	strlcpy(team->args, parentTeam->args, sizeof(team->args));
1532 
1533 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
1534 
1535 	// Inherit the parent's user/group.
1536 	inherit_parent_user_and_group_locked(team, parentTeam);
1537 
1538 	hash_insert(sTeamHash, team);
1539 	insert_team_into_parent(parentTeam, team);
1540 	insert_team_into_group(parentTeam->group, team);
1541 	sUsedTeams++;
1542 
1543 	teamLocker.Unlock();
1544 
1545 	// inherit some team debug flags
1546 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
1547 		& B_TEAM_DEBUG_INHERITED_FLAGS;
1548 
1549 	forkArgs = (struct fork_arg*)malloc(sizeof(struct fork_arg));
1550 	if (forkArgs == NULL) {
1551 		status = B_NO_MEMORY;
1552 		goto err1;
1553 	}
1554 
1555 	// create a new io_context for this team
1556 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
1557 	if (!team->io_context) {
1558 		status = B_NO_MEMORY;
1559 		goto err2;
1560 	}
1561 
1562 	// duplicate the realtime sem context
1563 	if (parentTeam->realtime_sem_context) {
1564 		team->realtime_sem_context = clone_realtime_sem_context(
1565 			parentTeam->realtime_sem_context);
1566 		if (team->realtime_sem_context == NULL) {
1567 			status = B_NO_MEMORY;
1568 			goto err25;
1569 		}
1570 	}
1571 
1572 	// create an address space for this team
1573 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
1574 		&team->address_space);
1575 	if (status < B_OK)
1576 		goto err3;
1577 
1578 	// copy all areas of the team
1579 	// TODO: should be able to handle stack areas differently (ie. don't have
1580 	// them copy-on-write)
1581 	// TODO: all stacks of other threads than the current one could be left out
1582 
1583 	forkArgs->user_thread = NULL;
1584 
1585 	cookie = 0;
1586 	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
1587 		if (info.area == parentTeam->user_data_area) {
1588 			// don't clone the user area; just create a new one
1589 			status = create_team_user_data(team);
1590 			if (status != B_OK)
1591 				break;
1592 
1593 			forkArgs->user_thread = team_allocate_user_thread(team);
1594 		} else {
1595 			void* address;
1596 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
1597 				&address, B_CLONE_ADDRESS, info.protection, info.area);
1598 			if (area < B_OK) {
1599 				status = area;
1600 				break;
1601 			}
1602 
1603 			if (info.area == parentThread->user_stack_area)
1604 				forkArgs->user_stack_area = area;
1605 		}
1606 	}
1607 
1608 	if (status < B_OK)
1609 		goto err4;
1610 
1611 	if (forkArgs->user_thread == NULL) {
1612 #if KDEBUG
1613 		panic("user data area not found, parent area is %ld",
1614 			parentTeam->user_data_area);
1615 #endif
1616 		status = B_ERROR;
1617 		goto err4;
1618 	}
1619 
1620 	forkArgs->user_stack_base = parentThread->user_stack_base;
1621 	forkArgs->user_stack_size = parentThread->user_stack_size;
1622 	forkArgs->user_local_storage = parentThread->user_local_storage;
1623 	forkArgs->sig_block_mask = parentThread->sig_block_mask;
1624 	memcpy(forkArgs->sig_action, parentThread->sig_action,
1625 		sizeof(forkArgs->sig_action));
1626 	forkArgs->signal_stack_base = parentThread->signal_stack_base;
1627 	forkArgs->signal_stack_size = parentThread->signal_stack_size;
1628 	forkArgs->signal_stack_enabled = parentThread->signal_stack_enabled;
1629 
1630 	arch_store_fork_frame(&forkArgs->arch_info);
1631 
1632 	// copy image list
1633 	image_info imageInfo;
1634 	cookie = 0;
1635 	while (get_next_image_info(parentTeam->id, &cookie, &imageInfo) == B_OK) {
1636 		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
1637 		if (image < 0) {
1638 			status = image;
			goto err5;
		}
1639 	}
1640 
1641 	// notify team listeners
1642 	sNotificationService.Notify(TEAM_ADDED, team);
1643 
1644 	// create a kernel thread under the context of the new team
1645 	threadID = spawn_kernel_thread_etc(fork_team_thread_start,
1646 		parentThread->name, parentThread->priority, forkArgs,
1647 		team->id, team->id);
1648 	if (threadID < 0) {
1649 		status = threadID;
1650 		goto err5;
1651 	}
1652 
1653 	// notify the debugger
1654 	user_debug_team_created(team->id);
1655 
1656 	T(TeamForked(threadID));
1657 
1658 	resume_thread(threadID);
1659 	return threadID;
1660 
1661 err5:
1662 	sNotificationService.Notify(TEAM_REMOVED, team);
1663 	remove_images(team);
1664 err4:
1665 	team->address_space->RemoveAndPut();
1666 err3:
1667 	delete_realtime_sem_context(team->realtime_sem_context);
1668 err25:
1669 	vfs_put_io_context(team->io_context);
1670 err2:
1671 	free(forkArgs);
1672 err1:
1673 	// remove the team structure from the team hash table and delete the team
1674 	// structure
1675 	teamLocker.Lock();
1676 
1677 	remove_team_from_group(team);
1678 	remove_team_from_parent(parentTeam, team);
1679 	hash_remove(sTeamHash, team);
1680 
1681 	teamLocker.Unlock();
1682 
1683 	delete_team_struct(team);
1684 
1685 	return status;
1686 }
1687 
1688 
1689 /*!	Returns whether the specified team \a parent has any children belonging
1690 	to the process group specified by \a groupID.
1691 	Must be called with the team lock held.
1692 */
1693 static bool
1694 has_children_in_group(struct team* parent, pid_t groupID)
1695 {
1696 	struct team* team;
1697 
1698 	struct process_group* group = team_get_process_group_locked(
1699 		parent->group->session, groupID);
1700 	if (group == NULL)
1701 		return false;
1702 
1703 	for (team = group->teams; team; team = team->group_next) {
1704 		if (team->parent == parent)
1705 			return true;
1706 	}
1707 
1708 	return false;
1709 }
1710 
1711 
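/*!	Returns the first entry in \a children matching \a id, using waitpid()
	semantics: a positive \a id matches that team only, -1 matches any entry,
	and an \a id less than -1 matches entries belonging to the process group
	-\a id. Returns NULL if nothing matches.
*/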
1712 static job_control_entry*
1713 get_job_control_entry(team_job_control_children* children, pid_t id)
1714 {
1715 	for (JobControlEntryList::Iterator it = children->entries.GetIterator();
1716 		 job_control_entry* entry = it.Next();) {
1717 
1718 		if (id > 0) {
1719 			if (entry->thread == id)
1720 				return entry;
1721 		} else if (id == -1) {
1722 			return entry;
1723 		} else {
1724 			pid_t processGroup
1725 				= (entry->team ? entry->team->group_id : entry->group_id);
1726 			if (processGroup == -id)
1727 				return entry;
1728 		}
1729 	}
1730 
1731 	return NULL;
1732 }
1733 
1734 
1735 static job_control_entry*
1736 get_job_control_entry(struct team* team, pid_t id, uint32 flags)
1737 {
1738 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
1739 
1740 	if (entry == NULL && (flags & WCONTINUED) != 0)
1741 		entry = get_job_control_entry(team->continued_children, id);
1742 
1743 	if (entry == NULL && (flags & WUNTRACED) != 0)
1744 		entry = get_job_control_entry(team->stopped_children, id);
1745 
1746 	return entry;
1747 }
1748 
1749 
1750 job_control_entry::job_control_entry()
1751 	:
1752 	has_group_ref(false)
1753 {
1754 }
1755 
1756 
1757 job_control_entry::~job_control_entry()
1758 {
1759 	if (has_group_ref) {
1760 		InterruptsSpinLocker locker(gTeamSpinlock);
1761 		release_process_group_ref(group_id);
1762 	}
1763 }
1764 
1765 
1766 /*!	Team and thread lock must be held.
1767 */
1768 void
1769 job_control_entry::InitDeadState()
1770 {
1771 	if (team != NULL) {
1772 		struct thread* thread = team->main_thread;
1773 		group_id = team->group_id;
1774 		this->thread = thread->id;
1775 		status = thread->exit.status;
1776 		reason = thread->exit.reason;
1777 		signal = thread->exit.signal;
1778 		team = NULL;
1779 		acquire_process_group_ref(group_id);
1780 		has_group_ref = true;
1781 	}
1782 }
1783 
1784 
1785 job_control_entry&
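/*!	Note: the assignment operator deliberately does not copy
	\c has_group_ref; a copy (such as the local snapshot used in
	wait_for_child()) must not release the process group reference when it is
	destroyed.
*/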
1786 job_control_entry::operator=(const job_control_entry& other)
1787 {
1788 	state = other.state;
1789 	thread = other.thread;
1790 	has_group_ref = false;
1791 	team = other.team;
1792 	group_id = other.group_id;
1793 	status = other.status;
1794 	reason = other.reason;
1795 	signal = other.signal;
1796 
1797 	return *this;
1798 }
1799 
1800 
1801 /*! This is the kernel backend for waitpid(). It is a bit more expressive than
1802 	waitpid() when it comes to reporting the reason why a thread has died.
1803 */
1804 static thread_id
1805 wait_for_child(pid_t child, uint32 flags, int32* _reason,
1806 	status_t* _returnCode)
1807 {
1808 	struct thread* thread = thread_get_current_thread();
1809 	struct team* team = thread->team;
1810 	struct job_control_entry foundEntry;
1811 	struct job_control_entry* freeDeathEntry = NULL;
1812 	status_t status = B_OK;
1813 
1814 	TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));
1815 
1816 	T(WaitForChild(child, flags));
1817 
1818 	if (child == 0) {
1819 		// wait for all children in the process group of the calling team
1820 		child = -team->group_id;
1821 	}
1822 
1823 	bool ignoreFoundEntries = false;
1824 	bool ignoreFoundEntriesChecked = false;
1825 
1826 	while (true) {
1827 		InterruptsSpinLocker locker(gTeamSpinlock);
1828 
1829 		// check whether any condition holds
1830 		job_control_entry* entry = get_job_control_entry(team, child, flags);
1831 
1832 		// If we don't have an entry yet, check whether there are any children
1833 		// complying with the process group specification at all.
1834 		if (entry == NULL) {
1835 			// No success yet -- check whether there are any children we could
1836 			// wait for.
1837 			bool childrenExist = false;
1838 			if (child == -1) {
1839 				childrenExist = team->children != NULL;
1840 			} else if (child < -1) {
1841 				childrenExist = has_children_in_group(team, -child);
1842 			} else {
1843 				if (struct team* childTeam = team_get_team_struct_locked(child))
1844 					childrenExist = childTeam->parent == team;
1845 			}
1846 
1847 			if (!childrenExist) {
1848 				// there is no child we could wait for
1849 				status = ECHILD;
1850 			} else {
1851 				// the children we're waiting for are still running
1852 				status = B_WOULD_BLOCK;
1853 			}
1854 		} else {
1855 			// got something
1856 			foundEntry = *entry;
1857 			if (entry->state == JOB_CONTROL_STATE_DEAD) {
1858 				// The child is dead. Reap its death entry.
1859 				freeDeathEntry = entry;
1860 				team->dead_children->entries.Remove(entry);
1861 				team->dead_children->count--;
1862 			} else {
1863 				// The child is well. Reset its job control state.
1864 				team_set_job_control_state(entry->team,
1865 					JOB_CONTROL_STATE_NONE, 0, false);
1866 			}
1867 		}
1868 
1869 		// If we haven't got anything yet, prepare for waiting for the
1870 		// condition variable.
1871 		ConditionVariableEntry deadWaitEntry;
1872 
1873 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
1874 			team->dead_children->condition_variable.Add(&deadWaitEntry);
1875 
1876 		locker.Unlock();
1877 
1878 		// we got our entry and can return to our caller
1879 		if (status == B_OK) {
1880 			if (ignoreFoundEntries) {
1881 				// ... unless we shall ignore found entries
1882 				delete freeDeathEntry;
1883 				freeDeathEntry = NULL;
1884 				continue;
1885 			}
1886 
1887 			break;
1888 		}
1889 
1890 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
1891 			T(WaitForChildDone(status));
1892 			return status;
1893 		}
1894 
1895 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
1896 		if (status == B_INTERRUPTED) {
1897 			T(WaitForChildDone(status));
1898 			return status;
1899 		}
1900 
1901 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
1902 		// all our children are dead and fail with ECHILD. We check the
1903 		// condition at this point.
1904 		if (!ignoreFoundEntriesChecked) {
1905 			struct sigaction& handler = thread->sig_action[SIGCHLD - 1];
1906 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
1907 				|| handler.sa_handler == SIG_IGN) {
1908 				ignoreFoundEntries = true;
1909 			}
1910 
1911 			ignoreFoundEntriesChecked = true;
1912 		}
1913 	}
1914 
1915 	delete freeDeathEntry;
1916 
1917 	// when we got here, we have a valid death entry, and
1918 	// already got unregistered from the team or group
1919 	int reason = 0;
1920 	switch (foundEntry.state) {
1921 		case JOB_CONTROL_STATE_DEAD:
1922 			reason = foundEntry.reason;
1923 			break;
1924 		case JOB_CONTROL_STATE_STOPPED:
1925 			reason = THREAD_STOPPED;
1926 			break;
1927 		case JOB_CONTROL_STATE_CONTINUED:
1928 			reason = THREAD_CONTINUED;
1929 			break;
1930 		case JOB_CONTROL_STATE_NONE:
1931 			// can't happen
1932 			break;
1933 	}
1934 
1935 	*_returnCode = foundEntry.status;
1936 	*_reason = (foundEntry.signal << 16) | reason;
1937 
1938 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
1939 	// status is available.
1940 	if (is_signal_blocked(SIGCHLD)) {
1941 		InterruptsSpinLocker locker(gTeamSpinlock);
1942 
1943 		if (get_job_control_entry(team, child, flags) == NULL)
1944 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));
1945 	}
1946 
1947 	// When the team is dead, the main thread continues to live in the kernel
1948 	// team for a very short time. To avoid surprises for the caller we rather
1949 	// wait until the thread is really gone.
1950 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
1951 		wait_for_thread(foundEntry.thread, NULL);
1952 
1953 	T(WaitForChildDone(foundEntry));
1954 
1955 	return foundEntry.thread;
1956 }
1957 
1958 
1959 /*! Fills the team_info structure with information from the specified
1960 	team.
1961 	The team lock must be held when called.
1962 */
1963 static status_t
1964 fill_team_info(struct team* team, team_info* info, size_t size)
1965 {
1966 	if (size != sizeof(team_info))
1967 		return B_BAD_VALUE;
1968 
1969 	// ToDo: Set more information for team_info
1970 	memset(info, 0, size);
1971 
1972 	info->team = team->id;
1973 	info->thread_count = team->num_threads;
1974 	info->image_count = count_images(team);
1975 	//info->area_count =
1976 	info->debugger_nub_thread = team->debug_info.nub_thread;
1977 	info->debugger_nub_port = team->debug_info.nub_port;
1978 	//info->uid =
1979 	//info->gid =
1980 
1981 	strlcpy(info->args, team->args, sizeof(info->args));
1982 	info->argc = 1;
1983 
1984 	return B_OK;
1985 }
1986 
1987 
1988 /*!	Updates the \c orphaned field of a process_group and returns its new value.
1989 	Interrupts must be disabled and team lock be held.
1990 */
1991 static bool
1992 update_orphaned_process_group(process_group* group, pid_t dyingProcess)
1993 {
1994 	// Orphaned Process Group: "A process group in which the parent of every
1995 	// member is either itself a member of the group or is not a member of the
1996 	// group's session." (Open Group Base Specs Issue 6)
1997 
1998 	// once orphaned, things won't change (exception: cf. setpgid())
1999 	if (group->orphaned)
2000 		return true;
2001 
2002 	struct team* team = group->teams;
2003 	while (team != NULL) {
2004 		struct team* parent = team->parent;
2005 		if (team->id != dyingProcess && parent != NULL
2006 			&& parent->id != dyingProcess
2007 			&& parent->group_id != group->id
2008 			&& parent->session_id == group->session->id) {
2009 			return false;
2010 		}
2011 
2012 		team = team->group_next;
2013 	}
2014 
2015 	group->orphaned = true;
2016 	return true;
2017 }
2018 
2019 
2020 /*!	Returns whether the process group contains stopped processes.
2021 	Interrupts must be disabled and team lock be held.
2022 */
2023 static bool
2024 process_group_has_stopped_processes(process_group* group)
2025 {
2026 	SpinLocker _(gThreadSpinlock);
2027 
2028 	struct team* team = group->teams;
2029 	while (team != NULL) {
2030 		if (team->main_thread->state == B_THREAD_SUSPENDED)
2031 			return true;
2032 
2033 		team = team->group_next;
2034 	}
2035 
2036 	return false;
2037 }
2038 
2039 
2040 //	#pragma mark - Private kernel API
2041 
2042 
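/*!	Initializes the team subsystem: creates the team and process group hash
	tables, the initial session and process group, and the kernel team with
	its I/O context, and registers the "team"/"teams" debugger commands.
	Called once at boot with the kernel args.
*/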
2043 status_t
2044 team_init(kernel_args* args)
2045 {
2046 	struct process_session* session;
2047 	struct process_group* group;
2048 
2049 	// create the team hash table
2050 	sTeamHash = hash_init(16, offsetof(struct team, next),
2051 		&team_struct_compare, &team_struct_hash);
2052 
2053 	sGroupHash = hash_init(16, offsetof(struct process_group, next),
2054 		&process_group_compare, &process_group_hash);
2055 
2056 	// create initial session and process groups
2057 
2058 	session = create_process_session(1);
2059 	if (session == NULL)
2060 		panic("Could not create initial session.\n");
2061 
2062 	group = create_process_group(1);
2063 	if (group == NULL)
2064 		panic("Could not create initial process group.\n");
2065 
2066 	insert_group_into_session(session, group);
2067 
2068 	// create the kernel team
2069 	sKernelTeam = create_team_struct("kernel_team", true);
2070 	if (sKernelTeam == NULL)
2071 		panic("could not create kernel team!\n");
2072 	strcpy(sKernelTeam->args, sKernelTeam->name);
2073 	sKernelTeam->state = TEAM_STATE_NORMAL;
2074 
2075 	sKernelTeam->saved_set_uid = 0;
2076 	sKernelTeam->real_uid = 0;
2077 	sKernelTeam->effective_uid = 0;
2078 	sKernelTeam->saved_set_gid = 0;
2079 	sKernelTeam->real_gid = 0;
2080 	sKernelTeam->effective_gid = 0;
2081 	sKernelTeam->supplementary_groups = NULL;
2082 	sKernelTeam->supplementary_group_count = 0;
2083 
2084 	insert_team_into_group(group, sKernelTeam);
2085 
2086 	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
2087 	if (sKernelTeam->io_context == NULL)
2088 		panic("could not create io_context for kernel team!\n");
2089 
2090 	// stick it in the team hash
2091 	hash_insert(sTeamHash, sKernelTeam);
2092 
2093 	add_debugger_command_etc("team", &dump_team_info,
2094 		"Dump info about a particular team",
2095 		"[ <id> | <address> | <name> ]\n"
2096 		"Prints information about the specified team. If no argument is given\n"
2097 		"the current team is selected.\n"
2098 		"  <id>       - The ID of the team.\n"
2099 		"  <address>  - The address of the team structure.\n"
2100 		"  <name>     - The team's name.\n", 0);
2101 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2102 		"\n"
2103 		"Prints a list of all existing teams.\n", 0);
2104 
2105 	new(&sNotificationService) TeamNotificationService();
2106 
2107 	return B_OK;
2108 }
2109 
2110 
2111 int32
2112 team_max_teams(void)
2113 {
2114 	return sMaxTeams;
2115 }
2116 
2117 
2118 int32
2119 team_used_teams(void)
2120 {
2121 	return sUsedTeams;
2122 }
2123 
2124 
2125 /*!	Iterates through the list of teams. The team spinlock must be held.
2126 */
2127 struct team*
2128 team_iterate_through_teams(team_iterator_callback callback, void* cookie)
2129 {
2130 	struct hash_iterator iterator;
2131 	hash_open(sTeamHash, &iterator);
2132 
2133 	struct team* team;
2134 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
2135 		if (callback(team, cookie))
2136 			break;
2137 	}
2138 
2139 	hash_close(sTeamHash, &iterator, false);
2140 
2141 	return team;
2142 }
2143 
2144 
2145 /*! Fills the provided death entry if it's in the team.
2146 	You need to have the team lock held when calling this function.
2147 */
2148 job_control_entry*
2149 team_get_death_entry(struct team* team, thread_id child, bool* _deleteEntry)
2150 {
2151 	if (child <= 0)
2152 		return NULL;
2153 
2154 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2155 		child);
2156 	if (entry) {
2157 		// only remove the entry if the caller is the parent of the found team
2158 		if (team_get_current_team_id() == entry->thread) {
2159 			team->dead_children->entries.Remove(entry);
2160 			team->dead_children->count--;
2161 			*_deleteEntry = true;
2162 		} else {
2163 			*_deleteEntry = false;
2164 		}
2165 	}
2166 
2167 	return entry;
2168 }
2169 
2170 
2171 /*! Quick check to see if we have a valid team ID. */
2172 bool
2173 team_is_valid(team_id id)
2174 {
2175 	struct team* team;
2176 	cpu_status state;
2177 
2178 	if (id <= 0)
2179 		return false;
2180 
2181 	state = disable_interrupts();
2182 	GRAB_TEAM_LOCK();
2183 
2184 	team = team_get_team_struct_locked(id);
2185 
2186 	RELEASE_TEAM_LOCK();
2187 	restore_interrupts(state);
2188 
2189 	return team != NULL;
2190 }
2191 
2192 
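/*!	Returns the team structure with the given ID, or NULL if there is none.
	The team lock must be held when calling this function.
*/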
2193 struct team*
2194 team_get_team_struct_locked(team_id id)
2195 {
2196 	struct team_key key;
2197 	key.id = id;
2198 
2199 	return (struct team*)hash_lookup(sTeamHash, &key);
2200 }
2201 
2202 
2203 /*! This searches the session of the team for the specified group ID.
2204 	You must hold the team lock when you call this function.
2205 */
2206 struct process_group*
2207 team_get_process_group_locked(struct process_session* session, pid_t id)
2208 {
2209 	struct process_group* group;
2210 	struct team_key key;
2211 	key.id = id;
2212 
2213 	group = (struct process_group*)hash_lookup(sGroupHash, &key);
2214 	if (group != NULL && (session == NULL || session == group->session))
2215 		return group;
2216 
2217 	return NULL;
2218 }
2219 
2220 
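/*!	Frees the given process group. If remove_group_from_session() left the
	group's session pointer set (i.e. the session can be freed as well), the
	session is freed, too.
*/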
2221 void
2222 team_delete_process_group(struct process_group* group)
2223 {
2224 	if (group == NULL)
2225 		return;
2226 
2227 	TRACE(("team_delete_process_group(id = %ld)\n", group->id));
2228 
2229 	// remove_group_from_session() keeps this pointer around
2230 	// only if the session can be freed as well
2231 	if (group->session) {
2232 		TRACE(("team_delete_process_group(): frees session %ld\n",
2233 			group->session->id));
2234 		free(group->session);
2235 	}
2236 
2237 	free(group);
2238 }
2239 
2240 
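/*!	Sets the controlling TTY of the calling team's session and resets the
	session's foreground process group.
*/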
2241 void
2242 team_set_controlling_tty(int32 ttyIndex)
2243 {
2244 	struct team* team = thread_get_current_thread()->team;
2245 
2246 	InterruptsSpinLocker _(gTeamSpinlock);
2247 
2248 	team->group->session->controlling_tty = ttyIndex;
2249 	team->group->session->foreground_group = -1;
2250 }
2251 
2252 
2253 int32
2254 team_get_controlling_tty()
2255 {
2256 	struct team* team = thread_get_current_thread()->team;
2257 
2258 	InterruptsSpinLocker _(gTeamSpinlock);
2259 
2260 	return team->group->session->controlling_tty;
2261 }
2262 
2263 
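/*!	Makes the given process group the foreground group of the calling team's
	session, provided \a ttyIndex denotes the session's controlling TTY and
	the group belongs to the session. If the caller is in a background group
	and neither ignores nor blocks SIGTTOU, SIGTTOU is sent to its group and
	B_INTERRUPTED is returned instead.
*/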
2264 status_t
2265 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2266 {
2267 	struct thread* thread = thread_get_current_thread();
2268 	struct team* team = thread->team;
2269 
2270 	InterruptsSpinLocker locker(gTeamSpinlock);
2271 
2272 	process_session* session = team->group->session;
2273 
2274 	// must be the controlling tty of the calling process
2275 	if (session->controlling_tty != ttyIndex)
2276 		return ENOTTY;
2277 
2278 	// check process group -- must belong to our session
2279 	process_group* group = team_get_process_group_locked(session,
2280 		processGroupID);
2281 	if (group == NULL)
2282 		return B_BAD_VALUE;
2283 
2284 	// If we are a background group, we can only do this unharmed if we
2285 	// ignore or block SIGTTOU. Otherwise the group is sent a SIGTTOU.
2286 	if (session->foreground_group != -1
2287 		&& session->foreground_group != team->group_id
2288 		&& thread->sig_action[SIGTTOU - 1].sa_handler != SIG_IGN
2289 		&& !is_signal_blocked(SIGTTOU)) {
2290 		pid_t groupID = team->group->id;
2291 		locker.Unlock();
2292 		send_signal(-groupID, SIGTTOU);
2293 		return B_INTERRUPTED;
2294 	}
2295 
2296 	team->group->session->foreground_group = processGroupID;
2297 
2298 	return B_OK;
2299 }
2300 
2301 
2302 /*!	Removes the specified team from the global team hash, and from its parent.
2303 	It also moves all of its children up to the parent.
2304 	You must hold the team lock when you call this function.
2305 */
2306 void
2307 team_remove_team(struct team* team)
2308 {
2309 	struct team* parent = team->parent;
2310 
2311 	// remember how long this team lasted
2312 	parent->dead_children->kernel_time += team->dead_threads_kernel_time
2313 		+ team->dead_children->kernel_time;
2314 	parent->dead_children->user_time += team->dead_threads_user_time
2315 		+ team->dead_children->user_time;
2316 
2317 	// Also grab the thread spinlock while removing the team from the hash.
2318 	// This makes the following sequence safe: grab teams lock, lookup team,
2319 	// grab threads lock, unlock teams lock,
2320 	// mutex_lock_threads_lock(<team related lock>), as used in the VFS code to
2321 	// lock another team's IO context.
2322 	GRAB_THREAD_LOCK();
2323 	hash_remove(sTeamHash, team);
2324 	RELEASE_THREAD_LOCK();
2325 	sUsedTeams--;
2326 
2327 	team->state = TEAM_STATE_DEATH;
2328 
2329 	// If we're a controlling process (i.e. a session leader with controlling
2330 	// terminal), there's a bit of signalling we have to do.
2331 	if (team->session_id == team->id
2332 		&& team->group->session->controlling_tty >= 0) {
2333 		process_session* session = team->group->session;
2334 
2335 		session->controlling_tty = -1;
2336 
2337 		// send SIGHUP to the foreground
2338 		if (session->foreground_group >= 0) {
2339 			send_signal_etc(-session->foreground_group, SIGHUP,
2340 				SIGNAL_FLAG_TEAMS_LOCKED);
2341 		}
2342 
2343 		// send SIGHUP + SIGCONT to all newly-orphaned process groups with
2344 		// stopped processes
2345 		struct team* child = team->children;
2346 		while (child != NULL) {
2347 			process_group* childGroup = child->group;
2348 			if (!childGroup->orphaned
2349 				&& update_orphaned_process_group(childGroup, team->id)
2350 				&& process_group_has_stopped_processes(childGroup)) {
2351 				send_signal_etc(-childGroup->id, SIGHUP,
2352 					SIGNAL_FLAG_TEAMS_LOCKED);
2353 				send_signal_etc(-childGroup->id, SIGCONT,
2354 					SIGNAL_FLAG_TEAMS_LOCKED);
2355 			}
2356 
2357 			child = child->siblings_next;
2358 		}
2359 	} else {
2360 		// update "orphaned" flags of all children's process groups
2361 		struct team* child = team->children;
2362 		while (child != NULL) {
2363 			process_group* childGroup = child->group;
2364 			if (!childGroup->orphaned)
2365 				update_orphaned_process_group(childGroup, team->id);
2366 
2367 			child = child->siblings_next;
2368 		}
2369 
2370 		// update "orphaned" flag of this team's process group
2371 		update_orphaned_process_group(team->group, team->id);
2372 	}
2373 
2374 	// reparent each of the team's children
2375 	reparent_children(team);
2376 
2377 	// remove us from our process group
2378 	remove_team_from_group(team);
2379 
2380 	// remove us from our parent
2381 	remove_team_from_parent(parent, team);
2382 }
2383 
2384 
2385 /*!	Kills all threads but the main thread of the team.
2386 	To be called on exit of the team's main thread. The teams spinlock must be
2387 	held. The function may temporarily drop the spinlock, but will reacquire it
2388 	before it returns.
2389 	\param team The team in question.
2390 	\param state The CPU state as returned by disable_interrupts(). Will be
2391 		adjusted, if the function needs to unlock and relock.
2392 	\return The port of the debugger for the team, -1 if none. To be passed to
2393 		team_delete_team().
2394 */
2395 port_id
2396 team_shutdown_team(struct team* team, cpu_status& state)
2397 {
2398 	ASSERT(thread_get_current_thread() == team->main_thread);
2399 
2400 	// Make sure debugging changes won't happen anymore.
2401 	port_id debuggerPort = -1;
2402 	while (true) {
2403 		// If a debugger change is in progress for the team, we'll have to
2404 		// wait until it is done.
2405 		ConditionVariableEntry waitForDebuggerEntry;
2406 		bool waitForDebugger = false;
2407 
2408 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2409 
2410 		if (team->debug_info.debugger_changed_condition != NULL) {
2411 			team->debug_info.debugger_changed_condition->Add(
2412 				&waitForDebuggerEntry);
2413 			waitForDebugger = true;
2414 		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2415 			// The team is being debugged. That will stop with the termination
2416 			// of the nub thread. Since we won't let go of the team lock until
2417 			// we have set team::death_entry or removed the team from the team
2418 			// hash, no-one can install a debugger anymore. We fetch
2419 			// the debugger's port to send it a message at the bitter end.
2420 			debuggerPort = team->debug_info.debugger_port;
2421 		}
2422 
2423 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2424 
2425 		if (!waitForDebugger)
2426 			break;
2427 
2428 		// wait for the debugger change to be finished
2429 		RELEASE_TEAM_LOCK();
2430 		restore_interrupts(state);
2431 
2432 		waitForDebuggerEntry.Wait();
2433 
2434 		state = disable_interrupts();
2435 		GRAB_TEAM_LOCK();
2436 	}
2437 
2438 	// kill all threads but the main thread
2439 	team_death_entry deathEntry;
2440 	deathEntry.condition.Init(team, "team death");
2441 
2442 	while (true) {
2443 		team->death_entry = &deathEntry;
2444 		deathEntry.remaining_threads = 0;
2445 
2446 		struct thread* thread = team->thread_list;
2447 		while (thread != NULL) {
2448 			if (thread != team->main_thread) {
2449 				send_signal_etc(thread->id, SIGKILLTHR,
2450 					B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED);
2451 				deathEntry.remaining_threads++;
2452 			}
2453 
2454 			thread = thread->team_next;
2455 		}
2456 
2457 		if (deathEntry.remaining_threads == 0)
2458 			break;
2459 
2460 		// there are threads to wait for
2461 		ConditionVariableEntry entry;
2462 		deathEntry.condition.Add(&entry);
2463 
2464 		RELEASE_TEAM_LOCK();
2465 		restore_interrupts(state);
2466 
2467 		entry.Wait();
2468 
2469 		state = disable_interrupts();
2470 		GRAB_TEAM_LOCK();
2471 	}
2472 
2473 	team->death_entry = NULL;
2474 		// That makes the team "undead" again, but we have the teams spinlock
2475 		// and our caller won't drop it until after removing the team from the
2476 		// teams hash table.
2477 
2478 	return debuggerPort;
2479 }
2480 
2481 
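/*!	Frees all remaining resources of a team whose threads are already gone:
	wakes up a thread possibly still waiting for the team to be loaded,
	invokes and frees the registered team watchers, sends the TEAM_REMOVED
	notification, releases the I/O context, semaphores, ports, images and
	address space, and finally informs the debugger via \a debuggerPort.
*/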
2482 void
2483 team_delete_team(struct team* team, port_id debuggerPort)
2484 {
2485 	team_id teamID = team->id;
2486 
2487 	ASSERT(team->num_threads == 0);
2488 
2489 	// If someone is waiting for this team to be loaded, but it dies
2490 	// unexpectedly before being done, we need to notify the waiting
2491 	// thread now.
2492 
2493 	cpu_status state = disable_interrupts();
2494 	GRAB_TEAM_LOCK();
2495 
2496 	if (team->loading_info) {
2497 		// there's indeed someone waiting
2498 		struct team_loading_info* loadingInfo = team->loading_info;
2499 		team->loading_info = NULL;
2500 
2501 		loadingInfo->result = B_ERROR;
2502 		loadingInfo->done = true;
2503 
2504 		GRAB_THREAD_LOCK();
2505 
2506 		// wake up the waiting thread
2507 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
2508 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
2509 
2510 		RELEASE_THREAD_LOCK();
2511 	}
2512 
2513 	RELEASE_TEAM_LOCK();
2514 	restore_interrupts(state);
2515 
2516 	// notify team watchers
2517 
2518 	{
2519 		// we're not reachable from anyone anymore at this point, so we
2520 		// can safely access the list without any locking
2521 		struct team_watcher* watcher;
2522 		while ((watcher = (struct team_watcher*)list_remove_head_item(
2523 				&team->watcher_list)) != NULL) {
2524 			watcher->hook(teamID, watcher->data);
2525 			free(watcher);
2526 		}
2527 	}
2528 
2529 	sNotificationService.Notify(TEAM_REMOVED, team);
2530 
2531 	// free team resources
2532 
2533 	vfs_put_io_context(team->io_context);
2534 	delete_realtime_sem_context(team->realtime_sem_context);
2535 	xsi_sem_undo(team);
2536 	delete_owned_ports(team);
2537 	sem_delete_owned_sems(team);
2538 	remove_images(team);
2539 	team->address_space->RemoveAndPut();
2540 
2541 	delete_team_struct(team);
2542 
2543 	// notify the debugger that the team is gone
2544 	user_debug_team_deleted(teamID, debuggerPort);
2545 }
2546 
2547 
2548 struct team*
2549 team_get_kernel_team(void)
2550 {
2551 	return sKernelTeam;
2552 }
2553 
2554 
2555 team_id
2556 team_get_kernel_team_id(void)
2557 {
2558 	if (!sKernelTeam)
2559 		return 0;
2560 
2561 	return sKernelTeam->id;
2562 }
2563 
2564 
2565 team_id
2566 team_get_current_team_id(void)
2567 {
2568 	return thread_get_current_thread()->team->id;
2569 }
2570 
2571 
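/*!	Returns a reference to the address space of the given team in
	\a _addressSpace, or B_BAD_VALUE if the team doesn't exist. The kernel
	team (ID 1) is special-cased and resolved without a hash lookup.
*/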
2572 status_t
2573 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
2574 {
2575 	cpu_status state;
2576 	struct team* team;
2577 	status_t status;
2578 
2579 	// ToDo: we need to do something about B_SYSTEM_TEAM vs. its real ID (1)
2580 	if (id == 1) {
2581 		// we're the kernel team, so we don't have to go through all
2582 		// the hassle (locking and hash lookup)
2583 		*_addressSpace = VMAddressSpace::GetKernel();
2584 		return B_OK;
2585 	}
2586 
2587 	state = disable_interrupts();
2588 	GRAB_TEAM_LOCK();
2589 
2590 	team = team_get_team_struct_locked(id);
2591 	if (team != NULL) {
2592 		team->address_space->Get();
2593 		*_addressSpace = team->address_space;
2594 		status = B_OK;
2595 	} else
2596 		status = B_BAD_VALUE;
2597 
2598 	RELEASE_TEAM_LOCK();
2599 	restore_interrupts(state);
2600 
2601 	return status;
2602 }
2603 
2604 
2605 /*!	Sets the team's job control state.
2606 	Interrupts must be disabled and the team lock be held.
2607 	\a threadsLocked indicates whether the thread lock is being held, too.
2608 */
2609 void
2610 team_set_job_control_state(struct team* team, job_control_state newState,
2611 	int signal, bool threadsLocked)
2612 {
2613 	if (team == NULL || team->job_control_entry == NULL)
2614 		return;
2615 
2616 	// don't touch anything, if the state stays the same or the team is already
2617 	// dead
2618 	job_control_entry* entry = team->job_control_entry;
2619 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
2620 		return;
2621 
2622 	T(SetJobControlState(team->id, newState, signal));
2623 
2624 	// remove from the old list
2625 	switch (entry->state) {
2626 		case JOB_CONTROL_STATE_NONE:
2627 			// entry is in no list ATM
2628 			break;
2629 		case JOB_CONTROL_STATE_DEAD:
2630 			// can't get here
2631 			break;
2632 		case JOB_CONTROL_STATE_STOPPED:
2633 			team->parent->stopped_children->entries.Remove(entry);
2634 			break;
2635 		case JOB_CONTROL_STATE_CONTINUED:
2636 			team->parent->continued_children->entries.Remove(entry);
2637 			break;
2638 	}
2639 
2640 	entry->state = newState;
2641 	entry->signal = signal;
2642 
2643 	// add to new list
2644 	team_job_control_children* childList = NULL;
2645 	switch (entry->state) {
2646 		case JOB_CONTROL_STATE_NONE:
2647 			// entry doesn't get into any list
2648 			break;
2649 		case JOB_CONTROL_STATE_DEAD:
2650 			childList = team->parent->dead_children;
2651 			team->parent->dead_children->count++;
2652 			break;
2653 		case JOB_CONTROL_STATE_STOPPED:
2654 			childList = team->parent->stopped_children;
2655 			break;
2656 		case JOB_CONTROL_STATE_CONTINUED:
2657 			childList = team->parent->continued_children;
2658 			break;
2659 	}
2660 
2661 	if (childList != NULL) {
2662 		childList->entries.Add(entry);
2663 		team->parent->dead_children->condition_variable.NotifyAll(
2664 			threadsLocked);
2665 	}
2666 }
2667 
2668 
2669 /*! Adds a hook to the team that is called as soon as this
2670 	team goes away.
2671 	This call might get public in the future.
2672 */
2673 status_t
2674 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
2675 {
2676 	struct team_watcher* watcher;
2677 	struct team* team;
2678 	cpu_status state;
2679 
2680 	if (hook == NULL || teamID < B_OK)
2681 		return B_BAD_VALUE;
2682 
2683 	watcher = (struct team_watcher*)malloc(sizeof(struct team_watcher));
2684 	if (watcher == NULL)
2685 		return B_NO_MEMORY;
2686 
2687 	watcher->hook = hook;
2688 	watcher->data = data;
2689 
2690 	// find team and add watcher
2691 
2692 	state = disable_interrupts();
2693 	GRAB_TEAM_LOCK();
2694 
2695 	team = team_get_team_struct_locked(teamID);
2696 	if (team != NULL)
2697 		list_add_item(&team->watcher_list, watcher);
2698 
2699 	RELEASE_TEAM_LOCK();
2700 	restore_interrupts(state);
2701 
2702 	if (team == NULL) {
2703 		free(watcher);
2704 		return B_BAD_TEAM_ID;
2705 	}
2706 
2707 	return B_OK;
2708 }
2709 
2710 
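/*!	Removes a hook previously installed with start_watching_team(). Both
	\a hook and \a data must match the registered pair; otherwise
	B_ENTRY_NOT_FOUND is returned.
	A minimal usage sketch (hook and cookie names are purely illustrative):
		static void my_team_gone(team_id team, void* cookie) {}
		...
		start_watching_team(teamID, &my_team_gone, cookie);
		// later, if the team is still around:
		stop_watching_team(teamID, &my_team_gone, cookie);
*/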
2711 status_t
2712 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
2713 {
2714 	struct team_watcher* watcher = NULL;
2715 	struct team* team;
2716 	cpu_status state;
2717 
2718 	if (hook == NULL || teamID < B_OK)
2719 		return B_BAD_VALUE;
2720 
2721 	// find team and remove watcher (if present)
2722 
2723 	state = disable_interrupts();
2724 	GRAB_TEAM_LOCK();
2725 
2726 	team = team_get_team_struct_locked(teamID);
2727 	if (team != NULL) {
2728 		// search for watcher
2729 		while ((watcher = (struct team_watcher*)list_get_next_item(
2730 				&team->watcher_list, watcher)) != NULL) {
2731 			if (watcher->hook == hook && watcher->data == data) {
2732 				// got it!
2733 				list_remove_item(&team->watcher_list, watcher);
2734 				break;
2735 			}
2736 		}
2737 	}
2738 
2739 	RELEASE_TEAM_LOCK();
2740 	restore_interrupts(state);
2741 
2742 	if (watcher == NULL)
2743 		return B_ENTRY_NOT_FOUND;
2744 
2745 	free(watcher);
2746 	return B_OK;
2747 }
2748 
2749 
2750 /*!	The team lock must be held or the team must still be single threaded.
2751 */
2752 struct user_thread*
2753 team_allocate_user_thread(struct team* team)
2754 {
2755 	if (team->user_data == 0)
2756 		return NULL;
2757 
2758 	user_thread* thread = NULL;
2759 
2760 	// take an entry from the free list, if any
2761 	if (struct free_user_thread* entry = team->free_user_threads) {
2762 		thread = entry->thread;
2763 		team->free_user_threads = entry->next;
2764 		deferred_free(entry);
2765 		return thread;
2766 	} else {
2767 		// enough space left?
2768 		size_t needed = _ALIGN(sizeof(user_thread));
2769 		if (team->user_data_size - team->used_user_data < needed)
2770 			return NULL;
2771 		// TODO: This imposes a per team thread limit! We should resize the
2772 		// area, if necessary. That's problematic at this point, though, since
2773 		// we've got the team lock.
2774 
2775 		thread = (user_thread*)(team->user_data + team->used_user_data);
2776 		team->used_user_data += needed;
2777 	}
2778 
2779 	thread->defer_signals = 0;
2780 	thread->pending_signals = 0;
2781 	thread->wait_status = B_OK;
2782 
2783 	return thread;
2784 }
2785 
2786 
2787 /*!	The team lock must not be held. \a thread must be the current thread.
2788 */
2789 void
2790 team_free_user_thread(struct thread* thread)
2791 {
2792 	user_thread* userThread = thread->user_thread;
2793 	if (userThread == NULL)
2794 		return;
2795 
2796 	// create a free list entry
2797 	free_user_thread* entry
2798 		= (free_user_thread*)malloc(sizeof(free_user_thread));
2799 	if (entry == NULL) {
2800 		// we have to leak the user thread :-/
2801 		return;
2802 	}
2803 
2804 	InterruptsSpinLocker _(gTeamSpinlock);
2805 
2806 	// detach from thread
2807 	SpinLocker threadLocker(gThreadSpinlock);
2808 	thread->user_thread = NULL;
2809 	threadLocker.Unlock();
2810 
2811 	entry->thread = userThread;
2812 	entry->next = thread->team->free_user_threads;
2813 	thread->team->free_user_threads = entry;
2814 }
2815 
2816 
2817 //	#pragma mark - Public kernel API
2818 
2819 
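/*!	Convenience wrapper around load_image_etc(): loads the image as a child
	of the current team with B_NORMAL_PRIORITY and waits until it has been
	loaded completely (B_WAIT_TILL_LOADED).
	A minimal usage sketch (path and arguments are illustrative only):
		const char* args[] = { "/bin/sh", "-c", "sync" };
		thread_id thread = load_image(3, args, NULL);
		if (thread >= 0)
			resume_thread(thread);
	Returns the ID of the new team's main thread or an error code.
*/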
2820 thread_id
2821 load_image(int32 argCount, const char** args, const char** env)
2822 {
2823 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
2824 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
2825 }
2826 
2827 
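/*!	Flattens the given arguments and environment into a single buffer laid
	out as
		argv[0..argCount-1], NULL, envp[0..envCount-1], NULL, string data
	and passes it on to load_image_internal(), which creates the new team.
	Returns the ID of the new team's main thread or an error code.
*/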
2828 thread_id
2829 load_image_etc(int32 argCount, const char* const* args,
2830 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
2831 {
2832 	// we need to flatten the args and environment
2833 
2834 	if (args == NULL)
2835 		return B_BAD_VALUE;
2836 
2837 	// determine total needed size
2838 	int32 argSize = 0;
2839 	for (int32 i = 0; i < argCount; i++)
2840 		argSize += strlen(args[i]) + 1;
2841 
2842 	int32 envCount = 0;
2843 	int32 envSize = 0;
2844 	while (env != NULL && env[envCount] != NULL)
2845 		envSize += strlen(env[envCount++]) + 1;
2846 
2847 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
2848 	if (size > MAX_PROCESS_ARGS_SIZE)
2849 		return B_TOO_MANY_ARGS;
2850 
2851 	// allocate space
2852 	char** flatArgs = (char**)malloc(size);
2853 	if (flatArgs == NULL)
2854 		return B_NO_MEMORY;
2855 
2856 	char** slot = flatArgs;
2857 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
2858 
2859 	// copy arguments and environment
2860 	for (int32 i = 0; i < argCount; i++) {
2861 		int32 argSize = strlen(args[i]) + 1;
2862 		memcpy(stringSpace, args[i], argSize);
2863 		*slot++ = stringSpace;
2864 		stringSpace += argSize;
2865 	}
2866 
2867 	*slot++ = NULL;
2868 
2869 	for (int32 i = 0; i < envCount; i++) {
2870 		int32 envSize = strlen(env[i]) + 1;
2871 		memcpy(stringSpace, env[i], envSize);
2872 		*slot++ = stringSpace;
2873 		stringSpace += envSize;
2874 	}
2875 
2876 	*slot++ = NULL;
2877 
2878 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
2879 		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
2880 
2881 	free(flatArgs);
2882 		// load_image_internal() unset our variable if it took over ownership
2883 
2884 	return thread;
2885 }
2886 
2887 
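/*!	Waits for the given team to terminate by waiting for its main thread;
	the main thread's exit code is returned in \a _returnCode.
*/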
2888 status_t
2889 wait_for_team(team_id id, status_t* _returnCode)
2890 {
2891 	struct team* team;
2892 	thread_id thread;
2893 	cpu_status state;
2894 
2895 	// find main thread and wait for that
2896 
2897 	state = disable_interrupts();
2898 	GRAB_TEAM_LOCK();
2899 
2900 	team = team_get_team_struct_locked(id);
2901 	if (team != NULL && team->main_thread != NULL)
2902 		thread = team->main_thread->id;
2903 	else
2904 		thread = B_BAD_THREAD_ID;
2905 
2906 	RELEASE_TEAM_LOCK();
2907 	restore_interrupts(state);
2908 
2909 	if (thread < 0)
2910 		return thread;
2911 
2912 	return wait_for_thread(thread, _returnCode);
2913 }
2914 
2915 
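/*!	Kills the given team by killing its main thread, whose cleanup code
	takes the rest of the team down with it. The kernel team cannot be
	killed (B_NOT_ALLOWED).
*/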
2916 status_t
2917 kill_team(team_id id)
2918 {
2919 	status_t status = B_OK;
2920 	thread_id threadID = -1;
2921 	struct team* team;
2922 	cpu_status state;
2923 
2924 	state = disable_interrupts();
2925 	GRAB_TEAM_LOCK();
2926 
2927 	team = team_get_team_struct_locked(id);
2928 	if (team != NULL) {
2929 		if (team != sKernelTeam) {
2930 			threadID = team->id;
2931 				// the team ID is the same as the ID of its main thread
2932 		} else
2933 			status = B_NOT_ALLOWED;
2934 	} else
2935 		status = B_BAD_THREAD_ID;
2936 
2937 	RELEASE_TEAM_LOCK();
2938 	restore_interrupts(state);
2939 
2940 	if (status < B_OK)
2941 		return status;
2942 
2943 	// just kill the main thread in the team. The cleanup code there will
2944 	// take care of the team
2945 	return kill_thread(threadID);
2946 }
2947 
2948 
2949 status_t
2950 _get_team_info(team_id id, team_info* info, size_t size)
2951 {
2952 	cpu_status state;
2953 	status_t status = B_OK;
2954 	struct team* team;
2955 
2956 	state = disable_interrupts();
2957 	GRAB_TEAM_LOCK();
2958 
2959 	if (id == B_CURRENT_TEAM)
2960 		team = thread_get_current_thread()->team;
2961 	else
2962 		team = team_get_team_struct_locked(id);
2963 
2964 	if (team == NULL) {
2965 		status = B_BAD_TEAM_ID;
2966 		goto err;
2967 	}
2968 
2969 	status = fill_team_info(team, info, size);
2970 
2971 err:
2972 	RELEASE_TEAM_LOCK();
2973 	restore_interrupts(state);
2974 
2975 	return status;
2976 }
2977 
2978 
2979 status_t
2980 _get_next_team_info(int32* cookie, team_info* info, size_t size)
2981 {
2982 	status_t status = B_BAD_TEAM_ID;
2983 	struct team* team = NULL;
2984 	int32 slot = *cookie;
2985 	team_id lastTeamID;
2986 	cpu_status state;
2987 
2988 	if (slot < 1)
2989 		slot = 1;
2990 
2991 	state = disable_interrupts();
2992 	GRAB_TEAM_LOCK();
2993 
2994 	lastTeamID = peek_next_thread_id();
2995 	if (slot >= lastTeamID)
2996 		goto err;
2997 
2998 	// get next valid team
2999 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3000 		slot++;
3001 
3002 	if (team) {
3003 		status = fill_team_info(team, info, size);
3004 		*cookie = ++slot;
3005 	}
3006 
3007 err:
3008 	RELEASE_TEAM_LOCK();
3009 	restore_interrupts(state);
3010 
3011 	return status;
3012 }
3013 
3014 
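/*!	Returns the accumulated kernel and user CPU times either of the team
	itself (B_TEAM_USAGE_SELF) or of its children (B_TEAM_USAGE_CHILDREN),
	including the times of threads and children that have already died.
*/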
3015 status_t
3016 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3017 {
3018 	bigtime_t kernelTime = 0, userTime = 0;
3019 	status_t status = B_OK;
3020 	struct team* team;
3021 	cpu_status state;
3022 
3023 	if (size != sizeof(team_usage_info)
3024 		|| (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN))
3025 		return B_BAD_VALUE;
3026 
3027 	state = disable_interrupts();
3028 	GRAB_TEAM_LOCK();
3029 
3030 	if (id == B_CURRENT_TEAM)
3031 		team = thread_get_current_thread()->team;
3032 	else
3033 		team = team_get_team_struct_locked(id);
3034 
3035 	if (team == NULL) {
3036 		status = B_BAD_TEAM_ID;
3037 		goto out;
3038 	}
3039 
3040 	switch (who) {
3041 		case B_TEAM_USAGE_SELF:
3042 		{
3043 			struct thread* thread = team->thread_list;
3044 
3045 			for (; thread != NULL; thread = thread->team_next) {
3046 				kernelTime += thread->kernel_time;
3047 				userTime += thread->user_time;
3048 			}
3049 
3050 			kernelTime += team->dead_threads_kernel_time;
3051 			userTime += team->dead_threads_user_time;
3052 			break;
3053 		}
3054 
3055 		case B_TEAM_USAGE_CHILDREN:
3056 		{
3057 			struct team* child = team->children;
3058 			for (; child != NULL; child = child->siblings_next) {
3059 				struct thread* thread = child->thread_list;
3060 
3061 				for (; thread != NULL; thread = thread->team_next) {
3062 					kernelTime += thread->kernel_time;
3063 					userTime += thread->user_time;
3064 				}
3065 
3066 				kernelTime += child->dead_threads_kernel_time;
3067 				userTime += child->dead_threads_user_time;
3068 			}
3069 
3070 			kernelTime += team->dead_children->kernel_time;
3071 			userTime += team->dead_children->user_time;
3072 			break;
3073 		}
3074 	}
3075 
3076 out:
3077 	RELEASE_TEAM_LOCK();
3078 	restore_interrupts(state);
3079 
3080 	if (status == B_OK) {
3081 		info->kernel_time = kernelTime;
3082 		info->user_time = userTime;
3083 	}
3084 
3085 	return status;
3086 }
3087 
3088 
3089 pid_t
3090 getpid(void)
3091 {
3092 	return thread_get_current_thread()->team->id;
3093 }
3094 
3095 
3096 pid_t
3097 getppid(void)
3098 {
3099 	struct team* team = thread_get_current_thread()->team;
3100 	cpu_status state;
3101 	pid_t parent;
3102 
3103 	state = disable_interrupts();
3104 	GRAB_TEAM_LOCK();
3105 
3106 	parent = team->parent->id;
3107 
3108 	RELEASE_TEAM_LOCK();
3109 	restore_interrupts(state);
3110 
3111 	return parent;
3112 }
3113 
3114 
3115 pid_t
3116 getpgid(pid_t process)
3117 {
3118 	struct thread* thread;
3119 	pid_t result = -1;
3120 	cpu_status state;
3121 
3122 	if (process == 0)
3123 		process = thread_get_current_thread()->team->id;
3124 
3125 	state = disable_interrupts();
3126 	GRAB_THREAD_LOCK();
3127 
3128 	thread = thread_get_thread_struct_locked(process);
3129 	if (thread != NULL)
3130 		result = thread->team->group_id;
3131 
3132 	RELEASE_THREAD_LOCK();
3133 	restore_interrupts(state);
3134 
3135 	return thread != NULL ? result : B_BAD_VALUE;
3136 }
3137 
3138 
3139 pid_t
3140 getsid(pid_t process)
3141 {
3142 	struct thread* thread;
3143 	pid_t result = -1;
3144 	cpu_status state;
3145 
3146 	if (process == 0)
3147 		process = thread_get_current_thread()->team->id;
3148 
3149 	state = disable_interrupts();
3150 	GRAB_THREAD_LOCK();
3151 
3152 	thread = thread_get_thread_struct_locked(process);
3153 	if (thread != NULL)
3154 		result = thread->team->session_id;
3155 
3156 	RELEASE_THREAD_LOCK();
3157 	restore_interrupts(state);
3158 
3159 	return thread != NULL ? result : B_BAD_VALUE;
3160 }
3161 
3162 
3163 //	#pragma mark - User syscalls
3164 
3165 
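/*!	Syscall backend for exec*(): copies the path and the flattened
	argument/environment block from userland and replaces the current team's
	image via exec_team(). Only returns in case of error.
*/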
3166 status_t
3167 _user_exec(const char* userPath, const char* const* userFlatArgs,
3168 	size_t flatArgsSize, int32 argCount, int32 envCount)
3169 {
3170 	// NOTE: Since this function normally doesn't return, don't use automatic
3171 	// variables that need destruction in the function scope.
3172 	char path[B_PATH_NAME_LENGTH];
3173 
3174 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3175 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3176 		return B_BAD_ADDRESS;
3177 
3178 	// copy and relocate the flat arguments
3179 	char** flatArgs;
3180 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3181 		argCount, envCount, flatArgs);
3182 
3183 	if (error == B_OK) {
3184 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3185 			envCount);
3186 			// this one only returns in case of error
3187 	}
3188 
3189 	free(flatArgs);
3190 	return error;
3191 }
3192 
3193 
3194 thread_id
3195 _user_fork(void)
3196 {
3197 	return fork_team();
3198 }
3199 
3200 
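/*!	Syscall backend for wait()/waitpid(): calls wait_for_child() and, on
	success, copies the wait reason and the child's return code back to
	userland. Errors are passed through the syscall restart handling.
*/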
3201 thread_id
3202 _user_wait_for_child(thread_id child, uint32 flags, int32* _userReason,
3203 	status_t* _userReturnCode)
3204 {
3205 	status_t returnCode;
3206 	int32 reason;
3207 	thread_id deadChild;
3208 
3209 	if ((_userReason != NULL && !IS_USER_ADDRESS(_userReason))
3210 		|| (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode)))
3211 		return B_BAD_ADDRESS;
3212 
3213 	deadChild = wait_for_child(child, flags, &reason, &returnCode);
3214 
3215 	if (deadChild >= B_OK) {
3216 		// copy result data on successful completion
3217 		if ((_userReason != NULL
3218 				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
3219 			|| (_userReturnCode != NULL
3220 				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
3221 					< B_OK)) {
3222 			return B_BAD_ADDRESS;
3223 		}
3224 
3225 		return deadChild;
3226 	}
3227 
3228 	return syscall_restart_handle_post(deadChild);
3229 }
3230 
3231 
3232 pid_t
3233 _user_process_info(pid_t process, int32 which)
3234 {
3235 	// we only allow returning the parent of the current process
3236 	if (which == PARENT_ID
3237 		&& process != 0 && process != thread_get_current_thread()->team->id)
3238 		return B_BAD_VALUE;
3239 
3240 	switch (which) {
3241 		case SESSION_ID:
3242 			return getsid(process);
3243 		case GROUP_ID:
3244 			return getpgid(process);
3245 		case PARENT_ID:
3246 			return getppid();
3247 	}
3248 
3249 	return B_BAD_VALUE;
3250 }
3251 
3252 
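/*!	Implements setpgid(): moves the calling team or one of its children
	into the process group \a groupID within the same session, creating the
	group first if \a groupID equals \a processID. Updates the "orphaned"
	state of all affected process groups and notifies a parent that might be
	waiting in wait_for_child().
*/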
3253 pid_t
3254 _user_setpgid(pid_t processID, pid_t groupID)
3255 {
3256 	struct thread* thread = thread_get_current_thread();
3257 	struct team* currentTeam = thread->team;
3258 	struct team* team;
3259 
3260 	if (groupID < 0)
3261 		return B_BAD_VALUE;
3262 
3263 	if (processID == 0)
3264 		processID = currentTeam->id;
3265 
3266 	// if the group ID is not specified, use the target process' ID
3267 	if (groupID == 0)
3268 		groupID = processID;
3269 
3270 	if (processID == currentTeam->id) {
3271 		// we set our own group
3272 
3273 		// we must not change our process group ID if we're a session leader
3274 		if (is_session_leader(currentTeam))
3275 			return B_NOT_ALLOWED;
3276 	} else {
3277 		// another team is the target of the call -- check it out
3278 		InterruptsSpinLocker _(gTeamSpinlock);
3279 
3280 		team = team_get_team_struct_locked(processID);
3281 		if (team == NULL)
3282 			return ESRCH;
3283 
3284 		// The team must be a child of the calling team and in the same session.
3285 		// (If that's the case it isn't a session leader either.)
3286 		if (team->parent != currentTeam
3287 			|| team->session_id != currentTeam->session_id) {
3288 			return B_NOT_ALLOWED;
3289 		}
3290 
3291 		if (team->group_id == groupID)
3292 			return groupID;
3293 
3294 		// The call is also supposed to fail on a child when the child has
3295 		// already executed exec*() [EACCES].
3296 		if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
3297 			return EACCES;
3298 	}
3299 
3300 	struct process_group* group = NULL;
3301 	if (groupID == processID) {
3302 		// A new process group might be needed.
3303 		group = create_process_group(groupID);
3304 		if (group == NULL)
3305 			return B_NO_MEMORY;
3306 
3307 		// Assume orphaned. We consider the situation of the team's parent
3308 		// below.
3309 		group->orphaned = true;
3310 	}
3311 
3312 	status_t status = B_OK;
3313 	struct process_group* freeGroup = NULL;
3314 
3315 	InterruptsSpinLocker locker(gTeamSpinlock);
3316 
3317 	team = team_get_team_struct_locked(processID);
3318 	if (team != NULL) {
3319 		// check the conditions again -- they might have changed in the meantime
3320 		if (is_session_leader(team)
3321 			|| team->session_id != currentTeam->session_id) {
3322 			status = B_NOT_ALLOWED;
3323 		} else if (team != currentTeam
3324 				&& (team->flags & TEAM_FLAG_EXEC_DONE) != 0) {
3325 			status = EACCES;
3326 		} else if (team->group_id == groupID) {
3327 			// the team is already in the desired process group
3328 			freeGroup = group;
3329 		} else {
3330 			// Check if a process group with the requested ID already exists.
3331 			struct process_group* targetGroup
3332 				= team_get_process_group_locked(team->group->session, groupID);
3333 			if (targetGroup != NULL) {
3334 				// In case of processID == groupID we have to free the
3335 				// allocated group.
3336 				freeGroup = group;
3337 			} else if (processID == groupID) {
3338 				// We created a new process group, let us insert it into the
3339 				// team's session.
3340 				insert_group_into_session(team->group->session, group);
3341 				targetGroup = group;
3342 			}
3343 
3344 			if (targetGroup != NULL) {
3345 				// we got a group, let's move the team there
3346 				process_group* oldGroup = team->group;
3347 
3348 				remove_team_from_group(team);
3349 				insert_team_into_group(targetGroup, team);
3350 
3351 				// Update the "orphaned" flag of all potentially affected
3352 				// groups.
3353 
3354 				// the team's old group
3355 				if (oldGroup->teams != NULL) {
3356 					oldGroup->orphaned = false;
3357 					update_orphaned_process_group(oldGroup, -1);
3358 				}
3359 
3360 				// the team's new group
3361 				struct team* parent = team->parent;
3362 				targetGroup->orphaned &= parent == NULL
3363 					|| parent->group == targetGroup
3364 					|| team->parent->session_id != team->session_id;
3365 
3366 				// children's groups
3367 				struct team* child = team->children;
3368 				while (child != NULL) {
3369 					child->group->orphaned = false;
3370 					update_orphaned_process_group(child->group, -1);
3371 
3372 					child = child->siblings_next;
3373 				}
3374 			} else
3375 				status = B_NOT_ALLOWED;
3376 		}
3377 	} else
3378 		status = B_NOT_ALLOWED;
3379 
3380 	// Changing the process group might have changed the situation for a parent
3381 	// waiting in wait_for_child(). Hence we notify it.
3382 	if (status == B_OK)
3383 		team->parent->dead_children->condition_variable.NotifyAll(false);
3384 
3385 	locker.Unlock();
3386 
3387 	if (status != B_OK) {
3388 		// in case of error, the group hasn't been added into the hash
3389 		team_delete_process_group(group);
3390 	}
3391 
3392 	team_delete_process_group(freeGroup);
3393 
3394 	return status == B_OK ? groupID : status;
3395 }
3396 
3397 
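/*!	Implements setsid(): makes the calling team the leader of a new session
	and of a new process group with the team's ID. Fails with B_NOT_ALLOWED
	if the team already is a process group leader.
*/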
3398 pid_t
3399 _user_setsid(void)
3400 {
3401 	struct team* team = thread_get_current_thread()->team;
3402 	struct process_session* session;
3403 	struct process_group* group;
3404 	cpu_status state;
3405 	bool failed = false;
3406 
3407 	// the team must not already be a process group leader
3408 	if (is_process_group_leader(team))
3409 		return B_NOT_ALLOWED;
3410 
3411 	group = create_process_group(team->id);
3412 	if (group == NULL)
3413 		return B_NO_MEMORY;
3414 
3415 	session = create_process_session(group->id);
3416 	if (session == NULL) {
3417 		team_delete_process_group(group);
3418 		return B_NO_MEMORY;
3419 	}
3420 
3421 	state = disable_interrupts();
3422 	GRAB_TEAM_LOCK();
3423 
3424 	// this may have changed since the check above
3425 	if (!is_process_group_leader(team)) {
3426 		remove_team_from_group(team);
3427 
3428 		insert_group_into_session(session, group);
3429 		insert_team_into_group(group, team);
3430 	} else
3431 		failed = true;
3432 
3433 	RELEASE_TEAM_LOCK();
3434 	restore_interrupts(state);
3435 
3436 	if (failed) {
3437 		team_delete_process_group(group);
3438 		free(session);
3439 		return B_NOT_ALLOWED;
3440 	}
3441 
3442 	return team->group_id;
3443 }
3444 
3445 
3446 status_t
3447 _user_wait_for_team(team_id id, status_t* _userReturnCode)
3448 {
3449 	status_t returnCode;
3450 	status_t status;
3451 
3452 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
3453 		return B_BAD_ADDRESS;
3454 
3455 	status = wait_for_team(id, &returnCode);
3456 	if (status >= B_OK && _userReturnCode != NULL) {
3457 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
3458 				!= B_OK)
3459 			return B_BAD_ADDRESS;
3460 		return B_OK;
3461 	}
3462 
3463 	return syscall_restart_handle_post(status);
3464 }
3465 
3466 
3467 thread_id
3468 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
3469 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
3470 	port_id errorPort, uint32 errorToken)
3471 {
3472 	TRACE(("_user_load_image: argc = %ld\n", argCount));
3473 
3474 	if (argCount < 1)
3475 		return B_BAD_VALUE;
3476 
3477 	// copy and relocate the flat arguments
3478 	char** flatArgs;
3479 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3480 		argCount, envCount, flatArgs);
3481 	if (error != B_OK)
3482 		return error;
3483 
3484 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
3485 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
3486 		errorToken);
3487 
3488 	free(flatArgs);
3489 		// load_image_internal() unset our variable if it took over ownership
3490 
3491 	return thread;
3492 }
3493 
3494 
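/*!	Implements the exit() path: records the return value in the team's main
	thread (and in the calling thread), gives a debugger the chance to
	intervene if B_TEAM_DEBUG_PREVENT_EXIT is set, and then sends SIGKILL to
	the calling thread to start tearing the team down.
*/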
3495 void
3496 _user_exit_team(status_t returnValue)
3497 {
3498 	struct thread* thread = thread_get_current_thread();
3499 	struct team* team = thread->team;
3500 	struct thread* mainThread = team->main_thread;
3501 
3502 	mainThread->exit.status = returnValue;
3503 	mainThread->exit.reason = THREAD_RETURN_EXIT;
3504 
3505 	// Also set the exit code in the current thread for good measure
3506 	if (thread != mainThread) {
3507 		thread->exit.status = returnValue;
3508 		thread->exit.reason = THREAD_RETURN_EXIT;
3509 	}
3510 
3511 	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT)
3512 			!= 0) {
3513 		// This team is currently being debugged, and requested that teams
3514 		// should not be exited.
3515 		user_debug_stop_thread();
3516 	}
3517 
3518 	send_signal(thread->id, SIGKILL);
3519 }
3520 
3521 
3522 status_t
3523 _user_kill_team(team_id team)
3524 {
3525 	return kill_team(team);
3526 }
3527 
3528 
3529 status_t
3530 _user_get_team_info(team_id id, team_info* userInfo)
3531 {
3532 	status_t status;
3533 	team_info info;
3534 
3535 	if (!IS_USER_ADDRESS(userInfo))
3536 		return B_BAD_ADDRESS;
3537 
3538 	status = _get_team_info(id, &info, sizeof(team_info));
3539 	if (status == B_OK) {
3540 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3541 			return B_BAD_ADDRESS;
3542 	}
3543 
3544 	return status;
3545 }
3546 
3547 
3548 status_t
3549 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
3550 {
3551 	status_t status;
3552 	team_info info;
3553 	int32 cookie;
3554 
3555 	if (!IS_USER_ADDRESS(userCookie)
3556 		|| !IS_USER_ADDRESS(userInfo)
3557 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3558 		return B_BAD_ADDRESS;
3559 
3560 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
3561 	if (status != B_OK)
3562 		return status;
3563 
3564 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3565 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3566 		return B_BAD_ADDRESS;
3567 
3568 	return status;
3569 }
3570 
3571 
3572 team_id
3573 _user_get_current_team(void)
3574 {
3575 	return team_get_current_team_id();
3576 }
3577 
3578 
3579 status_t
3580 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
3581 	size_t size)
3582 {
3583 	team_usage_info info;
3584 	status_t status;
3585 
3586 	if (!IS_USER_ADDRESS(userInfo))
3587 		return B_BAD_ADDRESS;
3588 
3589 	status = _get_team_usage_info(team, who, &info, size);
3590 	if (status != B_OK)
3591 		return status;
3592 
3593 	if (user_memcpy(userInfo, &info, size) < B_OK)
3594 		return B_BAD_ADDRESS;
3595 
3596 	return status;
3597 }
3598 
3599