xref: /haiku/src/system/kernel/team.cpp (revision 1345706a9ff6ad0dc041339a02d4259998b0765d)
1 /*
2  * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*!	Team functions */
12 
13 
14 #include <team.h>
15 
16 #include <stdio.h>
17 #include <stdlib.h>
18 #include <string.h>
19 #include <sys/wait.h>
20 
21 #include <OS.h>
22 
23 #include <AutoDeleter.h>
24 #include <FindDirectory.h>
25 
26 #include <boot_device.h>
27 #include <elf.h>
28 #include <file_cache.h>
29 #include <fs/KPath.h>
30 #include <heap.h>
31 #include <int.h>
32 #include <kernel.h>
33 #include <kimage.h>
34 #include <kscheduler.h>
35 #include <ksignal.h>
36 #include <Notifications.h>
37 #include <port.h>
38 #include <posix/realtime_sem.h>
39 #include <posix/xsi_semaphore.h>
40 #include <sem.h>
41 #include <syscall_process_info.h>
42 #include <syscall_restart.h>
43 #include <syscalls.h>
44 #include <tls.h>
45 #include <tracing.h>
46 #include <user_runtime.h>
47 #include <user_thread.h>
48 #include <usergroup.h>
49 #include <vfs.h>
50 #include <vm/vm.h>
51 #include <vm/VMAddressSpace.h>
52 #include <util/AutoLock.h>
53 #include <util/khash.h>
54 
55 //#define TRACE_TEAM
56 #ifdef TRACE_TEAM
57 #	define TRACE(x) dprintf x
58 #else
59 #	define TRACE(x) ;
60 #endif
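
// A minimal usage sketch (illustrative only): TRACE() expects its whole
// printf-style argument list wrapped in an extra pair of parentheses, so the
// macro can forward it to dprintf unchanged when TRACE_TEAM is defined, e.g.
//
//	TRACE(("team_create_thread_start: entry thread %ld\n", thread->id));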
61 
62 
63 struct team_key {
64 	team_id id;
65 };
66 
67 struct team_arg {
68 	char	*path;
69 	char	**flat_args;
70 	size_t	flat_args_size;
71 	uint32	arg_count;
72 	uint32	env_count;
73 	port_id	error_port;
74 	uint32	error_token;
75 };
76 
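// Snapshot of the forking thread's user-space state, taken by fork_team() and
// handed to fork_team_thread_start(), which restores it in the child team's
// main thread before "returning" from fork() into user space.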
77 struct fork_arg {
78 	area_id				user_stack_area;
79 	addr_t				user_stack_base;
80 	size_t				user_stack_size;
81 	addr_t				user_local_storage;
82 	sigset_t			sig_block_mask;
83 	struct sigaction	sig_action[32];
84 	addr_t				signal_stack_base;
85 	size_t				signal_stack_size;
86 	bool				signal_stack_enabled;
87 
88 	struct user_thread* user_thread;
89 
90 	struct arch_fork_arg arch_info;
91 };
92 
93 class TeamNotificationService : public DefaultNotificationService {
94 public:
95 							TeamNotificationService();
96 
97 			void			Notify(uint32 eventCode, struct team* team);
98 };
99 
100 
101 static hash_table* sTeamHash = NULL;
102 static hash_table* sGroupHash = NULL;
103 static struct team* sKernelTeam = NULL;
104 
105 // some arbitrarily chosen limits - they should probably depend on the
106 // available memory (the limit is not yet enforced)
107 static int32 sMaxTeams = 2048;
108 static int32 sUsedTeams = 1;
109 
110 static TeamNotificationService sNotificationService;
111 
112 spinlock gTeamSpinlock = B_SPINLOCK_INITIALIZER;
113 
114 
115 // #pragma mark - Tracing
116 
117 
118 #if TEAM_TRACING
119 namespace TeamTracing {
120 
121 class TeamForked : public AbstractTraceEntry {
122 public:
123 	TeamForked(thread_id forkedThread)
124 		:
125 		fForkedThread(forkedThread)
126 	{
127 		Initialized();
128 	}
129 
130 	virtual void AddDump(TraceOutput& out)
131 	{
132 		out.Print("team forked, new thread %ld", fForkedThread);
133 	}
134 
135 private:
136 	thread_id			fForkedThread;
137 };
138 
139 
140 class ExecTeam : public AbstractTraceEntry {
141 public:
142 	ExecTeam(const char* path, int32 argCount, const char* const* args,
143 			int32 envCount, const char* const* env)
144 		:
145 		fArgCount(argCount),
146 		fArgs(NULL)
147 	{
148 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
149 			false);
150 
151 		// determine the buffer size we need for the args
152 		size_t argBufferSize = 0;
153 		for (int32 i = 0; i < argCount; i++)
154 			argBufferSize += strlen(args[i]) + 1;
155 
156 		// allocate a buffer
157 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
158 		if (fArgs) {
159 			char* buffer = fArgs;
160 			for (int32 i = 0; i < argCount; i++) {
161 				size_t argSize = strlen(args[i]) + 1;
162 				memcpy(buffer, args[i], argSize);
163 				buffer += argSize;
164 			}
165 		}
166 
167 		// ignore env for the time being
168 		(void)envCount;
169 		(void)env;
170 
171 		Initialized();
172 	}
173 
174 	virtual void AddDump(TraceOutput& out)
175 	{
176 		out.Print("team exec, \"%p\", args:", fPath);
177 
178 		if (fArgs != NULL) {
179 			char* args = fArgs;
180 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
181 				out.Print(" \"%s\"", args);
182 				args += strlen(args) + 1;
183 			}
184 		} else
185 			out.Print(" <too long>");
186 	}
187 
188 private:
189 	char*	fPath;
190 	int32	fArgCount;
191 	char*	fArgs;
192 };
193 
194 
195 static const char*
196 job_control_state_name(job_control_state state)
197 {
198 	switch (state) {
199 		case JOB_CONTROL_STATE_NONE:
200 			return "none";
201 		case JOB_CONTROL_STATE_STOPPED:
202 			return "stopped";
203 		case JOB_CONTROL_STATE_CONTINUED:
204 			return "continued";
205 		case JOB_CONTROL_STATE_DEAD:
206 			return "dead";
207 		default:
208 			return "invalid";
209 	}
210 }
211 
212 
213 class SetJobControlState : public AbstractTraceEntry {
214 public:
215 	SetJobControlState(team_id team, job_control_state newState, int signal)
216 		:
217 		fTeam(team),
218 		fNewState(newState),
219 		fSignal(signal)
220 	{
221 		Initialized();
222 	}
223 
224 	virtual void AddDump(TraceOutput& out)
225 	{
226 		out.Print("team set job control state, team %ld, "
227 			"new state: %s, signal: %d",
228 			fTeam, job_control_state_name(fNewState), fSignal);
229 	}
230 
231 private:
232 	team_id				fTeam;
233 	job_control_state	fNewState;
234 	int					fSignal;
235 };
236 
237 
238 class WaitForChild : public AbstractTraceEntry {
239 public:
240 	WaitForChild(pid_t child, uint32 flags)
241 		:
242 		fChild(child),
243 		fFlags(flags)
244 	{
245 		Initialized();
246 	}
247 
248 	virtual void AddDump(TraceOutput& out)
249 	{
250 		out.Print("team wait for child, child: %ld, "
251 			"flags: 0x%lx", fChild, fFlags);
252 	}
253 
254 private:
255 	pid_t	fChild;
256 	uint32	fFlags;
257 };
258 
259 
260 class WaitForChildDone : public AbstractTraceEntry {
261 public:
262 	WaitForChildDone(const job_control_entry& entry)
263 		:
264 		fState(entry.state),
265 		fTeam(entry.thread),
266 		fStatus(entry.status),
267 		fReason(entry.reason),
268 		fSignal(entry.signal)
269 	{
270 		Initialized();
271 	}
272 
273 	WaitForChildDone(status_t error)
274 		:
275 		fTeam(error)
276 	{
277 		Initialized();
278 	}
279 
280 	virtual void AddDump(TraceOutput& out)
281 	{
282 		if (fTeam >= 0) {
283 			out.Print("team wait for child done, team: %ld, "
284 				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
285 				fTeam, job_control_state_name(fState), fStatus, fReason,
286 				fSignal);
287 		} else {
288 			out.Print("team wait for child failed, error: "
289 				"0x%lx, ", fTeam);
290 		}
291 	}
292 
293 private:
294 	job_control_state	fState;
295 	team_id				fTeam;
296 	status_t			fStatus;
297 	uint16				fReason;
298 	uint16				fSignal;
299 };
300 
301 }	// namespace TeamTracing
302 
303 #	define T(x) new(std::nothrow) TeamTracing::x;
304 #else
305 #	define T(x) ;
306 #endif
307 
308 
309 //	#pragma mark - TeamNotificationService
310 
311 
312 TeamNotificationService::TeamNotificationService()
313 	: DefaultNotificationService("teams")
314 {
315 }
316 
317 
318 void
319 TeamNotificationService::Notify(uint32 eventCode, struct team* team)
320 {
321 	char eventBuffer[128];
322 	KMessage event;
323 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
324 	event.AddInt32("event", eventCode);
325 	event.AddInt32("team", team->id);
326 	event.AddPointer("teamStruct", team);
327 
328 	DefaultNotificationService::Notify(event, eventCode);
329 }
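
// A listener subscribed to this service ("teams") receives a KMessage with
// the TEAM_MONITOR what code, an "event" int32 (TEAM_ADDED, TEAM_REMOVED,
// TEAM_EXEC, ...), the "team" ID, and a "teamStruct" pointer - exactly the
// fields filled in above.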
330 
331 
332 //	#pragma mark - Private functions
333 
334 
335 static void
336 _dump_team_info(struct team* team)
337 {
338 	kprintf("TEAM: %p\n", team);
339 	kprintf("id:               %ld (%#lx)\n", team->id, team->id);
340 	kprintf("name:             '%s'\n", team->name);
341 	kprintf("args:             '%s'\n", team->args);
342 	kprintf("next:             %p\n", team->next);
343 	kprintf("parent:           %p", team->parent);
344 	if (team->parent != NULL) {
345 		kprintf(" (id = %ld)\n", team->parent->id);
346 	} else
347 		kprintf("\n");
348 
349 	kprintf("children:         %p\n", team->children);
350 	kprintf("num_threads:      %d\n", team->num_threads);
351 	kprintf("state:            %d\n", team->state);
352 	kprintf("flags:            0x%lx\n", team->flags);
353 	kprintf("io_context:       %p\n", team->io_context);
354 	if (team->address_space)
355 		kprintf("address_space:    %p\n", team->address_space);
356 	kprintf("user data:        %p (area %ld)\n", (void*)team->user_data,
357 		team->user_data_area);
358 	kprintf("free user thread: %p\n", team->free_user_threads);
359 	kprintf("main_thread:      %p\n", team->main_thread);
360 	kprintf("thread_list:      %p\n", team->thread_list);
361 	kprintf("group_id:         %ld\n", team->group_id);
362 	kprintf("session_id:       %ld\n", team->session_id);
363 }
364 
365 
366 static int
367 dump_team_info(int argc, char** argv)
368 {
369 	struct hash_iterator iterator;
370 	struct team* team;
371 	team_id id = -1;
372 	bool found = false;
373 
374 	if (argc < 2) {
375 		struct thread* thread = thread_get_current_thread();
376 		if (thread != NULL && thread->team != NULL)
377 			_dump_team_info(thread->team);
378 		else
379 			kprintf("No current team!\n");
380 		return 0;
381 	}
382 
383 	id = strtoul(argv[1], NULL, 0);
384 	if (IS_KERNEL_ADDRESS(id)) {
385 		// semi-hack
386 		_dump_team_info((struct team*)id);
387 		return 0;
388 	}
389 
390 	// walk through the team list, trying to match name or id
391 	hash_open(sTeamHash, &iterator);
392 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
393 		if ((team->name && strcmp(argv[1], team->name) == 0)
394 			|| team->id == id) {
395 			_dump_team_info(team);
396 			found = true;
397 			break;
398 		}
399 	}
400 	hash_close(sTeamHash, &iterator, false);
401 
402 	if (!found)
403 		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
404 	return 0;
405 }
406 
407 
408 static int
409 dump_teams(int argc, char** argv)
410 {
411 	struct hash_iterator iterator;
412 	struct team* team;
413 
414 	kprintf("team           id  parent      name\n");
415 	hash_open(sTeamHash, &iterator);
416 
417 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
418 		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->name);
419 	}
420 
421 	hash_close(sTeamHash, &iterator, false);
422 	return 0;
423 }
424 
425 
426 static int
427 team_struct_compare(void* _p, const void* _key)
428 {
429 	struct team* p = (struct team*)_p;
430 	const struct team_key* key = (const struct team_key*)_key;
431 
432 	if (p->id == key->id)
433 		return 0;
434 
435 	return 1;
436 }
437 
438 
439 static uint32
440 team_struct_hash(void* _p, const void* _key, uint32 range)
441 {
442 	struct team* p = (struct team*)_p;
443 	const struct team_key* key = (const struct team_key*)_key;
444 
445 	if (p != NULL)
446 		return p->id % range;
447 
448 	return (uint32)key->id % range;
449 }
450 
451 
452 static int
453 process_group_compare(void* _group, const void* _key)
454 {
455 	struct process_group* group = (struct process_group*)_group;
456 	const struct team_key* key = (const struct team_key*)_key;
457 
458 	if (group->id == key->id)
459 		return 0;
460 
461 	return 1;
462 }
463 
464 
465 static uint32
466 process_group_hash(void* _group, const void* _key, uint32 range)
467 {
468 	struct process_group* group = (struct process_group*)_group;
469 	const struct team_key* key = (const struct team_key*)_key;
470 
471 	if (group != NULL)
472 		return group->id % range;
473 
474 	return (uint32)key->id % range;
475 }
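
// These compare/hash callbacks are handed to the generic khash table. A rough
// sketch of how they are presumably wired up in team_init() (the table size
// and exact arguments are shown for illustration only):
//
//	sTeamHash = hash_init(16, offsetof(struct team, next),
//		&team_struct_compare, &team_struct_hash);
//	sGroupHash = hash_init(16, offsetof(struct process_group, next),
//		&process_group_compare, &process_group_hash);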
476 
477 
478 static void
479 insert_team_into_parent(struct team* parent, struct team* team)
480 {
481 	ASSERT(parent != NULL);
482 
483 	team->siblings_next = parent->children;
484 	parent->children = team;
485 	team->parent = parent;
486 }
487 
488 
489 /*!	Note: must have team lock held */
490 static void
491 remove_team_from_parent(struct team* parent, struct team* team)
492 {
493 	struct team* child;
494 	struct team* last = NULL;
495 
496 	for (child = parent->children; child != NULL;
497 			child = child->siblings_next) {
498 		if (child == team) {
499 			if (last == NULL)
500 				parent->children = child->siblings_next;
501 			else
502 				last->siblings_next = child->siblings_next;
503 
504 			team->parent = NULL;
505 			break;
506 		}
507 		last = child;
508 	}
509 }
510 
511 
512 /*!	Reparents all children of the given team to the kernel team.
513 	Note: must have team lock held
514 */
515 static void
516 reparent_children(struct team* team)
517 {
518 	struct team* child;
519 
520 	while ((child = team->children) != NULL) {
521 		// remove the child from the current proc and add to the parent
522 		remove_team_from_parent(team, child);
523 		insert_team_into_parent(sKernelTeam, child);
524 	}
525 
526 	// move job control entries too
527 	sKernelTeam->stopped_children->entries.MoveFrom(
528 		&team->stopped_children->entries);
529 	sKernelTeam->continued_children->entries.MoveFrom(
530 		&team->continued_children->entries);
531 
532 	// Note, we don't move the dead children entries. Those will be deleted
533 	// when the team structure is deleted.
534 }
535 
536 
537 static bool
538 is_session_leader(struct team* team)
539 {
540 	return team->session_id == team->id;
541 }
542 
543 
544 static bool
545 is_process_group_leader(struct team* team)
546 {
547 	return team->group_id == team->id;
548 }
549 
550 
551 static void
552 deferred_delete_process_group(struct process_group* group)
553 {
554 	if (group == NULL)
555 		return;
556 
557 	// remove_group_from_session() keeps this pointer around
558 	// only if the session can be freed as well
559 	if (group->session) {
560 		TRACE(("deferred_delete_process_group(): frees session %ld\n",
561 			group->session->id));
562 		deferred_free(group->session);
563 	}
564 
565 	deferred_free(group);
566 }
567 
568 
569 /*!	Removes a group from its session. If it was the session's last group, the
570 	session will be freed together with the group (by deferred_delete_process_group()).
571 	You must hold the team lock when calling this function.
572 */
573 static void
574 remove_group_from_session(struct process_group* group)
575 {
576 	struct process_session* session = group->session;
577 
578 	// the group must belong to a session for this function to have any effect
579 	if (session == NULL)
580 		return;
581 
582 	hash_remove(sGroupHash, group);
583 
584 	// We cannot free the session here; keep the pointer only if this was its
585 	// last group, so that deferred_delete_process_group() will free it later
586 	if (--session->group_count > 0)
587 		group->session = NULL;
588 }
589 
590 
591 /*!	Team lock must be held.
592 */
593 static void
594 acquire_process_group_ref(pid_t groupID)
595 {
596 	process_group* group = team_get_process_group_locked(NULL, groupID);
597 	if (group == NULL) {
598 		panic("acquire_process_group_ref(): unknown group ID: %ld", groupID);
599 		return;
600 	}
601 
602 	group->refs++;
603 }
604 
605 
606 /*!	Team lock must be held.
607 */
608 static void
609 release_process_group_ref(pid_t groupID)
610 {
611 	process_group* group = team_get_process_group_locked(NULL, groupID);
612 	if (group == NULL) {
613 		panic("release_process_group_ref(): unknown group ID: %ld", groupID);
614 		return;
615 	}
616 
617 	if (group->refs <= 0) {
618 		panic("release_process_group_ref(%ld): ref count already 0", groupID);
619 		return;
620 	}
621 
622 	if (--group->refs > 0)
623 		return;
624 
625 	// group is no longer used
626 
627 	remove_group_from_session(group);
628 	deferred_delete_process_group(group);
629 }
630 
631 
632 /*!	You must hold the team lock when calling this function. */
633 static void
634 insert_group_into_session(struct process_session* session,
635 	struct process_group* group)
636 {
637 	if (group == NULL)
638 		return;
639 
640 	group->session = session;
641 	hash_insert(sGroupHash, group);
642 	session->group_count++;
643 }
644 
645 
646 /*!	You must hold the team lock when calling this function. */
647 static void
648 insert_team_into_group(struct process_group* group, struct team* team)
649 {
650 	team->group = group;
651 	team->group_id = group->id;
652 	team->session_id = group->session->id;
653 
654 	team->group_next = group->teams;
655 	group->teams = team;
656 	acquire_process_group_ref(group->id);
657 }
658 
659 
660 /*!	Removes the team from the group.
661 
662 	\param team the team that will be removed from its group
663 */
664 static void
665 remove_team_from_group(struct team* team)
666 {
667 	struct process_group* group = team->group;
668 	struct team* current;
669 	struct team* last = NULL;
670 
671 	// the team must belong to a group for this function to have any effect
672 	if (group == NULL)
673 		return;
674 
675 	for (current = group->teams; current != NULL;
676 			current = current->group_next) {
677 		if (current == team) {
678 			if (last == NULL)
679 				group->teams = current->group_next;
680 			else
681 				last->group_next = current->group_next;
682 
683 			team->group = NULL;
684 			break;
685 		}
686 		last = current;
687 	}
688 
689 	team->group = NULL;
690 	team->group_next = NULL;
691 
692 	release_process_group_ref(group->id);
693 }
694 
695 
696 static struct process_group*
697 create_process_group(pid_t id)
698 {
699 	struct process_group* group
700 		= (struct process_group*)malloc(sizeof(struct process_group));
701 	if (group == NULL)
702 		return NULL;
703 
704 	group->id = id;
705 	group->refs = 0;
706 	group->session = NULL;
707 	group->teams = NULL;
708 	group->orphaned = true;
709 	return group;
710 }
711 
712 
713 static struct process_session*
714 create_process_session(pid_t id)
715 {
716 	struct process_session* session
717 		= (struct process_session*)malloc(sizeof(struct process_session));
718 	if (session == NULL)
719 		return NULL;
720 
721 	session->id = id;
722 	session->group_count = 0;
723 	session->controlling_tty = -1;
724 	session->foreground_group = -1;
725 
726 	return session;
727 }
728 
729 
730 static void
731 set_team_name(struct team* team, const char* name)
732 {
733 	if (const char* lastSlash = strrchr(name, '/'))
734 		name = lastSlash + 1;
735 
736 	strlcpy(team->name, name, B_OS_NAME_LENGTH);
737 }
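
// For example, set_team_name(team, "/bin/sh") stores just "sh", truncated to
// B_OS_NAME_LENGTH if necessary.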
738 
739 
740 static struct team*
741 create_team_struct(const char* name, bool kernel)
742 {
743 	struct team* team = (struct team*)malloc(sizeof(struct team));
744 	if (team == NULL)
745 		return NULL;
746 	MemoryDeleter teamDeleter(team);
747 
748 	team->next = team->siblings_next = team->children = team->parent = NULL;
749 	team->id = allocate_thread_id();
750 	set_team_name(team, name);
751 	team->args[0] = '\0';
752 	team->num_threads = 0;
753 	team->io_context = NULL;
754 	team->address_space = NULL;
755 	team->realtime_sem_context = NULL;
756 	team->xsi_sem_context = NULL;
757 	team->thread_list = NULL;
758 	team->main_thread = NULL;
759 	team->loading_info = NULL;
760 	team->state = TEAM_STATE_BIRTH;
761 	team->flags = 0;
762 	team->death_entry = NULL;
763 	team->user_data_area = -1;
764 	team->user_data = 0;
765 	team->used_user_data = 0;
766 	team->user_data_size = 0;
767 	team->free_user_threads = NULL;
768 
769 	team->supplementary_groups = NULL;
770 	team->supplementary_group_count = 0;
771 
772 	team->dead_threads_kernel_time = 0;
773 	team->dead_threads_user_time = 0;
774 
775 	// dead threads
776 	list_init(&team->dead_threads);
777 	team->dead_threads_count = 0;
778 
779 	// dead children
780 	team->dead_children = new(nothrow) team_dead_children;
781 	if (team->dead_children == NULL)
782 		return NULL;
783 	ObjectDeleter<team_dead_children> deadChildrenDeleter(team->dead_children);
784 
785 	team->dead_children->count = 0;
786 	team->dead_children->kernel_time = 0;
787 	team->dead_children->user_time = 0;
788 
789 	// stopped children
790 	team->stopped_children = new(nothrow) team_job_control_children;
791 	if (team->stopped_children == NULL)
792 		return NULL;
793 	ObjectDeleter<team_job_control_children> stoppedChildrenDeleter(
794 		team->stopped_children);
795 
796 	// continued children
797 	team->continued_children = new(nothrow) team_job_control_children;
798 	if (team->continued_children == NULL)
799 		return NULL;
800 	ObjectDeleter<team_job_control_children> continuedChildrenDeleter(
801 		team->continued_children);
802 
803 	// job control entry
804 	team->job_control_entry = new(nothrow) job_control_entry;
805 	if (team->job_control_entry == NULL)
806 		return NULL;
807 	ObjectDeleter<job_control_entry> jobControlEntryDeleter(
808 		team->job_control_entry);
809 	team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
810 	team->job_control_entry->thread = team->id;
811 	team->job_control_entry->team = team;
812 
813 	list_init(&team->sem_list);
814 	list_init(&team->port_list);
815 	list_init(&team->image_list);
816 	list_init(&team->watcher_list);
817 
818 	clear_team_debug_info(&team->debug_info, true);
819 
820 	if (arch_team_init_team_struct(team, kernel) < 0)
821 		return NULL;
822 
823 	// publish the dead children condition variable
824 	team->dead_children->condition_variable.Init(team->dead_children,
825 		"team children");
826 
827 	// keep all allocated structures
828 	jobControlEntryDeleter.Detach();
829 	continuedChildrenDeleter.Detach();
830 	stoppedChildrenDeleter.Detach();
831 	deadChildrenDeleter.Detach();
832 	teamDeleter.Detach();
833 
834 	return team;
835 }
836 
837 
838 static void
839 delete_team_struct(struct team* team)
840 {
841 	while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
842 			&team->dead_threads)) {
843 		free(threadDeathEntry);
844 	}
845 
846 	while (job_control_entry* entry = team->dead_children->entries.RemoveHead())
847 		delete entry;
848 
849 	while (free_user_thread* entry = team->free_user_threads) {
850 		team->free_user_threads = entry->next;
851 		free(entry);
852 	}
853 
854 	malloc_referenced_release(team->supplementary_groups);
855 
856 	delete team->job_control_entry;
857 		// usually already NULL, since it has been transferred to the parent
858 	delete team->continued_children;
859 	delete team->stopped_children;
860 	delete team->dead_children;
861 	free(team);
862 }
863 
864 
865 static status_t
866 create_team_user_data(struct team* team)
867 {
868 	void* address;
869 	size_t size = 4 * B_PAGE_SIZE;
870 	virtual_address_restrictions virtualRestrictions = {};
871 	virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
872 	virtualRestrictions.address_specification = B_BASE_ADDRESS;
873 	physical_address_restrictions physicalRestrictions = {};
874 	team->user_data_area = create_area_etc(team->id, "user area", size,
875 		B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, &virtualRestrictions,
876 		&physicalRestrictions, &address);
877 	if (team->user_data_area < 0)
878 		return team->user_data_area;
879 
880 	team->user_data = (addr_t)address;
881 	team->used_user_data = 0;
882 	team->user_data_size = size;
883 	team->free_user_threads = NULL;
884 
885 	return B_OK;
886 }
887 
888 
889 static void
890 delete_team_user_data(struct team* team)
891 {
892 	if (team->user_data_area >= 0) {
893 		vm_delete_area(team->id, team->user_data_area, true);
894 		team->user_data = 0;
895 		team->used_user_data = 0;
896 		team->user_data_size = 0;
897 		team->user_data_area = -1;
898 		while (free_user_thread* entry = team->free_user_threads) {
899 			team->free_user_threads = entry->next;
900 			free(entry);
901 		}
902 	}
903 }
904 
905 
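/*!	Copies the flat argument/environment block prepared by userland into
	kernel memory and relocates the pointers it contains. The layout the
	checks below assume is: \a argCount argument pointers followed by a NULL
	terminator, \a envCount environment pointers followed by a NULL
	terminator, and then the string data all of those pointers refer to.
	On success \a _flatArgs is set to the kernel copy, with every pointer
	rewritten to address that copy instead of the user buffer.
*/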
906 static status_t
907 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
908 	int32 argCount, int32 envCount, char**& _flatArgs)
909 {
910 	if (argCount < 0 || envCount < 0)
911 		return B_BAD_VALUE;
912 
913 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
914 		return B_TOO_MANY_ARGS;
915 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
916 		return B_BAD_VALUE;
917 
918 	if (!IS_USER_ADDRESS(userFlatArgs))
919 		return B_BAD_ADDRESS;
920 
921 	// allocate kernel memory
922 	char** flatArgs = (char**)malloc(flatArgsSize);
923 	if (flatArgs == NULL)
924 		return B_NO_MEMORY;
925 
926 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
927 		free(flatArgs);
928 		return B_BAD_ADDRESS;
929 	}
930 
931 	// check and relocate the array
932 	status_t error = B_OK;
933 	const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
934 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
935 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
936 		if (i == argCount || i == argCount + envCount + 1) {
937 			// check array null termination
938 			if (flatArgs[i] != NULL) {
939 				error = B_BAD_VALUE;
940 				break;
941 			}
942 		} else {
943 			// check string
944 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
945 			size_t maxLen = stringEnd - arg;
946 			if (arg < stringBase || arg >= stringEnd
947 					|| strnlen(arg, maxLen) == maxLen) {
948 				error = B_BAD_VALUE;
949 				break;
950 			}
951 
952 			flatArgs[i] = arg;
953 		}
954 	}
955 
956 	if (error == B_OK)
957 		_flatArgs = flatArgs;
958 	else
959 		free(flatArgs);
960 
961 	return error;
962 }
963 
964 
965 static void
966 free_team_arg(struct team_arg* teamArg)
967 {
968 	if (teamArg != NULL) {
969 		free(teamArg->flat_args);
970 		free(teamArg->path);
971 		free(teamArg);
972 	}
973 }
974 
975 
976 static status_t
977 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
978 	size_t flatArgsSize, int32 argCount, int32 envCount, port_id port,
979 	uint32 token)
980 {
981 	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
982 	if (teamArg == NULL)
983 		return B_NO_MEMORY;
984 
985 	teamArg->path = strdup(path);
986 	if (teamArg->path == NULL) {
987 		free(teamArg);
988 		return B_NO_MEMORY;
989 	}
990 
991 	// copy the args over
992 
993 	teamArg->flat_args = flatArgs;
994 	teamArg->flat_args_size = flatArgsSize;
995 	teamArg->arg_count = argCount;
996 	teamArg->env_count = envCount;
997 	teamArg->error_port = port;
998 	teamArg->error_token = token;
999 
1000 	*_teamArg = teamArg;
1001 	return B_OK;
1002 }
1003 
1004 
1005 static int32
1006 team_create_thread_start(void* args)
1007 {
1008 	status_t err;
1009 	struct thread* thread;
1010 	struct team* team;
1011 	struct team_arg* teamArgs = (struct team_arg*)args;
1012 	const char* path;
1013 	addr_t entry;
1014 	char userStackName[128];
1015 	uint32 sizeLeft;
1016 	char** userArgs;
1017 	char** userEnv;
1018 	struct user_space_program_args* programArgs;
1019 	uint32 argCount, envCount, i;
1020 
1021 	thread = thread_get_current_thread();
1022 	team = thread->team;
1023 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
1024 
1025 	TRACE(("team_create_thread_start: entry thread %ld\n", thread->id));
1026 
1027 	// get a user thread for the main thread
1028 	thread->user_thread = team_allocate_user_thread(team);
1029 
1030 	// create an initial primary stack area
1031 
1032 	// Main stack area layout is currently as follows (starting from 0):
1033 	//
1034 	// size								| usage
1035 	// ---------------------------------+--------------------------------
1036 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
1037 	// TLS_SIZE							| TLS data
1038 	// sizeof(user_space_program_args)	| argument structure for the runtime
1039 	//									| loader
1040 	// flat arguments size				| flat process arguments and environment
1041 
1042 	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
1043 	// the heap
1044 	// TODO: we could reserve the whole USER_STACK_REGION upfront...
1045 
1046 	sizeLeft = PAGE_ALIGN(USER_MAIN_THREAD_STACK_SIZE
1047 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE + TLS_SIZE
1048 		+ sizeof(struct user_space_program_args) + teamArgs->flat_args_size);
1049 	thread->user_stack_base
1050 		= USER_STACK_REGION + USER_STACK_REGION_SIZE - sizeLeft;
1051 	thread->user_stack_size = USER_MAIN_THREAD_STACK_SIZE
1052 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
1053 		// the exact location at the end of the user stack area
1054 
1055 	sprintf(userStackName, "%s_main_stack", team->name);
1056 	virtual_address_restrictions virtualRestrictions = {};
1057 	virtualRestrictions.address = (void*)thread->user_stack_base;
1058 	virtualRestrictions.address_specification = B_EXACT_ADDRESS;
1059 	physical_address_restrictions physicalRestrictions = {};
1060 	thread->user_stack_area = create_area_etc(team->id, userStackName, sizeLeft,
1061 		B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0,
1062 		&virtualRestrictions, &physicalRestrictions, NULL);
1063 	if (thread->user_stack_area < 0) {
1064 		dprintf("team_create_thread_start: could not create default user stack "
1065 			"region: %s\n", strerror(thread->user_stack_area));
1066 
1067 		free_team_arg(teamArgs);
1068 		return thread->user_stack_area;
1069 	}
1070 
1071 	// now that the TLS area is allocated, initialize TLS
1072 	arch_thread_init_tls(thread);
1073 
1074 	argCount = teamArgs->arg_count;
1075 	envCount = teamArgs->env_count;
1076 
1077 	programArgs = (struct user_space_program_args*)(thread->user_stack_base
1078 		+ thread->user_stack_size + TLS_SIZE);
1079 
1080 	userArgs = (char**)(programArgs + 1);
1081 	userEnv = userArgs + argCount + 1;
1082 	path = teamArgs->path;
1083 
1084 	if (user_strlcpy(programArgs->program_path, path,
1085 				sizeof(programArgs->program_path)) < B_OK
1086 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1087 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
1088 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1089 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
1090 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1091 				sizeof(port_id)) < B_OK
1092 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1093 				sizeof(uint32)) < B_OK
1094 		|| user_memcpy(userArgs, teamArgs->flat_args,
1095 				teamArgs->flat_args_size) < B_OK) {
1096 		// the team deletion process will clean this mess
1097 		return B_BAD_ADDRESS;
1098 	}
1099 
1100 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1101 
1102 	// add args to info member
1103 	team->args[0] = 0;
1104 	strlcpy(team->args, path, sizeof(team->args));
1105 	for (i = 1; i < argCount; i++) {
1106 		strlcat(team->args, " ", sizeof(team->args));
1107 		strlcat(team->args, teamArgs->flat_args[i], sizeof(team->args));
1108 	}
1109 
1110 	free_team_arg(teamArgs);
1111 		// the arguments are already on the user stack, we no longer need
1112 		// them in this form
1113 
1114 	// NOTE: Normally arch_thread_enter_userspace() never returns, which means
1115 	// that automatic variables with function scope will never be destroyed.
1116 	{
1117 		// find runtime_loader path
1118 		KPath runtimeLoaderPath;
1119 		err = find_directory(B_BEOS_SYSTEM_DIRECTORY, gBootDevice, false,
1120 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1121 		if (err < B_OK) {
1122 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1123 				strerror(err)));
1124 			return err;
1125 		}
1126 		runtimeLoaderPath.UnlockBuffer();
1127 		err = runtimeLoaderPath.Append("runtime_loader");
1128 
1129 		if (err == B_OK) {
1130 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
1131 				&entry);
1132 		}
1133 	}
1134 
1135 	if (err < B_OK) {
1136 		// Luckily, we don't have to clean up the mess we created - that's
1137 		// done for us by the normal team deletion process
1138 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1139 			"%s\n", strerror(err)));
1140 		return err;
1141 	}
1142 
1143 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1144 
1145 	team->state = TEAM_STATE_NORMAL;
1146 
1147 	// jump to the entry point in user space
1148 	return arch_thread_enter_userspace(thread, entry, programArgs, NULL);
1149 		// only returns in case of error
1150 }
1151 
1152 
1153 static thread_id
1154 load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1155 	int32 envCount, int32 priority, team_id parentID, uint32 flags,
1156 	port_id errorPort, uint32 errorToken)
1157 {
1158 	char** flatArgs = _flatArgs;
1159 	struct team* team;
1160 	const char* threadName;
1161 	thread_id thread;
1162 	status_t status;
1163 	cpu_status state;
1164 	struct team_arg* teamArgs;
1165 	struct team_loading_info loadingInfo;
1166 	io_context* parentIOContext = NULL;
1167 
1168 	if (flatArgs == NULL || argCount == 0)
1169 		return B_BAD_VALUE;
1170 
1171 	const char* path = flatArgs[0];
1172 
1173 	TRACE(("load_image_internal: name '%s', args = %p, argCount = %ld\n",
1174 		path, flatArgs, argCount));
1175 
1176 	team = create_team_struct(path, false);
1177 	if (team == NULL)
1178 		return B_NO_MEMORY;
1179 
1180 	if (flags & B_WAIT_TILL_LOADED) {
1181 		loadingInfo.thread = thread_get_current_thread();
1182 		loadingInfo.result = B_ERROR;
1183 		loadingInfo.done = false;
1184 		team->loading_info = &loadingInfo;
1185 	}
1186 
1187 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
1188 
1189 	// get the parent team
1190 	struct team* parent;
1191 
1192 	if (parentID == B_CURRENT_TEAM)
1193 		parent = thread_get_current_thread()->team;
1194 	else
1195 		parent = team_get_team_struct_locked(parentID);
1196 
1197 	if (parent == NULL) {
1198 		teamLocker.Unlock();
1199 		status = B_BAD_TEAM_ID;
1200 		goto err0;
1201 	}
1202 
1203 	// inherit the parent's user/group
1204 	inherit_parent_user_and_group_locked(team, parent);
1205 
1206 	hash_insert(sTeamHash, team);
1207 	insert_team_into_parent(parent, team);
1208 	insert_team_into_group(parent->group, team);
1209 	sUsedTeams++;
1210 
1211 	// get a reference to the parent's I/O context -- we need it to create ours
1212 	parentIOContext = parent->io_context;
1213 	vfs_get_io_context(parentIOContext);
1214 
1215 	teamLocker.Unlock();
1216 
1217 	// check the executable's set-user/group-id permission
1218 	update_set_id_user_and_group(team, path);
1219 
1220 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1221 		envCount, errorPort, errorToken);
1222 
1223 	if (status != B_OK)
1224 		goto err1;
1225 
1226 	_flatArgs = NULL;
1227 		// args are owned by the team_arg structure now
1228 
1229 	// create a new io_context for this team
1230 	team->io_context = vfs_new_io_context(parentIOContext, true);
1231 	if (!team->io_context) {
1232 		status = B_NO_MEMORY;
1233 		goto err2;
1234 	}
1235 
1236 	// We don't need the parent's I/O context any longer.
1237 	vfs_put_io_context(parentIOContext);
1238 	parentIOContext = NULL;
1239 
1240 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1241 	vfs_exec_io_context(team->io_context);
1242 
1243 	// create an address space for this team
1244 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
1245 		&team->address_space);
1246 	if (status != B_OK)
1247 		goto err3;
1248 
1249 	// cut the path from the main thread name
1250 	threadName = strrchr(path, '/');
1251 	if (threadName != NULL)
1252 		threadName++;
1253 	else
1254 		threadName = path;
1255 
1256 	// create the user data area
1257 	status = create_team_user_data(team);
1258 	if (status != B_OK)
1259 		goto err4;
1260 
1261 	// notify team listeners
1262 	sNotificationService.Notify(TEAM_ADDED, team);
1263 
1264 	// Create a kernel thread, but under the context of the new team
1265 	// The new thread will take over ownership of teamArgs
1266 	thread = spawn_kernel_thread_etc(team_create_thread_start, threadName,
1267 		B_NORMAL_PRIORITY, teamArgs, team->id, team->id);
1268 	if (thread < 0) {
1269 		status = thread;
1270 		goto err5;
1271 	}
1272 
1273 	// wait for the loader of the new team to finish its work
1274 	if ((flags & B_WAIT_TILL_LOADED) != 0) {
1275 		struct thread* mainThread;
1276 
1277 		state = disable_interrupts();
1278 		GRAB_THREAD_LOCK();
1279 
1280 		mainThread = thread_get_thread_struct_locked(thread);
1281 		if (mainThread) {
1282 			// resume the team's main thread
1283 			if (mainThread->state == B_THREAD_SUSPENDED)
1284 				scheduler_enqueue_in_run_queue(mainThread);
1285 
1286 			// Now suspend ourselves until loading is finished.
1287 			// We will be woken either by the thread, when it finished or
1288 			// aborted loading, or when the team is going to die (e.g. is
1289 			// killed). In either case the one setting `loadingInfo.done' is
1290 			// responsible for removing the info from the team structure.
1291 			while (!loadingInfo.done) {
1292 				thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
1293 				scheduler_reschedule();
1294 			}
1295 		} else {
1296 			// Impressive! Someone managed to kill the thread in this short
1297 			// time.
1298 		}
1299 
1300 		RELEASE_THREAD_LOCK();
1301 		restore_interrupts(state);
1302 
1303 		if (loadingInfo.result < B_OK)
1304 			return loadingInfo.result;
1305 	}
1306 
1307 	// notify the debugger
1308 	user_debug_team_created(team->id);
1309 
1310 	return thread;
1311 
1312 err5:
1313 	sNotificationService.Notify(TEAM_REMOVED, team);
1314 	delete_team_user_data(team);
1315 err4:
1316 	team->address_space->Put();
1317 err3:
1318 	vfs_put_io_context(team->io_context);
1319 err2:
1320 	free_team_arg(teamArgs);
1321 err1:
1322 	if (parentIOContext != NULL)
1323 		vfs_put_io_context(parentIOContext);
1324 
1325 	// Remove the team structure from the team hash table and delete the team
1326 	// structure
1327 	state = disable_interrupts();
1328 	GRAB_TEAM_LOCK();
1329 
1330 	remove_team_from_group(team);
1331 	remove_team_from_parent(team->parent, team);
1332 	hash_remove(sTeamHash, team);
1333 
1334 	RELEASE_TEAM_LOCK();
1335 	restore_interrupts(state);
1336 
1337 err0:
1338 	delete_team_struct(team);
1339 
1340 	return status;
1341 }
1342 
1343 
1344 /*!	Almost shuts down the current team and loads a new image into it.
1345 	If successful, this function does not return and will take over ownership of
1346 	the arguments provided.
1347 	This function may only be called from user space.
1348 */
1349 static status_t
1350 exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
1351 	int32 argCount, int32 envCount)
1352 {
1353 	// NOTE: Since this function normally doesn't return, don't use automatic
1354 	// variables that need destruction in the function scope.
1355 	char** flatArgs = _flatArgs;
1356 	struct team* team = thread_get_current_thread()->team;
1357 	struct team_arg* teamArgs;
1358 	const char* threadName;
1359 	status_t status = B_OK;
1360 	cpu_status state;
1361 	struct thread* thread;
1362 	thread_id nubThreadID = -1;
1363 
1364 	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
1365 		path, argCount, envCount, team->id));
1366 
1367 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1368 
1369 	// switching the kernel at run time is probably not a good idea :)
1370 	if (team == team_get_kernel_team())
1371 		return B_NOT_ALLOWED;
1372 
1373 	// we currently need to be single threaded here
1374 	// ToDo: maybe we should just kill all other threads and
1375 	//	make the current thread the team's main thread?
1376 	if (team->main_thread != thread_get_current_thread())
1377 		return B_NOT_ALLOWED;
1378 
1379 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1380 	// We iterate through the thread list to make sure that there's no other
1381 	// thread.
1382 	state = disable_interrupts();
1383 	GRAB_TEAM_LOCK();
1384 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1385 
1386 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1387 		nubThreadID = team->debug_info.nub_thread;
1388 
1389 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1390 
1391 	for (thread = team->thread_list; thread; thread = thread->team_next) {
1392 		if (thread != team->main_thread && thread->id != nubThreadID) {
1393 			status = B_NOT_ALLOWED;
1394 			break;
1395 		}
1396 	}
1397 
1398 	RELEASE_TEAM_LOCK();
1399 	restore_interrupts(state);
1400 
1401 	if (status != B_OK)
1402 		return status;
1403 
1404 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1405 		envCount, -1, 0);
1406 
1407 	if (status != B_OK)
1408 		return status;
1409 
1410 	_flatArgs = NULL;
1411 		// args are owned by the team_arg structure now
1412 
1413 	// ToDo: remove team resources if there are any left
1414 	// thread_atkernel_exit() might not be called at all
1415 
1416 	thread_reset_for_exec();
1417 
1418 	user_debug_prepare_for_exec();
1419 
1420 	delete_team_user_data(team);
1421 	vm_delete_areas(team->address_space, false);
1422 	xsi_sem_undo(team);
1423 	delete_owned_ports(team);
1424 	sem_delete_owned_sems(team);
1425 	remove_images(team);
1426 	vfs_exec_io_context(team->io_context);
1427 	delete_realtime_sem_context(team->realtime_sem_context);
1428 	team->realtime_sem_context = NULL;
1429 
1430 	status = create_team_user_data(team);
1431 	if (status != B_OK) {
1432 		// creating the user data failed -- we're toast
1433 		// TODO: We should better keep the old user area in the first place.
1434 		exit_thread(status);
1435 		return status;
1436 	}
1437 
1438 	user_debug_finish_after_exec();
1439 
1440 	// rename the team
1441 
1442 	set_team_name(team, path);
1443 
1444 	// cut the path from the team name and rename the main thread, too
1445 	threadName = strrchr(path, '/');
1446 	if (threadName != NULL)
1447 		threadName++;
1448 	else
1449 		threadName = path;
1450 	rename_thread(thread_get_current_thread_id(), threadName);
1451 
1452 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1453 
1454 	// Update user/group according to the executable's set-user/group-id
1455 	// permission.
1456 	update_set_id_user_and_group(team, path);
1457 
1458 	user_debug_team_exec();
1459 
1460 	// notify team listeners
1461 	sNotificationService.Notify(TEAM_EXEC, team);
1462 
1463 	status = team_create_thread_start(teamArgs);
1464 		// this one usually doesn't return...
1465 
1466 	// sorry, we have to kill us, there is no way out anymore
1467 	// (without any areas left and all that)
1468 	exit_thread(status);
1469 
1470 	// we return a status here since the signal that is sent by the
1471 	// call above is not immediately handled
1472 	return B_ERROR;
1473 }
1474 
1475 
1476 /*! This is the first function to be called from the newly created
1477 	main child thread.
1478 	It fills in everything that's left to do from fork_arg, and then returns
1479 	from the parent's fork() syscall in the child.
1480 */
1481 static int32
1482 fork_team_thread_start(void* _args)
1483 {
1484 	struct thread* thread = thread_get_current_thread();
1485 	struct fork_arg* forkArgs = (struct fork_arg*)_args;
1486 
1487 	struct arch_fork_arg archArgs = forkArgs->arch_info;
1488 		// we need a local copy of the arch dependent part
1489 
1490 	thread->user_stack_area = forkArgs->user_stack_area;
1491 	thread->user_stack_base = forkArgs->user_stack_base;
1492 	thread->user_stack_size = forkArgs->user_stack_size;
1493 	thread->user_local_storage = forkArgs->user_local_storage;
1494 	thread->sig_block_mask = forkArgs->sig_block_mask;
1495 	thread->user_thread = forkArgs->user_thread;
1496 	memcpy(thread->sig_action, forkArgs->sig_action,
1497 		sizeof(forkArgs->sig_action));
1498 	thread->signal_stack_base = forkArgs->signal_stack_base;
1499 	thread->signal_stack_size = forkArgs->signal_stack_size;
1500 	thread->signal_stack_enabled = forkArgs->signal_stack_enabled;
1501 
1502 	arch_thread_init_tls(thread);
1503 
1504 	free(forkArgs);
1505 
1506 	// set frame of the parent thread to this one, too
1507 
1508 	arch_restore_fork_frame(&archArgs);
1509 		// This one won't return here
1510 
1511 	return 0;
1512 }
1513 
1514 
1515 static thread_id
1516 fork_team(void)
1517 {
1518 	struct thread* parentThread = thread_get_current_thread();
1519 	struct team* parentTeam = parentThread->team;
1520 	struct team* team;
1521 	struct fork_arg* forkArgs;
1522 	struct area_info info;
1523 	thread_id threadID;
1524 	status_t status;
1525 	int32 cookie;
1526 
1527 	TRACE(("fork_team(): team %ld\n", parentTeam->id));
1528 
1529 	if (parentTeam == team_get_kernel_team())
1530 		return B_NOT_ALLOWED;
1531 
1532 	// create a new team
1533 	// TODO: this is very similar to load_image_internal() - maybe we can do
1534 	// something about it :)
1535 
1536 	team = create_team_struct(parentTeam->name, false);
1537 	if (team == NULL)
1538 		return B_NO_MEMORY;
1539 
1540 	strlcpy(team->args, parentTeam->args, sizeof(team->args));
1541 
1542 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
1543 
1544 	// Inherit the parent's user/group.
1545 	inherit_parent_user_and_group_locked(team, parentTeam);
1546 
1547 	hash_insert(sTeamHash, team);
1548 	insert_team_into_parent(parentTeam, team);
1549 	insert_team_into_group(parentTeam->group, team);
1550 	sUsedTeams++;
1551 
1552 	teamLocker.Unlock();
1553 
1554 	// inherit some team debug flags
1555 	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
1556 		& B_TEAM_DEBUG_INHERITED_FLAGS;
1557 
1558 	forkArgs = (struct fork_arg*)malloc(sizeof(struct fork_arg));
1559 	if (forkArgs == NULL) {
1560 		status = B_NO_MEMORY;
1561 		goto err1;
1562 	}
1563 
1564 	// create a new io_context for this team
1565 	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
1566 	if (!team->io_context) {
1567 		status = B_NO_MEMORY;
1568 		goto err2;
1569 	}
1570 
1571 	// duplicate the realtime sem context
1572 	if (parentTeam->realtime_sem_context) {
1573 		team->realtime_sem_context = clone_realtime_sem_context(
1574 			parentTeam->realtime_sem_context);
1575 		if (team->realtime_sem_context == NULL) {
1576 			status = B_NO_MEMORY;
1577 			goto err25;
1578 		}
1579 	}
1580 
1581 	// create an address space for this team
1582 	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
1583 		&team->address_space);
1584 	if (status < B_OK)
1585 		goto err3;
1586 
1587 	// copy all areas of the team
1588 	// TODO: should be able to handle stack areas differently (ie. don't have
1589 	// them copy-on-write)
1590 	// TODO: all stacks of other threads than the current one could be left out
1591 
1592 	forkArgs->user_thread = NULL;
1593 
1594 	cookie = 0;
1595 	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
1596 		if (info.area == parentTeam->user_data_area) {
1597 			// don't clone the user area; just create a new one
1598 			status = create_team_user_data(team);
1599 			if (status != B_OK)
1600 				break;
1601 
1602 			forkArgs->user_thread = team_allocate_user_thread(team);
1603 		} else {
1604 			void* address;
1605 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
1606 				&address, B_CLONE_ADDRESS, info.protection, info.area);
1607 			if (area < B_OK) {
1608 				status = area;
1609 				break;
1610 			}
1611 
1612 			if (info.area == parentThread->user_stack_area)
1613 				forkArgs->user_stack_area = area;
1614 		}
1615 	}
1616 
1617 	if (status < B_OK)
1618 		goto err4;
1619 
1620 	if (forkArgs->user_thread == NULL) {
1621 #if KDEBUG
1622 		panic("user data area not found, parent area is %ld",
1623 			parentTeam->user_data_area);
1624 #endif
1625 		status = B_ERROR;
1626 		goto err4;
1627 	}
1628 
1629 	forkArgs->user_stack_base = parentThread->user_stack_base;
1630 	forkArgs->user_stack_size = parentThread->user_stack_size;
1631 	forkArgs->user_local_storage = parentThread->user_local_storage;
1632 	forkArgs->sig_block_mask = parentThread->sig_block_mask;
1633 	memcpy(forkArgs->sig_action, parentThread->sig_action,
1634 		sizeof(forkArgs->sig_action));
1635 	forkArgs->signal_stack_base = parentThread->signal_stack_base;
1636 	forkArgs->signal_stack_size = parentThread->signal_stack_size;
1637 	forkArgs->signal_stack_enabled = parentThread->signal_stack_enabled;
1638 
1639 	arch_store_fork_frame(&forkArgs->arch_info);
1640 
1641 	// copy image list
1642 	image_info imageInfo;
1643 	cookie = 0;
1644 	while (get_next_image_info(parentTeam->id, &cookie, &imageInfo) == B_OK) {
1645 		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
1646 		if (image < 0)
1647 			goto err5;
1648 	}
1649 
1650 	// notify team listeners
1651 	sNotificationService.Notify(TEAM_ADDED, team);
1652 
1653 	// create a kernel thread under the context of the new team
1654 	threadID = spawn_kernel_thread_etc(fork_team_thread_start,
1655 		parentThread->name, parentThread->priority, forkArgs,
1656 		team->id, team->id);
1657 	if (threadID < 0) {
1658 		status = threadID;
1659 		goto err5;
1660 	}
1661 
1662 	// notify the debugger
1663 	user_debug_team_created(team->id);
1664 
1665 	T(TeamForked(threadID));
1666 
1667 	resume_thread(threadID);
1668 	return threadID;
1669 
1670 err5:
1671 	sNotificationService.Notify(TEAM_REMOVED, team);
1672 	remove_images(team);
1673 err4:
1674 	team->address_space->RemoveAndPut();
1675 err3:
1676 	delete_realtime_sem_context(team->realtime_sem_context);
1677 err25:
1678 	vfs_put_io_context(team->io_context);
1679 err2:
1680 	free(forkArgs);
1681 err1:
1682 	// remove the team structure from the team hash table and delete the team
1683 	// structure
1684 	teamLocker.Lock();
1685 
1686 	remove_team_from_group(team);
1687 	remove_team_from_parent(parentTeam, team);
1688 	hash_remove(sTeamHash, team);
1689 
1690 	teamLocker.Unlock();
1691 
1692 	delete_team_struct(team);
1693 
1694 	return status;
1695 }
1696 
1697 
1698 /*!	Returns whether the specified \a parent team has any children belonging
1699 	to the process group specified by \a groupID.
1700 	Must be called with the team lock held.
1701 */
1702 static bool
1703 has_children_in_group(struct team* parent, pid_t groupID)
1704 {
1705 	struct team* team;
1706 
1707 	struct process_group* group = team_get_process_group_locked(
1708 		parent->group->session, groupID);
1709 	if (group == NULL)
1710 		return false;
1711 
1712 	for (team = group->teams; team; team = team->group_next) {
1713 		if (team->parent == parent)
1714 			return true;
1715 	}
1716 
1717 	return false;
1718 }
1719 
1720 
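/*!	Finds a job control entry in \a children matching the waitpid()-style
	\a id: a positive \a id selects the entry of that specific child, -1
	accepts any entry, and an \a id less than -1 selects entries whose
	process group ID equals -id.
	The caller must hold the team lock.
*/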
1721 static job_control_entry*
1722 get_job_control_entry(team_job_control_children* children, pid_t id)
1723 {
1724 	for (JobControlEntryList::Iterator it = children->entries.GetIterator();
1725 		 job_control_entry* entry = it.Next();) {
1726 
1727 		if (id > 0) {
1728 			if (entry->thread == id)
1729 				return entry;
1730 		} else if (id == -1) {
1731 			return entry;
1732 		} else {
1733 			pid_t processGroup
1734 				= (entry->team ? entry->team->group_id : entry->group_id);
1735 			if (processGroup == -id)
1736 				return entry;
1737 		}
1738 	}
1739 
1740 	return NULL;
1741 }
1742 
1743 
1744 static job_control_entry*
1745 get_job_control_entry(struct team* team, pid_t id, uint32 flags)
1746 {
1747 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
1748 
1749 	if (entry == NULL && (flags & WCONTINUED) != 0)
1750 		entry = get_job_control_entry(team->continued_children, id);
1751 
1752 	if (entry == NULL && (flags & WUNTRACED) != 0)
1753 		entry = get_job_control_entry(team->stopped_children, id);
1754 
1755 	return entry;
1756 }
1757 
1758 
1759 job_control_entry::job_control_entry()
1760 	:
1761 	has_group_ref(false)
1762 {
1763 }
1764 
1765 
1766 job_control_entry::~job_control_entry()
1767 {
1768 	if (has_group_ref) {
1769 		InterruptsSpinLocker locker(gTeamSpinlock);
1770 		release_process_group_ref(group_id);
1771 	}
1772 }
1773 
1774 
1775 /*!	Team and thread lock must be held.
1776 */
1777 void
1778 job_control_entry::InitDeadState()
1779 {
1780 	if (team != NULL) {
1781 		struct thread* thread = team->main_thread;
1782 		group_id = team->group_id;
1783 		this->thread = thread->id;
1784 		status = thread->exit.status;
1785 		reason = thread->exit.reason;
1786 		signal = thread->exit.signal;
1787 		team = NULL;
1788 		acquire_process_group_ref(group_id);
1789 		has_group_ref = true;
1790 	}
1791 }
1792 
1793 
1794 job_control_entry&
1795 job_control_entry::operator=(const job_control_entry& other)
1796 {
1797 	state = other.state;
1798 	thread = other.thread;
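	// Deliberately don't take over the group reference: the original entry
	// keeps it and releases it in its destructor.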
1799 	has_group_ref = false;
1800 	team = other.team;
1801 	group_id = other.group_id;
1802 	status = other.status;
1803 	reason = other.reason;
1804 	signal = other.signal;
1805 
1806 	return *this;
1807 }
1808 
1809 
1810 /*! This is the kernel backend for waitpid(). It is a bit more powerful than
1811 	waitpid() when it comes to reporting why a thread has died.
1812 */
1813 static thread_id
1814 wait_for_child(pid_t child, uint32 flags, int32* _reason,
1815 	status_t* _returnCode)
1816 {
1817 	struct thread* thread = thread_get_current_thread();
1818 	struct team* team = thread->team;
1819 	struct job_control_entry foundEntry;
1820 	struct job_control_entry* freeDeathEntry = NULL;
1821 	status_t status = B_OK;
1822 
1823 	TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));
1824 
1825 	T(WaitForChild(child, flags));
1826 
1827 	if (child == 0) {
1828 		// wait for all children in the process group of the calling team
1829 		child = -team->group_id;
1830 	}
1831 
1832 	bool ignoreFoundEntries = false;
1833 	bool ignoreFoundEntriesChecked = false;
1834 
1835 	while (true) {
1836 		InterruptsSpinLocker locker(gTeamSpinlock);
1837 
1838 		// check whether any condition holds
1839 		job_control_entry* entry = get_job_control_entry(team, child, flags);
1840 
1841 		// If we don't have an entry yet, check whether there are any children
1842 		// complying to the process group specification at all.
1843 		if (entry == NULL) {
1844 			// No success yet -- check whether there are any children we could
1845 			// wait for.
1846 			bool childrenExist = false;
1847 			if (child == -1) {
1848 				childrenExist = team->children != NULL;
1849 			} else if (child < -1) {
1850 				childrenExist = has_children_in_group(team, -child);
1851 			} else {
1852 				if (struct team* childTeam = team_get_team_struct_locked(child))
1853 					childrenExist = childTeam->parent == team;
1854 			}
1855 
1856 			if (!childrenExist) {
1857 				// there is no child we could wait for
1858 				status = ECHILD;
1859 			} else {
1860 				// the children we're waiting for are still running
1861 				status = B_WOULD_BLOCK;
1862 			}
1863 		} else {
1864 			// got something
1865 			foundEntry = *entry;
1866 			if (entry->state == JOB_CONTROL_STATE_DEAD) {
1867 				// The child is dead. Reap its death entry.
1868 				freeDeathEntry = entry;
1869 				team->dead_children->entries.Remove(entry);
1870 				team->dead_children->count--;
1871 			} else {
1872 				// The child is well. Reset its job control state.
1873 				team_set_job_control_state(entry->team,
1874 					JOB_CONTROL_STATE_NONE, 0, false);
1875 			}
1876 		}
1877 
1878 		// If we haven't got anything yet, prepare to wait on the condition
1879 		// variable.
1880 		ConditionVariableEntry deadWaitEntry;
1881 
1882 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
1883 			team->dead_children->condition_variable.Add(&deadWaitEntry);
1884 
1885 		locker.Unlock();
1886 
1887 		// we got our entry and can return to our caller
1888 		if (status == B_OK) {
1889 			if (ignoreFoundEntries) {
1890 				// ... unless we shall ignore found entries
1891 				delete freeDeathEntry;
1892 				freeDeathEntry = NULL;
1893 				continue;
1894 			}
1895 
1896 			break;
1897 		}
1898 
1899 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
1900 			T(WaitForChildDone(status));
1901 			return status;
1902 		}
1903 
1904 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
1905 		if (status == B_INTERRUPTED) {
1906 			T(WaitForChildDone(status));
1907 			return status;
1908 		}
1909 
1910 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
1911 		// all our children are dead and fail with ECHILD. We check the
1912 		// condition at this point.
1913 		if (!ignoreFoundEntriesChecked) {
1914 			struct sigaction& handler = thread->sig_action[SIGCHLD - 1];
1915 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
1916 				|| handler.sa_handler == SIG_IGN) {
1917 				ignoreFoundEntries = true;
1918 			}
1919 
1920 			ignoreFoundEntriesChecked = true;
1921 		}
1922 	}
1923 
1924 	delete freeDeathEntry;
1925 
1926 	// When we get here, we have a valid death entry that has already been
1927 	// unregistered from the team or group.
1928 	int reason = 0;
1929 	switch (foundEntry.state) {
1930 		case JOB_CONTROL_STATE_DEAD:
1931 			reason = foundEntry.reason;
1932 			break;
1933 		case JOB_CONTROL_STATE_STOPPED:
1934 			reason = THREAD_STOPPED;
1935 			break;
1936 		case JOB_CONTROL_STATE_CONTINUED:
1937 			reason = THREAD_CONTINUED;
1938 			break;
1939 		case JOB_CONTROL_STATE_NONE:
1940 			// can't happen
1941 			break;
1942 	}
1943 
1944 	*_returnCode = foundEntry.status;
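	// encode the result: the signal (if any) in the upper 16 bits, the job
	// control reason in the lower 16 bits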
1945 	*_reason = (foundEntry.signal << 16) | reason;
1946 
1947 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, provided no
1948 	// other child status is available.
1949 	if (is_signal_blocked(SIGCHLD)) {
1950 		InterruptsSpinLocker locker(gTeamSpinlock);
1951 
1952 		if (get_job_control_entry(team, child, flags) == NULL)
1953 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));
1954 	}
1955 
1956 	// When the team is dead, the main thread continues to live in the kernel
1957 	// team for a very short time. To avoid surprises for the caller, we wait
1958 	// until the thread is really gone.
1959 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
1960 		wait_for_thread(foundEntry.thread, NULL);
1961 
1962 	T(WaitForChildDone(foundEntry));
1963 
1964 	return foundEntry.thread;
1965 }
1966 
1967 
1968 /*! Fills the team_info structure with information from the specified
1969 	team.
1970 	The team lock must be held when called.
1971 */
1972 static status_t
1973 fill_team_info(struct team* team, team_info* info, size_t size)
1974 {
1975 	if (size != sizeof(team_info))
1976 		return B_BAD_VALUE;
1977 
1978 	// ToDo: Set more information for team_info
1979 	memset(info, 0, size);
1980 
1981 	info->team = team->id;
1982 	info->thread_count = team->num_threads;
1983 	info->image_count = count_images(team);
1984 	//info->area_count =
1985 	info->debugger_nub_thread = team->debug_info.nub_thread;
1986 	info->debugger_nub_port = team->debug_info.nub_port;
1987 	//info->uid =
1988 	//info->gid =
1989 
1990 	strlcpy(info->args, team->args, sizeof(info->args));
1991 	info->argc = 1;
1992 
1993 	return B_OK;
1994 }
1995 
1996 
1997 /*!	Updates the \c orphaned field of a process_group and returns its new value.
1998 	Interrupts must be disabled and the team lock must be held.
1999 */
2000 static bool
2001 update_orphaned_process_group(process_group* group, pid_t dyingProcess)
2002 {
2003 	// Orphaned Process Group: "A process group in which the parent of every
2004 	// member is either itself a member of the group or is not a member of the
2005 	// group's session." (Open Group Base Specs Issue 6)
2006 
2007 	// once orphaned, things won't change (exception: cf. setpgid())
2008 	if (group->orphaned)
2009 		return true;
2010 
2011 	struct team* team = group->teams;
2012 	while (team != NULL) {
2013 		struct team* parent = team->parent;
2014 		if (team->id != dyingProcess && parent != NULL
2015 			&& parent->id != dyingProcess
2016 			&& parent->group_id != group->id
2017 			&& parent->session_id == group->session->id) {
2018 			return false;
2019 		}
2020 
2021 		team = team->group_next;
2022 	}
2023 
2024 	group->orphaned = true;
2025 	return true;
2026 }
2027 
2028 
2029 /*!	Returns whether the process group contains stopped processes.
2030 	Interrupts must be disabled and the team lock must be held.
2031 */
2032 static bool
2033 process_group_has_stopped_processes(process_group* group)
2034 {
2035 	SpinLocker _(gThreadSpinlock);
2036 
2037 	struct team* team = group->teams;
2038 	while (team != NULL) {
2039 		if (team->main_thread->state == B_THREAD_SUSPENDED)
2040 			return true;
2041 
2042 		team = team->group_next;
2043 	}
2044 
2045 	return false;
2046 }
2047 
2048 
2049 //	#pragma mark - Private kernel API
2050 
2051 
2052 status_t
2053 team_init(kernel_args* args)
2054 {
2055 	struct process_session* session;
2056 	struct process_group* group;
2057 
2058 	// create the team hash table
2059 	sTeamHash = hash_init(16, offsetof(struct team, next),
2060 		&team_struct_compare, &team_struct_hash);
2061 
2062 	sGroupHash = hash_init(16, offsetof(struct process_group, next),
2063 		&process_group_compare, &process_group_hash);
2064 
2065 	// create initial session and process groups
2066 
2067 	session = create_process_session(1);
2068 	if (session == NULL)
2069 		panic("Could not create initial session.\n");
2070 
2071 	group = create_process_group(1);
2072 	if (group == NULL)
2073 		panic("Could not create initial process group.\n");
2074 
2075 	insert_group_into_session(session, group);
2076 
2077 	// create the kernel team
2078 	sKernelTeam = create_team_struct("kernel_team", true);
2079 	if (sKernelTeam == NULL)
2080 		panic("could not create kernel team!\n");
2081 	strcpy(sKernelTeam->args, sKernelTeam->name);
2082 	sKernelTeam->state = TEAM_STATE_NORMAL;
2083 
2084 	sKernelTeam->saved_set_uid = 0;
2085 	sKernelTeam->real_uid = 0;
2086 	sKernelTeam->effective_uid = 0;
2087 	sKernelTeam->saved_set_gid = 0;
2088 	sKernelTeam->real_gid = 0;
2089 	sKernelTeam->effective_gid = 0;
2090 	sKernelTeam->supplementary_groups = NULL;
2091 	sKernelTeam->supplementary_group_count = 0;
2092 
2093 	insert_team_into_group(group, sKernelTeam);
2094 
2095 	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
2096 	if (sKernelTeam->io_context == NULL)
2097 		panic("could not create io_context for kernel team!\n");
2098 
2099 	// stick it in the team hash
2100 	hash_insert(sTeamHash, sKernelTeam);
2101 
2102 	add_debugger_command_etc("team", &dump_team_info,
2103 		"Dump info about a particular team",
2104 		"[ <id> | <address> | <name> ]\n"
2105 		"Prints information about the specified team. If no argument is given,\n"
2106 		"the current team is selected.\n"
2107 		"  <id>       - The ID of the team.\n"
2108 		"  <address>  - The address of the team structure.\n"
2109 		"  <name>     - The team's name.\n", 0);
2110 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2111 		"\n"
2112 		"Prints a list of all existing teams.\n", 0);
2113 
2114 	new(&sNotificationService) TeamNotificationService();
2115 
2116 	return B_OK;
2117 }
2118 
2119 
2120 int32
2121 team_max_teams(void)
2122 {
2123 	return sMaxTeams;
2124 }
2125 
2126 
2127 int32
2128 team_used_teams(void)
2129 {
2130 	return sUsedTeams;
2131 }
2132 
2133 
2134 /*!	Iterates through the list of teams. The team spinlock must be held.
2135 */
2136 struct team*
2137 team_iterate_through_teams(team_iterator_callback callback, void* cookie)
2138 {
2139 	struct hash_iterator iterator;
2140 	hash_open(sTeamHash, &iterator);
2141 
2142 	struct team* team;
2143 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
2144 		if (callback(team, cookie))
2145 			break;
2146 	}
2147 
2148 	hash_close(sTeamHash, &iterator, false);
2149 
2150 	return team;
2151 }
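
/*	A minimal usage sketch (illustrative only; "count_teams_callback" and
	"count" are hypothetical names, not part of this file). The callback is
	invoked for every team until it returns true, and the caller must already
	hold the team spinlock:

	static bool
	count_teams_callback(struct team* team, void* cookie)
	{
		(*(int32*)cookie)++;
		return false;
			// false: keep iterating
	}

	int32 count = 0;
	InterruptsSpinLocker locker(gTeamSpinlock);
	team_iterate_through_teams(&count_teams_callback, &count);
*/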
2152 
2153 
2154 /*! Returns the given child's death entry, if it is one of the team's dead
2155 	children. You need to have the team lock held when calling this function.
2156 */
2157 job_control_entry*
2158 team_get_death_entry(struct team* team, thread_id child, bool* _deleteEntry)
2159 {
2160 	if (child <= 0)
2161 		return NULL;
2162 
2163 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2164 		child);
2165 	if (entry) {
2166 		// only remove the entry if the caller is the parent of the found team
2167 		if (team_get_current_team_id() == entry->thread) {
2168 			team->dead_children->entries.Remove(entry);
2169 			team->dead_children->count--;
2170 			*_deleteEntry = true;
2171 		} else {
2172 			*_deleteEntry = false;
2173 		}
2174 	}
2175 
2176 	return entry;
2177 }
2178 
2179 
2180 /*! Quick check to see if we have a valid team ID. */
2181 bool
2182 team_is_valid(team_id id)
2183 {
2184 	struct team* team;
2185 	cpu_status state;
2186 
2187 	if (id <= 0)
2188 		return false;
2189 
2190 	state = disable_interrupts();
2191 	GRAB_TEAM_LOCK();
2192 
2193 	team = team_get_team_struct_locked(id);
2194 
2195 	RELEASE_TEAM_LOCK();
2196 	restore_interrupts(state);
2197 
2198 	return team != NULL;
2199 }
2200 
2201 
2202 struct team*
2203 team_get_team_struct_locked(team_id id)
2204 {
2205 	struct team_key key;
2206 	key.id = id;
2207 
2208 	return (struct team*)hash_lookup(sTeamHash, &key);
2209 }
2210 
2211 
2212 /*! Searches the given session for the process group with the specified ID.
2213 	You must hold the team lock when you call this function.
2214 */
2215 struct process_group*
2216 team_get_process_group_locked(struct process_session* session, pid_t id)
2217 {
2218 	struct process_group* group;
2219 	struct team_key key;
2220 	key.id = id;
2221 
2222 	group = (struct process_group*)hash_lookup(sGroupHash, &key);
2223 	if (group != NULL && (session == NULL || session == group->session))
2224 		return group;
2225 
2226 	return NULL;
2227 }
2228 
2229 
2230 void
2231 team_delete_process_group(struct process_group* group)
2232 {
2233 	if (group == NULL)
2234 		return;
2235 
2236 	TRACE(("team_delete_process_group(id = %ld)\n", group->id));
2237 
2238 	// remove_group_from_session() keeps this pointer around
2239 	// only if the session can be freed as well
2240 	if (group->session) {
2241 		TRACE(("team_delete_process_group(): frees session %ld\n",
2242 			group->session->id));
2243 		free(group->session);
2244 	}
2245 
2246 	free(group);
2247 }
2248 
2249 
2250 void
2251 team_set_controlling_tty(int32 ttyIndex)
2252 {
2253 	struct team* team = thread_get_current_thread()->team;
2254 
2255 	InterruptsSpinLocker _(gTeamSpinlock);
2256 
2257 	team->group->session->controlling_tty = ttyIndex;
2258 	team->group->session->foreground_group = -1;
2259 }
2260 
2261 
2262 int32
2263 team_get_controlling_tty()
2264 {
2265 	struct team* team = thread_get_current_thread()->team;
2266 
2267 	InterruptsSpinLocker _(gTeamSpinlock);
2268 
2269 	return team->group->session->controlling_tty;
2270 }
2271 
2272 
2273 status_t
2274 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2275 {
2276 	struct thread* thread = thread_get_current_thread();
2277 	struct team* team = thread->team;
2278 
2279 	InterruptsSpinLocker locker(gTeamSpinlock);
2280 
2281 	process_session* session = team->group->session;
2282 
2283 	// must be the controlling tty of the calling process
2284 	if (session->controlling_tty != ttyIndex)
2285 		return ENOTTY;
2286 
2287 	// check process group -- must belong to our session
2288 	process_group* group = team_get_process_group_locked(session,
2289 		processGroupID);
2290 	if (group == NULL)
2291 		return B_BAD_VALUE;
2292 
2293 	// If we are a background group, we can only do that unharmed if we
2294 	// ignore or block SIGTTOU; otherwise the group gets a SIGTTOU.
2295 	if (session->foreground_group != -1
2296 		&& session->foreground_group != team->group_id
2297 		&& thread->sig_action[SIGTTOU - 1].sa_handler != SIG_IGN
2298 		&& !is_signal_blocked(SIGTTOU)) {
2299 		pid_t groupID = team->group->id;
2300 		locker.Unlock();
2301 		send_signal(-groupID, SIGTTOU);
2302 		return B_INTERRUPTED;
2303 	}
2304 
2305 	team->group->session->foreground_group = processGroupID;
2306 
2307 	return B_OK;
2308 }
2309 
2310 
2311 /*!	Removes the specified team from the global team hash, and from its parent.
2312 	It also moves all of its children up to the parent.
2313 	You must hold the team lock when you call this function.
2314 */
2315 void
2316 team_remove_team(struct team* team)
2317 {
2318 	struct team* parent = team->parent;
2319 
2320 	// remember how long this team lasted
2321 	parent->dead_children->kernel_time += team->dead_threads_kernel_time
2322 		+ team->dead_children->kernel_time;
2323 	parent->dead_children->user_time += team->dead_threads_user_time
2324 		+ team->dead_children->user_time;
2325 
2326 	// Also grab the thread spinlock while removing the team from the hash.
2327 	// This makes the following sequence safe: grab teams lock, lookup team,
2328 	// grab threads lock, unlock teams lock,
2329 	// mutex_lock_threads_lock(<team related lock>), as used in the VFS code to
2330 	// lock another team's IO context.
2331 	GRAB_THREAD_LOCK();
2332 	hash_remove(sTeamHash, team);
2333 	RELEASE_THREAD_LOCK();
2334 	sUsedTeams--;
2335 
2336 	team->state = TEAM_STATE_DEATH;
2337 
2338 	// If we're a controlling process (i.e. a session leader with a controlling
2339 	// terminal), there's a bit of signalling we have to do.
2340 	if (team->session_id == team->id
2341 		&& team->group->session->controlling_tty >= 0) {
2342 		process_session* session = team->group->session;
2343 
2344 		session->controlling_tty = -1;
2345 
2346 		// send SIGHUP to the foreground
2347 		if (session->foreground_group >= 0) {
2348 			send_signal_etc(-session->foreground_group, SIGHUP,
2349 				SIGNAL_FLAG_TEAMS_LOCKED);
2350 		}
2351 
2352 		// send SIGHUP + SIGCONT to all newly-orphaned process groups with
2353 		// stopped processes
2354 		struct team* child = team->children;
2355 		while (child != NULL) {
2356 			process_group* childGroup = child->group;
2357 			if (!childGroup->orphaned
2358 				&& update_orphaned_process_group(childGroup, team->id)
2359 				&& process_group_has_stopped_processes(childGroup)) {
2360 				send_signal_etc(-childGroup->id, SIGHUP,
2361 					SIGNAL_FLAG_TEAMS_LOCKED);
2362 				send_signal_etc(-childGroup->id, SIGCONT,
2363 					SIGNAL_FLAG_TEAMS_LOCKED);
2364 			}
2365 
2366 			child = child->siblings_next;
2367 		}
2368 	} else {
2369 		// update "orphaned" flags of all children's process groups
2370 		struct team* child = team->children;
2371 		while (child != NULL) {
2372 			process_group* childGroup = child->group;
2373 			if (!childGroup->orphaned)
2374 				update_orphaned_process_group(childGroup, team->id);
2375 
2376 			child = child->siblings_next;
2377 		}
2378 
2379 		// update "orphaned" flag of this team's process group
2380 		update_orphaned_process_group(team->group, team->id);
2381 	}
2382 
2383 	// reparent each of the team's children
2384 	reparent_children(team);
2385 
2386 	// remove us from our process group
2387 	remove_team_from_group(team);
2388 
2389 	// remove us from our parent
2390 	remove_team_from_parent(parent, team);
2391 }
2392 
2393 
2394 /*!	Kills all threads but the main thread of the team.
2395 	To be called on exit of the team's main thread. The teams spinlock must be
2396 	held. The function may temporarily drop the spinlock, but will reacquire it
2397 	before it returns.
2398 	\param team The team in question.
2399 	\param state The CPU state as returned by disable_interrupts(). Will be
2400 		adjusted, if the function needs to unlock and relock.
2401 	\return The port of the debugger for the team, -1 if none. To be passed to
2402 		team_delete_team().
2403 */
2404 port_id
2405 team_shutdown_team(struct team* team, cpu_status& state)
2406 {
2407 	ASSERT(thread_get_current_thread() == team->main_thread);
2408 
2409 	// Make sure debugging changes won't happen anymore.
2410 	port_id debuggerPort = -1;
2411 	while (true) {
2412 		// If a debugger change is in progress for the team, we'll have to
2413 		// wait until it is done.
2414 		ConditionVariableEntry waitForDebuggerEntry;
2415 		bool waitForDebugger = false;
2416 
2417 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2418 
2419 		if (team->debug_info.debugger_changed_condition != NULL) {
2420 			team->debug_info.debugger_changed_condition->Add(
2421 				&waitForDebuggerEntry);
2422 			waitForDebugger = true;
2423 		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2424 			// The team is being debugged. That will stop with the termination
2425 			// of the nub thread. Since we won't let go of the team lock, unless
2426 			// we set team::death_entry or until we have removed the team from
2427 			// the team hash, no-one can install a debugger anymore. We fetch
2428 			// the debugger's port to send it a message at the bitter end.
2429 			debuggerPort = team->debug_info.debugger_port;
2430 		}
2431 
2432 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2433 
2434 		if (!waitForDebugger)
2435 			break;
2436 
2437 		// wait for the debugger change to be finished
2438 		RELEASE_TEAM_LOCK();
2439 		restore_interrupts(state);
2440 
2441 		waitForDebuggerEntry.Wait();
2442 
2443 		state = disable_interrupts();
2444 		GRAB_TEAM_LOCK();
2445 	}
2446 
2447 	// kill all threads but the main thread
2448 	team_death_entry deathEntry;
2449 	deathEntry.condition.Init(team, "team death");
2450 
2451 	while (true) {
2452 		team->death_entry = &deathEntry;
2453 		deathEntry.remaining_threads = 0;
2454 
2455 		struct thread* thread = team->thread_list;
2456 		while (thread != NULL) {
2457 			if (thread != team->main_thread) {
2458 				send_signal_etc(thread->id, SIGKILLTHR,
2459 					B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED);
2460 				deathEntry.remaining_threads++;
2461 			}
2462 
2463 			thread = thread->team_next;
2464 		}
2465 
2466 		if (deathEntry.remaining_threads == 0)
2467 			break;
2468 
2469 		// there are threads to wait for
2470 		ConditionVariableEntry entry;
2471 		deathEntry.condition.Add(&entry);
2472 
2473 		RELEASE_TEAM_LOCK();
2474 		restore_interrupts(state);
2475 
2476 		entry.Wait();
2477 
2478 		state = disable_interrupts();
2479 		GRAB_TEAM_LOCK();
2480 	}
2481 
2482 	team->death_entry = NULL;
2483 		// That makes the team "undead" again, but we have the teams spinlock
2484 		// and our caller won't drop it until after removing the team from the
2485 		// teams hash table.
2486 
2487 	return debuggerPort;
2488 }
2489 
2490 
2491 void
2492 team_delete_team(struct team* team, port_id debuggerPort)
2493 {
2494 	team_id teamID = team->id;
2495 
2496 	ASSERT(team->num_threads == 0);
2497 
2498 	// If someone is waiting for this team to be loaded, but it dies
2499 	// unexpectedly before being done, we need to notify the waiting
2500 	// thread now.
2501 
2502 	cpu_status state = disable_interrupts();
2503 	GRAB_TEAM_LOCK();
2504 
2505 	if (team->loading_info) {
2506 		// there's indeed someone waiting
2507 		struct team_loading_info* loadingInfo = team->loading_info;
2508 		team->loading_info = NULL;
2509 
2510 		loadingInfo->result = B_ERROR;
2511 		loadingInfo->done = true;
2512 
2513 		GRAB_THREAD_LOCK();
2514 
2515 		// wake up the waiting thread
2516 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
2517 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
2518 
2519 		RELEASE_THREAD_LOCK();
2520 	}
2521 
2522 	RELEASE_TEAM_LOCK();
2523 	restore_interrupts(state);
2524 
2525 	// notify team watchers
2526 
2527 	{
2528 		// we're not reachable from anyone anymore at this point, so we
2529 		// can safely access the list without any locking
2530 		struct team_watcher* watcher;
2531 		while ((watcher = (struct team_watcher*)list_remove_head_item(
2532 				&team->watcher_list)) != NULL) {
2533 			watcher->hook(teamID, watcher->data);
2534 			free(watcher);
2535 		}
2536 	}
2537 
2538 	sNotificationService.Notify(TEAM_REMOVED, team);
2539 
2540 	// free team resources
2541 
2542 	vfs_put_io_context(team->io_context);
2543 	delete_realtime_sem_context(team->realtime_sem_context);
2544 	xsi_sem_undo(team);
2545 	delete_owned_ports(team);
2546 	sem_delete_owned_sems(team);
2547 	remove_images(team);
2548 	team->address_space->RemoveAndPut();
2549 
2550 	delete_team_struct(team);
2551 
2552 	// notify the debugger that the team is gone
2553 	user_debug_team_deleted(teamID, debuggerPort);
2554 }
2555 
2556 
2557 struct team*
2558 team_get_kernel_team(void)
2559 {
2560 	return sKernelTeam;
2561 }
2562 
2563 
2564 team_id
2565 team_get_kernel_team_id(void)
2566 {
2567 	if (!sKernelTeam)
2568 		return 0;
2569 
2570 	return sKernelTeam->id;
2571 }
2572 
2573 
2574 team_id
2575 team_get_current_team_id(void)
2576 {
2577 	return thread_get_current_thread()->team->id;
2578 }
2579 
2580 
2581 status_t
2582 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
2583 {
2584 	cpu_status state;
2585 	struct team* team;
2586 	status_t status;
2587 
2588 	// ToDo: we need to do something about B_SYSTEM_TEAM vs. its real ID (1)
2589 	if (id == 1) {
2590 		// we're the kernel team, so we don't have to go through all
2591 		// the hassle (locking and hash lookup)
2592 		*_addressSpace = VMAddressSpace::GetKernel();
2593 		return B_OK;
2594 	}
2595 
2596 	state = disable_interrupts();
2597 	GRAB_TEAM_LOCK();
2598 
2599 	team = team_get_team_struct_locked(id);
2600 	if (team != NULL) {
2601 		team->address_space->Get();
2602 		*_addressSpace = team->address_space;
2603 		status = B_OK;
2604 	} else
2605 		status = B_BAD_VALUE;
2606 
2607 	RELEASE_TEAM_LOCK();
2608 	restore_interrupts(state);
2609 
2610 	return status;
2611 }
2612 
2613 
2614 /*!	Sets the team's job control state.
2615 	Interrupts must be disabled and the team lock be held.
2616 	\a threadsLocked indicates whether the thread lock is being held, too.
2617 */
2618 void
2619 team_set_job_control_state(struct team* team, job_control_state newState,
2620 	int signal, bool threadsLocked)
2621 {
2622 	if (team == NULL || team->job_control_entry == NULL)
2623 		return;
2624 
2625 	// don't touch anything if the state stays the same or the team is already
2626 	// dead
2627 	job_control_entry* entry = team->job_control_entry;
2628 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
2629 		return;
2630 
2631 	T(SetJobControlState(team->id, newState, signal));
2632 
2633 	// remove from the old list
2634 	switch (entry->state) {
2635 		case JOB_CONTROL_STATE_NONE:
2636 			// entry is in no list ATM
2637 			break;
2638 		case JOB_CONTROL_STATE_DEAD:
2639 			// can't get here
2640 			break;
2641 		case JOB_CONTROL_STATE_STOPPED:
2642 			team->parent->stopped_children->entries.Remove(entry);
2643 			break;
2644 		case JOB_CONTROL_STATE_CONTINUED:
2645 			team->parent->continued_children->entries.Remove(entry);
2646 			break;
2647 	}
2648 
2649 	entry->state = newState;
2650 	entry->signal = signal;
2651 
2652 	// add to new list
2653 	team_job_control_children* childList = NULL;
2654 	switch (entry->state) {
2655 		case JOB_CONTROL_STATE_NONE:
2656 			// entry doesn't get into any list
2657 			break;
2658 		case JOB_CONTROL_STATE_DEAD:
2659 			childList = team->parent->dead_children;
2660 			team->parent->dead_children->count++;
2661 			break;
2662 		case JOB_CONTROL_STATE_STOPPED:
2663 			childList = team->parent->stopped_children;
2664 			break;
2665 		case JOB_CONTROL_STATE_CONTINUED:
2666 			childList = team->parent->continued_children;
2667 			break;
2668 	}
2669 
2670 	if (childList != NULL) {
2671 		childList->entries.Add(entry);
2672 		team->parent->dead_children->condition_variable.NotifyAll(
2673 			threadsLocked);
2674 	}
2675 }
2676 
2677 
2678 /*! Adds a hook to the team that is called as soon as this
2679 	team goes away.
2680 	This call might get public in the future.
2681 */
2682 status_t
2683 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
2684 {
2685 	struct team_watcher* watcher;
2686 	struct team* team;
2687 	cpu_status state;
2688 
2689 	if (hook == NULL || teamID < B_OK)
2690 		return B_BAD_VALUE;
2691 
2692 	watcher = (struct team_watcher*)malloc(sizeof(struct team_watcher));
2693 	if (watcher == NULL)
2694 		return B_NO_MEMORY;
2695 
2696 	watcher->hook = hook;
2697 	watcher->data = data;
2698 
2699 	// find team and add watcher
2700 
2701 	state = disable_interrupts();
2702 	GRAB_TEAM_LOCK();
2703 
2704 	team = team_get_team_struct_locked(teamID);
2705 	if (team != NULL)
2706 		list_add_item(&team->watcher_list, watcher);
2707 
2708 	RELEASE_TEAM_LOCK();
2709 	restore_interrupts(state);
2710 
2711 	if (team == NULL) {
2712 		free(watcher);
2713 		return B_BAD_TEAM_ID;
2714 	}
2715 
2716 	return B_OK;
2717 }
2718 
2719 
2720 status_t
2721 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
2722 {
2723 	struct team_watcher* watcher = NULL;
2724 	struct team* team;
2725 	cpu_status state;
2726 
2727 	if (hook == NULL || teamID < B_OK)
2728 		return B_BAD_VALUE;
2729 
2730 	// find team and remove watcher (if present)
2731 
2732 	state = disable_interrupts();
2733 	GRAB_TEAM_LOCK();
2734 
2735 	team = team_get_team_struct_locked(teamID);
2736 	if (team != NULL) {
2737 		// search for watcher
2738 		while ((watcher = (struct team_watcher*)list_get_next_item(
2739 				&team->watcher_list, watcher)) != NULL) {
2740 			if (watcher->hook == hook && watcher->data == data) {
2741 				// got it!
2742 				list_remove_item(&team->watcher_list, watcher);
2743 				break;
2744 			}
2745 		}
2746 	}
2747 
2748 	RELEASE_TEAM_LOCK();
2749 	restore_interrupts(state);
2750 
2751 	if (watcher == NULL)
2752 		return B_ENTRY_NOT_FOUND;
2753 
2754 	free(watcher);
2755 	return B_OK;
2756 }
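
/*	A minimal usage sketch (illustrative only; "my_team_gone_hook" and
	"watchedTeam" are hypothetical names, not part of this file):

	static void
	my_team_gone_hook(team_id id, void* data)
	{
		dprintf("team %ld is gone\n", id);
	}

	if (start_watching_team(watchedTeam, &my_team_gone_hook, NULL) == B_OK) {
		// ... later, if we lose interest before the team goes away:
		stop_watching_team(watchedTeam, &my_team_gone_hook, NULL);
	}
*/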
2757 
2758 
2759 /*!	The team lock must be held or the team must still be single-threaded.
2760 */
2761 struct user_thread*
2762 team_allocate_user_thread(struct team* team)
2763 {
2764 	if (team->user_data == 0)
2765 		return NULL;
2766 
2767 	user_thread* thread = NULL;
2768 
2769 	// take an entry from the free list, if any
2770 	if (struct free_user_thread* entry = team->free_user_threads) {
2771 		thread = entry->thread;
2772 		team->free_user_threads = entry->next;
2773 		deferred_free(entry);
2774 		return thread;
2775 	} else {
2776 		// enough space left?
2777 		size_t needed = _ALIGN(sizeof(user_thread));
2778 		if (team->user_data_size - team->used_user_data < needed)
2779 			return NULL;
2780 		// TODO: This imposes a per team thread limit! We should resize the
2781 		// area, if necessary. That's problematic at this point, though, since
2782 		// we've got the team lock.
2783 
2784 		thread = (user_thread*)(team->user_data + team->used_user_data);
2785 		team->used_user_data += needed;
2786 	}
2787 
2788 	thread->defer_signals = 0;
2789 	thread->pending_signals = 0;
2790 	thread->wait_status = B_OK;
2791 
2792 	return thread;
2793 }
2794 
2795 
2796 /*!	The team lock must not be held. \a thread must be the current thread.
2797 */
2798 void
2799 team_free_user_thread(struct thread* thread)
2800 {
2801 	user_thread* userThread = thread->user_thread;
2802 	if (userThread == NULL)
2803 		return;
2804 
2805 	// create a free list entry
2806 	free_user_thread* entry
2807 		= (free_user_thread*)malloc(sizeof(free_user_thread));
2808 	if (entry == NULL) {
2809 		// we have to leak the user thread :-/
2810 		return;
2811 	}
2812 
2813 	InterruptsSpinLocker _(gTeamSpinlock);
2814 
2815 	// detach from thread
2816 	SpinLocker threadLocker(gThreadSpinlock);
2817 	thread->user_thread = NULL;
2818 	threadLocker.Unlock();
2819 
2820 	entry->thread = userThread;
2821 	entry->next = thread->team->free_user_threads;
2822 	thread->team->free_user_threads = entry;
2823 }
2824 
2825 
2826 //	#pragma mark - Public kernel API
2827 
2828 
2829 thread_id
2830 load_image(int32 argCount, const char** args, const char** env)
2831 {
2832 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
2833 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
2834 }
2835 
2836 
2837 thread_id
2838 load_image_etc(int32 argCount, const char* const* args,
2839 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
2840 {
2841 	// we need to flatten the args and environment
2842 
2843 	if (args == NULL)
2844 		return B_BAD_VALUE;
2845 
2846 	// determine total needed size
2847 	int32 argSize = 0;
2848 	for (int32 i = 0; i < argCount; i++)
2849 		argSize += strlen(args[i]) + 1;
2850 
2851 	int32 envCount = 0;
2852 	int32 envSize = 0;
2853 	while (env != NULL && env[envCount] != NULL)
2854 		envSize += strlen(env[envCount++]) + 1;
2855 
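	// The flat buffer consists of the argument and environment pointer arrays
	// (each terminated by a NULL entry), followed by the string data.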
2856 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
2857 	if (size > MAX_PROCESS_ARGS_SIZE)
2858 		return B_TOO_MANY_ARGS;
2859 
2860 	// allocate space
2861 	char** flatArgs = (char**)malloc(size);
2862 	if (flatArgs == NULL)
2863 		return B_NO_MEMORY;
2864 
2865 	char** slot = flatArgs;
2866 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
2867 
2868 	// copy arguments and environment
2869 	for (int32 i = 0; i < argCount; i++) {
2870 		int32 argSize = strlen(args[i]) + 1;
2871 		memcpy(stringSpace, args[i], argSize);
2872 		*slot++ = stringSpace;
2873 		stringSpace += argSize;
2874 	}
2875 
2876 	*slot++ = NULL;
2877 
2878 	for (int32 i = 0; i < envCount; i++) {
2879 		int32 envSize = strlen(env[i]) + 1;
2880 		memcpy(stringSpace, env[i], envSize);
2881 		*slot++ = stringSpace;
2882 		stringSpace += envSize;
2883 	}
2884 
2885 	*slot++ = NULL;
2886 
2887 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
2888 		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
2889 
2890 	free(flatArgs);
2891 		// load_image_internal() unset our variable if it took over ownership
2892 
2893 	return thread;
2894 }
2895 
2896 
2897 status_t
2898 wait_for_team(team_id id, status_t* _returnCode)
2899 {
2900 	struct team* team;
2901 	thread_id thread;
2902 	cpu_status state;
2903 
2904 	// find main thread and wait for that
2905 
2906 	state = disable_interrupts();
2907 	GRAB_TEAM_LOCK();
2908 
2909 	team = team_get_team_struct_locked(id);
2910 	if (team != NULL && team->main_thread != NULL)
2911 		thread = team->main_thread->id;
2912 	else
2913 		thread = B_BAD_THREAD_ID;
2914 
2915 	RELEASE_TEAM_LOCK();
2916 	restore_interrupts(state);
2917 
2918 	if (thread < 0)
2919 		return thread;
2920 
2921 	return wait_for_thread(thread, _returnCode);
2922 }
2923 
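
/*	A minimal usage sketch (illustrative only; the path and argument count are
	assumptions, not part of this file): start a userland program and wait for
	its main thread to exit.

	const char* args[] = { "/bin/sync" };
	thread_id team = load_image(1, args, NULL);
	if (team >= B_OK) {
		status_t returnCode;
		resume_thread(team);
		wait_for_team(team, &returnCode);
	}
*/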
2924 
2925 status_t
2926 kill_team(team_id id)
2927 {
2928 	status_t status = B_OK;
2929 	thread_id threadID = -1;
2930 	struct team* team;
2931 	cpu_status state;
2932 
2933 	state = disable_interrupts();
2934 	GRAB_TEAM_LOCK();
2935 
2936 	team = team_get_team_struct_locked(id);
2937 	if (team != NULL) {
2938 		if (team != sKernelTeam) {
2939 			threadID = team->id;
2940 				// the team ID is the same as the ID of its main thread
2941 		} else
2942 			status = B_NOT_ALLOWED;
2943 	} else
2944 		status = B_BAD_THREAD_ID;
2945 
2946 	RELEASE_TEAM_LOCK();
2947 	restore_interrupts(state);
2948 
2949 	if (status < B_OK)
2950 		return status;
2951 
2952 	// just kill the main thread in the team. The cleanup code there will
2953 	// take care of the team
2954 	return kill_thread(threadID);
2955 }
2956 
2957 
2958 status_t
2959 _get_team_info(team_id id, team_info* info, size_t size)
2960 {
2961 	cpu_status state;
2962 	status_t status = B_OK;
2963 	struct team* team;
2964 
2965 	state = disable_interrupts();
2966 	GRAB_TEAM_LOCK();
2967 
2968 	if (id == B_CURRENT_TEAM)
2969 		team = thread_get_current_thread()->team;
2970 	else
2971 		team = team_get_team_struct_locked(id);
2972 
2973 	if (team == NULL) {
2974 		status = B_BAD_TEAM_ID;
2975 		goto err;
2976 	}
2977 
2978 	status = fill_team_info(team, info, size);
2979 
2980 err:
2981 	RELEASE_TEAM_LOCK();
2982 	restore_interrupts(state);
2983 
2984 	return status;
2985 }
2986 
2987 
2988 status_t
2989 _get_next_team_info(int32* cookie, team_info* info, size_t size)
2990 {
2991 	status_t status = B_BAD_TEAM_ID;
2992 	struct team* team = NULL;
2993 	int32 slot = *cookie;
2994 	team_id lastTeamID;
2995 	cpu_status state;
2996 
2997 	if (slot < 1)
2998 		slot = 1;
2999 
3000 	state = disable_interrupts();
3001 	GRAB_TEAM_LOCK();
3002 
3003 	lastTeamID = peek_next_thread_id();
3004 	if (slot >= lastTeamID)
3005 		goto err;
3006 
3007 	// get next valid team
3008 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
3009 		slot++;
3010 
3011 	if (team) {
3012 		status = fill_team_info(team, info, size);
3013 		*cookie = ++slot;
3014 	}
3015 
3016 err:
3017 	RELEASE_TEAM_LOCK();
3018 	restore_interrupts(state);
3019 
3020 	return status;
3021 }
3022 
3023 
3024 status_t
3025 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3026 {
3027 	bigtime_t kernelTime = 0, userTime = 0;
3028 	status_t status = B_OK;
3029 	struct team* team;
3030 	cpu_status state;
3031 
3032 	if (size != sizeof(team_usage_info)
3033 		|| (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN))
3034 		return B_BAD_VALUE;
3035 
3036 	state = disable_interrupts();
3037 	GRAB_TEAM_LOCK();
3038 
3039 	if (id == B_CURRENT_TEAM)
3040 		team = thread_get_current_thread()->team;
3041 	else
3042 		team = team_get_team_struct_locked(id);
3043 
3044 	if (team == NULL) {
3045 		status = B_BAD_TEAM_ID;
3046 		goto out;
3047 	}
3048 
3049 	switch (who) {
3050 		case B_TEAM_USAGE_SELF:
3051 		{
3052 			struct thread* thread = team->thread_list;
3053 
3054 			for (; thread != NULL; thread = thread->team_next) {
3055 				kernelTime += thread->kernel_time;
3056 				userTime += thread->user_time;
3057 			}
3058 
3059 			kernelTime += team->dead_threads_kernel_time;
3060 			userTime += team->dead_threads_user_time;
3061 			break;
3062 		}
3063 
3064 		case B_TEAM_USAGE_CHILDREN:
3065 		{
3066 			struct team* child = team->children;
3067 			for (; child != NULL; child = child->siblings_next) {
3068 				struct thread* thread = child->thread_list;
3069 
3070 				for (; thread != NULL; thread = thread->team_next) {
3071 					kernelTime += thread->kernel_time;
3072 					userTime += thread->user_time;
3073 				}
3074 
3075 				kernelTime += child->dead_threads_kernel_time;
3076 				userTime += child->dead_threads_user_time;
3077 			}
3078 
3079 			kernelTime += team->dead_children->kernel_time;
3080 			userTime += team->dead_children->user_time;
3081 			break;
3082 		}
3083 	}
3084 
3085 out:
3086 	RELEASE_TEAM_LOCK();
3087 	restore_interrupts(state);
3088 
3089 	if (status == B_OK) {
3090 		info->kernel_time = kernelTime;
3091 		info->user_time = userTime;
3092 	}
3093 
3094 	return status;
3095 }
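
/*	A minimal usage sketch (illustrative only): querying the accumulated CPU
	times of the calling team.

	team_usage_info usage;
	if (_get_team_usage_info(B_CURRENT_TEAM, B_TEAM_USAGE_SELF, &usage,
			sizeof(usage)) == B_OK) {
		dprintf("user: %lld us, kernel: %lld us\n", usage.user_time,
			usage.kernel_time);
	}
*/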
3096 
3097 
3098 pid_t
3099 getpid(void)
3100 {
3101 	return thread_get_current_thread()->team->id;
3102 }
3103 
3104 
3105 pid_t
3106 getppid(void)
3107 {
3108 	struct team* team = thread_get_current_thread()->team;
3109 	cpu_status state;
3110 	pid_t parent;
3111 
3112 	state = disable_interrupts();
3113 	GRAB_TEAM_LOCK();
3114 
3115 	parent = team->parent->id;
3116 
3117 	RELEASE_TEAM_LOCK();
3118 	restore_interrupts(state);
3119 
3120 	return parent;
3121 }
3122 
3123 
3124 pid_t
3125 getpgid(pid_t process)
3126 {
3127 	struct thread* thread;
3128 	pid_t result = -1;
3129 	cpu_status state;
3130 
3131 	if (process == 0)
3132 		process = thread_get_current_thread()->team->id;
3133 
3134 	state = disable_interrupts();
3135 	GRAB_THREAD_LOCK();
3136 
3137 	thread = thread_get_thread_struct_locked(process);
3138 	if (thread != NULL)
3139 		result = thread->team->group_id;
3140 
3141 	RELEASE_THREAD_LOCK();
3142 	restore_interrupts(state);
3143 
3144 	return thread != NULL ? result : B_BAD_VALUE;
3145 }
3146 
3147 
3148 pid_t
3149 getsid(pid_t process)
3150 {
3151 	struct thread* thread;
3152 	pid_t result = -1;
3153 	cpu_status state;
3154 
3155 	if (process == 0)
3156 		process = thread_get_current_thread()->team->id;
3157 
3158 	state = disable_interrupts();
3159 	GRAB_THREAD_LOCK();
3160 
3161 	thread = thread_get_thread_struct_locked(process);
3162 	if (thread != NULL)
3163 		result = thread->team->session_id;
3164 
3165 	RELEASE_THREAD_LOCK();
3166 	restore_interrupts(state);
3167 
3168 	return thread != NULL ? result : B_BAD_VALUE;
3169 }
3170 
3171 
3172 //	#pragma mark - User syscalls
3173 
3174 
3175 status_t
3176 _user_exec(const char* userPath, const char* const* userFlatArgs,
3177 	size_t flatArgsSize, int32 argCount, int32 envCount)
3178 {
3179 	// NOTE: Since this function normally doesn't return, don't use automatic
3180 	// variables that need destruction in the function scope.
3181 	char path[B_PATH_NAME_LENGTH];
3182 
3183 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3184 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3185 		return B_BAD_ADDRESS;
3186 
3187 	// copy and relocate the flat arguments
3188 	char** flatArgs;
3189 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3190 		argCount, envCount, flatArgs);
3191 
3192 	if (error == B_OK) {
3193 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3194 			envCount);
3195 			// this one only returns in case of error
3196 	}
3197 
3198 	free(flatArgs);
3199 	return error;
3200 }
3201 
3202 
3203 thread_id
3204 _user_fork(void)
3205 {
3206 	return fork_team();
3207 }
3208 
3209 
3210 thread_id
3211 _user_wait_for_child(thread_id child, uint32 flags, int32* _userReason,
3212 	status_t* _userReturnCode)
3213 {
3214 	status_t returnCode;
3215 	int32 reason;
3216 	thread_id deadChild;
3217 
3218 	if ((_userReason != NULL && !IS_USER_ADDRESS(_userReason))
3219 		|| (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode)))
3220 		return B_BAD_ADDRESS;
3221 
3222 	deadChild = wait_for_child(child, flags, &reason, &returnCode);
3223 
3224 	if (deadChild >= B_OK) {
3225 		// copy result data on successful completion
3226 		if ((_userReason != NULL
3227 				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
3228 			|| (_userReturnCode != NULL
3229 				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
3230 					< B_OK)) {
3231 			return B_BAD_ADDRESS;
3232 		}
3233 
3234 		return deadChild;
3235 	}
3236 
3237 	return syscall_restart_handle_post(deadChild);
3238 }
3239 
3240 
3241 pid_t
3242 _user_process_info(pid_t process, int32 which)
3243 {
3244 	// we only allow returning the parent of the current process
3245 	if (which == PARENT_ID
3246 		&& process != 0 && process != thread_get_current_thread()->team->id)
3247 		return B_BAD_VALUE;
3248 
3249 	switch (which) {
3250 		case SESSION_ID:
3251 			return getsid(process);
3252 		case GROUP_ID:
3253 			return getpgid(process);
3254 		case PARENT_ID:
3255 			return getppid();
3256 	}
3257 
3258 	return B_BAD_VALUE;
3259 }
3260 
3261 
3262 pid_t
3263 _user_setpgid(pid_t processID, pid_t groupID)
3264 {
3265 	struct thread* thread = thread_get_current_thread();
3266 	struct team* currentTeam = thread->team;
3267 	struct team* team;
3268 
3269 	if (groupID < 0)
3270 		return B_BAD_VALUE;
3271 
3272 	if (processID == 0)
3273 		processID = currentTeam->id;
3274 
3275 	// if the group ID is not specified, use the target process' ID
3276 	if (groupID == 0)
3277 		groupID = processID;
3278 
3279 	if (processID == currentTeam->id) {
3280 		// we set our own group
3281 
3282 		// we must not change our process group ID if we're a session leader
3283 		if (is_session_leader(currentTeam))
3284 			return B_NOT_ALLOWED;
3285 	} else {
3286 		// another team is the target of the call -- check it out
3287 		InterruptsSpinLocker _(gTeamSpinlock);
3288 
3289 		team = team_get_team_struct_locked(processID);
3290 		if (team == NULL)
3291 			return ESRCH;
3292 
3293 		// The team must be a child of the calling team and in the same session.
3294 		// (If that's the case it isn't a session leader either.)
3295 		if (team->parent != currentTeam
3296 			|| team->session_id != currentTeam->session_id) {
3297 			return B_NOT_ALLOWED;
3298 		}
3299 
3300 		if (team->group_id == groupID)
3301 			return groupID;
3302 
3303 		// The call is also supposed to fail on a child that has already
3304 		// executed exec*() [EACCES].
3305 		if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
3306 			return EACCES;
3307 	}
3308 
3309 	struct process_group* group = NULL;
3310 	if (groupID == processID) {
3311 		// A new process group might be needed.
3312 		group = create_process_group(groupID);
3313 		if (group == NULL)
3314 			return B_NO_MEMORY;
3315 
3316 		// Assume orphaned. We consider the situation of the team's parent
3317 		// below.
3318 		group->orphaned = true;
3319 	}
3320 
3321 	status_t status = B_OK;
3322 	struct process_group* freeGroup = NULL;
3323 
3324 	InterruptsSpinLocker locker(gTeamSpinlock);
3325 
3326 	team = team_get_team_struct_locked(processID);
3327 	if (team != NULL) {
3328 		// check the conditions again -- they might have changed in the meantime
3329 		if (is_session_leader(team)
3330 			|| team->session_id != currentTeam->session_id) {
3331 			status = B_NOT_ALLOWED;
3332 		} else if (team != currentTeam
3333 				&& (team->flags & TEAM_FLAG_EXEC_DONE) != 0) {
3334 			status = EACCES;
3335 		} else if (team->group_id == groupID) {
3336 			// the team is already in the desired process group
3337 			freeGroup = group;
3338 		} else {
3339 			// Check if a process group with the requested ID already exists.
3340 			struct process_group* targetGroup
3341 				= team_get_process_group_locked(team->group->session, groupID);
3342 			if (targetGroup != NULL) {
3343 				// In case of processID == groupID we have to free the
3344 				// allocated group.
3345 				freeGroup = group;
3346 			} else if (processID == groupID) {
3347 				// We created a new process group, let us insert it into the
3348 				// team's session.
3349 				insert_group_into_session(team->group->session, group);
3350 				targetGroup = group;
3351 			}
3352 
3353 			if (targetGroup != NULL) {
3354 				// we got a group, let's move the team there
3355 				process_group* oldGroup = team->group;
3356 
3357 				remove_team_from_group(team);
3358 				insert_team_into_group(targetGroup, team);
3359 
3360 				// Update the "orphaned" flag of all potentially affected
3361 				// groups.
3362 
3363 				// the team's old group
3364 				if (oldGroup->teams != NULL) {
3365 					oldGroup->orphaned = false;
3366 					update_orphaned_process_group(oldGroup, -1);
3367 				}
3368 
3369 				// the team's new group
3370 				struct team* parent = team->parent;
3371 				targetGroup->orphaned &= parent == NULL
3372 					|| parent->group == targetGroup
3373 					|| team->parent->session_id != team->session_id;
3374 
3375 				// children's groups
3376 				struct team* child = team->children;
3377 				while (child != NULL) {
3378 					child->group->orphaned = false;
3379 					update_orphaned_process_group(child->group, -1);
3380 
3381 					child = child->siblings_next;
3382 				}
3383 			} else
3384 				status = B_NOT_ALLOWED;
3385 		}
3386 	} else
3387 		status = B_NOT_ALLOWED;
3388 
3389 	// Changing the process group might have changed the situation for a parent
3390 	// waiting in wait_for_child(). Hence we notify it.
3391 	if (status == B_OK)
3392 		team->parent->dead_children->condition_variable.NotifyAll(false);
3393 
3394 	locker.Unlock();
3395 
3396 	if (status != B_OK) {
3397 		// in case of error, the group hasn't been added into the hash
3398 		team_delete_process_group(group);
3399 	}
3400 
3401 	team_delete_process_group(freeGroup);
3402 
3403 	return status == B_OK ? groupID : status;
3404 }
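
/*	This syscall backs the POSIX setpgid(). A typical (illustrative) userland
	use is a shell moving a freshly forked child into its own process group;
	doing it in both parent and child avoids a race:

	pid_t child = fork();
	if (child == 0)
		setpgid(0, 0);
	else if (child > 0)
		setpgid(child, child);
*/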
3405 
3406 
3407 pid_t
3408 _user_setsid(void)
3409 {
3410 	struct team* team = thread_get_current_thread()->team;
3411 	struct process_session* session;
3412 	struct process_group* group;
3413 	cpu_status state;
3414 	bool failed = false;
3415 
3416 	// the team must not already be a process group leader
3417 	if (is_process_group_leader(team))
3418 		return B_NOT_ALLOWED;
3419 
3420 	group = create_process_group(team->id);
3421 	if (group == NULL)
3422 		return B_NO_MEMORY;
3423 
3424 	session = create_process_session(group->id);
3425 	if (session == NULL) {
3426 		team_delete_process_group(group);
3427 		return B_NO_MEMORY;
3428 	}
3429 
3430 	state = disable_interrupts();
3431 	GRAB_TEAM_LOCK();
3432 
3433 	// this may have changed since the check above
3434 	if (!is_process_group_leader(team)) {
3435 		remove_team_from_group(team);
3436 
3437 		insert_group_into_session(session, group);
3438 		insert_team_into_group(group, team);
3439 	} else
3440 		failed = true;
3441 
3442 	RELEASE_TEAM_LOCK();
3443 	restore_interrupts(state);
3444 
3445 	if (failed) {
3446 		team_delete_process_group(group);
3447 		free(session);
3448 		return B_NOT_ALLOWED;
3449 	}
3450 
3451 	return team->group_id;
3452 }
3453 
3454 
3455 status_t
3456 _user_wait_for_team(team_id id, status_t* _userReturnCode)
3457 {
3458 	status_t returnCode;
3459 	status_t status;
3460 
3461 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
3462 		return B_BAD_ADDRESS;
3463 
3464 	status = wait_for_team(id, &returnCode);
3465 	if (status >= B_OK && _userReturnCode != NULL) {
3466 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
3467 				!= B_OK)
3468 			return B_BAD_ADDRESS;
3469 		return B_OK;
3470 	}
3471 
3472 	return syscall_restart_handle_post(status);
3473 }
3474 
3475 
3476 thread_id
3477 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
3478 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
3479 	port_id errorPort, uint32 errorToken)
3480 {
3481 	TRACE(("_user_load_image: argc = %ld\n", argCount));
3482 
3483 	if (argCount < 1)
3484 		return B_BAD_VALUE;
3485 
3486 	// copy and relocate the flat arguments
3487 	char** flatArgs;
3488 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3489 		argCount, envCount, flatArgs);
3490 	if (error != B_OK)
3491 		return error;
3492 
3493 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
3494 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
3495 		errorToken);
3496 
3497 	free(flatArgs);
3498 		// load_image_internal() unset our variable if it took over ownership
3499 
3500 	return thread;
3501 }
3502 
3503 
3504 void
3505 _user_exit_team(status_t returnValue)
3506 {
3507 	struct thread* thread = thread_get_current_thread();
3508 	struct team* team = thread->team;
3509 	struct thread* mainThread = team->main_thread;
3510 
3511 	mainThread->exit.status = returnValue;
3512 	mainThread->exit.reason = THREAD_RETURN_EXIT;
3513 
3514 	// Also set the exit code in the current thread for the sake of it
3515 	if (thread != mainThread) {
3516 		thread->exit.status = returnValue;
3517 		thread->exit.reason = THREAD_RETURN_EXIT;
3518 	}
3519 
3520 	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT)
3521 			!= 0) {
3522 		// This team is currently being debugged, and it was requested that
3523 		// teams should not be exited.
3524 		user_debug_stop_thread();
3525 	}
3526 
3527 	send_signal(thread->id, SIGKILL);
3528 }
3529 
3530 
3531 status_t
3532 _user_kill_team(team_id team)
3533 {
3534 	return kill_team(team);
3535 }
3536 
3537 
3538 status_t
3539 _user_get_team_info(team_id id, team_info* userInfo)
3540 {
3541 	status_t status;
3542 	team_info info;
3543 
3544 	if (!IS_USER_ADDRESS(userInfo))
3545 		return B_BAD_ADDRESS;
3546 
3547 	status = _get_team_info(id, &info, sizeof(team_info));
3548 	if (status == B_OK) {
3549 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3550 			return B_BAD_ADDRESS;
3551 	}
3552 
3553 	return status;
3554 }
3555 
3556 
3557 status_t
3558 _user_get_next_team_info(int32* userCookie, team_info* userInfo)
3559 {
3560 	status_t status;
3561 	team_info info;
3562 	int32 cookie;
3563 
3564 	if (!IS_USER_ADDRESS(userCookie)
3565 		|| !IS_USER_ADDRESS(userInfo)
3566 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3567 		return B_BAD_ADDRESS;
3568 
3569 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
3570 	if (status != B_OK)
3571 		return status;
3572 
3573 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3574 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3575 		return B_BAD_ADDRESS;
3576 
3577 	return status;
3578 }
3579 
3580 
3581 team_id
3582 _user_get_current_team(void)
3583 {
3584 	return team_get_current_team_id();
3585 }
3586 
3587 
3588 status_t
3589 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
3590 	size_t size)
3591 {
3592 	team_usage_info info;
3593 	status_t status;
3594 
3595 	if (!IS_USER_ADDRESS(userInfo))
3596 		return B_BAD_ADDRESS;
3597 
3598 	status = _get_team_usage_info(team, who, &info, size);
3599 	if (status != B_OK)
3600 		return status;
3601 
3602 	if (user_memcpy(userInfo, &info, size) < B_OK)
3603 		return B_BAD_ADDRESS;
3604 
3605 	return status;
3606 }
3607 
3608