xref: /haiku/src/system/kernel/team.cpp (revision b2c7de82305294ddf7dd438eecf63f281ef33eba)
1 /*
2  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*!	Team functions */
10 
11 #include <stdio.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <sys/wait.h>
15 
16 #include <OS.h>
17 
18 #include <AutoDeleter.h>
19 
20 #include <elf.h>
21 #include <file_cache.h>
22 #include <heap.h>
23 #include <int.h>
24 #include <kernel.h>
25 #include <kimage.h>
26 #include <kscheduler.h>
27 #include <ksignal.h>
28 #include <port.h>
29 #include <sem.h>
30 #include <syscall_process_info.h>
31 #include <syscall_restart.h>
32 #include <syscalls.h>
33 #include <team.h>
34 #include <tls.h>
35 #include <tracing.h>
36 #include <user_runtime.h>
37 #include <usergroup.h>
38 #include <vfs.h>
39 #include <vm.h>
40 #include <vm_address_space.h>
41 #include <util/AutoLock.h>
42 #include <util/khash.h>
43 
44 //#define TRACE_TEAM
45 #ifdef TRACE_TEAM
46 #	define TRACE(x) dprintf x
47 #else
48 #	define TRACE(x) ;
49 #endif
50 
51 
52 struct team_key {
53 	team_id id;
54 };
55 
56 struct team_arg {
57 	uint32	arg_count;
58 	char	**args;
59 	uint32	env_count;
60 	char	**env;
61 	port_id	error_port;
62 	uint32	error_token;
63 };
64 
65 struct fork_arg {
66 	area_id		user_stack_area;
67 	addr_t		user_stack_base;
68 	size_t		user_stack_size;
69 	addr_t		user_local_storage;
70 	sigset_t	sig_block_mask;
71 
72 	struct arch_fork_arg arch_info;
73 };
74 
75 
76 static hash_table *sTeamHash = NULL;
77 static hash_table *sGroupHash = NULL;
78 static struct team *sKernelTeam = NULL;
79 
80 // some arbitrarily chosen limits - these should probably depend on the
81 // available memory (the limit is not yet enforced)
82 static int32 sMaxTeams = 2048;
83 static int32 sUsedTeams = 1;
84 
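// Guards the team hash table, the group hash table, and the team, group, and
// session structures themselves. It is acquired with interrupts disabled,
// either via GRAB_TEAM_LOCK() or an InterruptsSpinLocker on team_spinlock.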
85 spinlock team_spinlock = 0;
86 
87 
88 // #pragma mark - Tracing
89 
90 
91 #if TEAM_TRACING
92 namespace TeamTracing {
93 
94 class TeamForked : public AbstractTraceEntry {
95 	public:
96 		TeamForked(thread_id forkedThread)
97 			:
98 			fForkedThread(forkedThread)
99 		{
100 			Initialized();
101 		}
102 
103 		virtual void AddDump(TraceOutput& out)
104 		{
105 			out.Print("team forked, new thread %ld", fForkedThread);
106 		}
107 
108 	private:
109 		thread_id			fForkedThread;
110 };
111 
112 
113 class ExecTeam : public AbstractTraceEntry {
114 	public:
115 		ExecTeam(const char* path, int32 argCount, const char* const* args,
116 				int32 envCount, const char* const* env)
117 			:
118 			fArgCount(argCount),
119 			fArgs(NULL)
120 		{
121 			fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
122 				false);
123 
124 			// determine the buffer size we need for the args
125 			size_t argBufferSize = 0;
126 			for (int32 i = 0; i < argCount; i++)
127 				argBufferSize += strlen(args[i]) + 1;
128 
129 			// allocate a buffer
130 			fArgs = (char*)alloc_tracing_buffer(argBufferSize);
131 			if (fArgs) {
132 				char* buffer = fArgs;
133 				for (int32 i = 0; i < argCount; i++) {
134 					size_t argSize = strlen(args[i]) + 1;
135 					memcpy(buffer, args[i], argSize);
136 					buffer += argSize;
137 				}
138 			}
139 
140 			// ignore env for the time being
141 			(void)envCount;
142 			(void)env;
143 
144 			Initialized();
145 		}
146 
147 		virtual void AddDump(TraceOutput& out)
148 		{
149 			out.Print("team exec, \"%p\", args:", fPath);
150 
151 			char* args = fArgs;
152 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
153 				out.Print(" \"%s\"", args);
154 				args += strlen(args) + 1;
155 			}
156 		}
157 
158 	private:
159 		char*	fPath;
160 		int32	fArgCount;
161 		char*	fArgs;
162 };
163 
164 
165 static const char*
166 job_control_state_name(job_control_state state)
167 {
168 	switch (state) {
169 		case JOB_CONTROL_STATE_NONE:
170 			return "none";
171 		case JOB_CONTROL_STATE_STOPPED:
172 			return "stopped";
173 		case JOB_CONTROL_STATE_CONTINUED:
174 			return "continued";
175 		case JOB_CONTROL_STATE_DEAD:
176 			return "dead";
177 		default:
178 			return "invalid";
179 	}
180 }
181 
182 
183 class SetJobControlState : public AbstractTraceEntry {
184 	public:
185 		SetJobControlState(team_id team, job_control_state newState, int signal)
186 			:
187 			fTeam(team),
188 			fNewState(newState),
189 			fSignal(signal)
190 		{
191 			Initialized();
192 		}
193 
194 		virtual void AddDump(TraceOutput& out)
195 		{
196 			out.Print("team set job control state, team %ld, "
197 				"new state: %s, signal: %d",
198 				fTeam, job_control_state_name(fNewState), fSignal);
199 		}
200 
201 	private:
202 		team_id				fTeam;
203 		job_control_state	fNewState;
204 		int					fSignal;
205 };
206 
207 
208 class WaitForChild : public AbstractTraceEntry {
209 	public:
210 		WaitForChild(pid_t child, uint32 flags)
211 			:
212 			fChild(child),
213 			fFlags(flags)
214 		{
215 			Initialized();
216 		}
217 
218 		virtual void AddDump(TraceOutput& out)
219 		{
220 			out.Print("team wait for child, child: %ld, "
221 				"flags: 0x%lx", fChild, fFlags);
222 		}
223 
224 	private:
225 		pid_t	fChild;
226 		uint32	fFlags;
227 };
228 
229 
230 class WaitForChildDone : public AbstractTraceEntry {
231 	public:
232 		WaitForChildDone(const job_control_entry& entry)
233 			:
234 			fState(entry.state),
235 			fTeam(entry.thread),
236 			fStatus(entry.status),
237 			fReason(entry.reason),
238 			fSignal(entry.signal)
239 		{
240 			Initialized();
241 		}
242 
243 		WaitForChildDone(status_t error)
244 			:
245 			fTeam(error)
246 		{
247 			Initialized();
248 		}
249 
250 		virtual void AddDump(TraceOutput& out)
251 		{
252 			if (fTeam >= 0) {
253 				out.Print("team wait for child done, team: %ld, "
254 					"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
255 					fTeam, job_control_state_name(fState), fStatus, fReason,
256 					fSignal);
257 			} else {
258 				out.Print("team wait for child failed, error: "
259 					"0x%lx, ", fTeam);
260 			}
261 		}
262 
263 	private:
264 		job_control_state	fState;
265 		team_id				fTeam;
266 		status_t			fStatus;
267 		uint16				fReason;
268 		uint16				fSignal;
269 };
270 
271 }	// namespace TeamTracing
272 
273 #	define T(x) new(std::nothrow) TeamTracing::x;
274 #else
275 #	define T(x) ;
276 #endif
277 
278 
279 
280 //	#pragma mark - Private functions
281 
282 
283 static void
284 _dump_team_info(struct team *team)
285 {
286 	kprintf("TEAM: %p\n", team);
287 	kprintf("id:          %ld (%#lx)\n", team->id, team->id);
288 	kprintf("name:        '%s'\n", team->name);
289 	kprintf("args:        '%s'\n", team->args);
290 	kprintf("next:        %p\n", team->next);
291 	kprintf("parent:      %p", team->parent);
292 	if (team->parent != NULL) {
293 		kprintf(" (id = %ld)\n", team->parent->id);
294 	} else
295 		kprintf("\n");
296 
297 	kprintf("children:    %p\n", team->children);
298 	kprintf("num_threads: %d\n", team->num_threads);
299 	kprintf("state:       %d\n", team->state);
300 	kprintf("flags:       0x%lx\n", team->flags);
301 	kprintf("io_context:  %p\n", team->io_context);
302 	if (team->address_space)
303 		kprintf("address_space: %p\n", team->address_space);
304 	kprintf("main_thread: %p\n", team->main_thread);
305 	kprintf("thread_list: %p\n", team->thread_list);
306 	kprintf("group_id:    %ld\n", team->group_id);
307 	kprintf("session_id:  %ld\n", team->session_id);
308 }
309 
310 
311 static int
312 dump_team_info(int argc, char **argv)
313 {
314 	struct hash_iterator iterator;
315 	struct team *team;
316 	team_id id = -1;
317 	bool found = false;
318 
319 	if (argc < 2) {
320 		struct thread* thread = thread_get_current_thread();
321 		if (thread != NULL && thread->team != NULL)
322 			_dump_team_info(thread->team);
323 		else
324 			kprintf("No current team!\n");
325 		return 0;
326 	}
327 
328 	id = strtoul(argv[1], NULL, 0);
329 	if (IS_KERNEL_ADDRESS(id)) {
330 		// semi-hack
331 		_dump_team_info((struct team *)id);
332 		return 0;
333 	}
334 
335 	// walk through the team list, trying to match a name or id
336 	hash_open(sTeamHash, &iterator);
337 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
338 		if ((team->name && strcmp(argv[1], team->name) == 0) || team->id == id) {
339 			_dump_team_info(team);
340 			found = true;
341 			break;
342 		}
343 	}
344 	hash_close(sTeamHash, &iterator, false);
345 
346 	if (!found)
347 		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
348 	return 0;
349 }
350 
351 
352 static int
353 dump_teams(int argc, char **argv)
354 {
355 	struct hash_iterator iterator;
356 	struct team *team;
357 
358 	kprintf("team           id  parent      name\n");
359 	hash_open(sTeamHash, &iterator);
360 
361 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
362 		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->name);
363 	}
364 
365 	hash_close(sTeamHash, &iterator, false);
366 	return 0;
367 }
368 
369 
370 /*!	Frees an array of strings in kernel space, along with the array itself.
371 
372 	\param strings the array of strings to free
373 	\param count the number of strings in the array
374 */
375 static void
376 free_strings_array(char **strings, int32 count)
377 {
378 	int32 i;
379 
380 	if (strings == NULL)
381 		return;
382 
383 	for (i = 0; i < count; i++)
384 		free(strings[i]);
385 
386 	free(strings);
387 }
388 
389 
390 /*!	Copies an array of strings within kernel space.
391 
392 	\param in the array of strings to be copied
393 	\param count the number of strings in the array
394 	\param _strings pointer to where the kernel copy is stored
395 	\return \c B_OK on success, or an appropriate error code on
396 		failure.
397 */
398 static status_t
399 kernel_copy_strings_array(char * const *in, int32 count, char ***_strings)
400 {
401 	status_t status;
402 	char **strings;
403 	int32 i = 0;
404 
405 	strings = (char **)malloc((count + 1) * sizeof(char *));
406 	if (strings == NULL)
407 		return B_NO_MEMORY;
408 
409 	for (; i < count; i++) {
410 		strings[i] = strdup(in[i]);
411 		if (strings[i] == NULL) {
412 			status = B_NO_MEMORY;
413 			goto error;
414 		}
415 	}
416 
417 	strings[count] = NULL;
418 	*_strings = strings;
419 
420 	return B_OK;
421 
422 error:
423 	free_strings_array(strings, i);
424 	return status;
425 }
426 
427 
428 /*!	Copies an array of strings from user space to kernel space.
429 
430 	\param userStrings the userspace array of strings
431 	\param count the number of strings in the array
432 	\param _strings pointer to where the kernel copy is stored
433 	\return \c B_OK on success, or an appropriate error code on
434 		failure.
435 */
436 static status_t
437 user_copy_strings_array(char * const *userStrings, int32 count, char ***_strings)
438 {
439 	char *buffer;
440 	char **strings;
441 	status_t err;
442 	int32 i = 0;
443 
444 	if (!IS_USER_ADDRESS(userStrings))
445 		return B_BAD_ADDRESS;
446 
447 	// buffer for safely accessing the user string
448 	// TODO: maybe have a user_strdup() instead?
449 	buffer = (char *)malloc(4 * B_PAGE_SIZE);
450 	if (buffer == NULL)
451 		return B_NO_MEMORY;
452 
453 	strings = (char **)malloc((count + 1) * sizeof(char *));
454 	if (strings == NULL) {
455 		err = B_NO_MEMORY;
456 		goto error;
457 	}
458 
459 	if ((err = user_memcpy(strings, userStrings, count * sizeof(char *))) < B_OK)
460 		goto error;
461 
462 	// scan all strings and copy to kernel space
463 
464 	for (; i < count; i++) {
465 		err = user_strlcpy(buffer, strings[i], 4 * B_PAGE_SIZE);
466 		if (err < B_OK)
467 			goto error;
468 
469 		strings[i] = strdup(buffer);
470 		if (strings[i] == NULL) {
471 			err = B_NO_MEMORY;
472 			goto error;
473 		}
474 	}
475 
476 	strings[count] = NULL;
477 	*_strings = strings;
478 	free(buffer);
479 
480 	return B_OK;
481 
482 error:
483 	free_strings_array(strings, i);
484 	free(buffer);
485 
486 	TRACE(("user_copy_strings_array failed %ld\n", err));
487 	return err;
488 }
489 
490 
491 static status_t
492 copy_strings_array(char * const *strings, int32 count, char ***_strings,
493 	bool kernel)
494 {
495 	if (kernel)
496 		return kernel_copy_strings_array(strings, count, _strings);
497 
498 	return user_copy_strings_array(strings, count, _strings);
499 }
500 
501 
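// Hash table callbacks for sTeamHash and sGroupHash. Each hash function can
// be called either with an existing element or, for plain key lookups, with
// element == NULL and just the key.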
502 static int
503 team_struct_compare(void *_p, const void *_key)
504 {
505 	struct team *p = (struct team*)_p;
506 	const struct team_key *key = (const struct team_key*)_key;
507 
508 	if (p->id == key->id)
509 		return 0;
510 
511 	return 1;
512 }
513 
514 
515 static uint32
516 team_struct_hash(void *_p, const void *_key, uint32 range)
517 {
518 	struct team *p = (struct team*)_p;
519 	const struct team_key *key = (const struct team_key*)_key;
520 
521 	if (p != NULL)
522 		return p->id % range;
523 
524 	return (uint32)key->id % range;
525 }
526 
527 
528 static int
529 process_group_compare(void *_group, const void *_key)
530 {
531 	struct process_group *group = (struct process_group*)_group;
532 	const struct team_key *key = (const struct team_key*)_key;
533 
534 	if (group->id == key->id)
535 		return 0;
536 
537 	return 1;
538 }
539 
540 
541 static uint32
542 process_group_hash(void *_group, const void *_key, uint32 range)
543 {
544 	struct process_group *group = (struct process_group*)_group;
545 	const struct team_key *key = (const struct team_key*)_key;
546 
547 	if (group != NULL)
548 		return group->id % range;
549 
550 	return (uint32)key->id % range;
551 }
552 
553 
554 static void
555 insert_team_into_parent(struct team *parent, struct team *team)
556 {
557 	ASSERT(parent != NULL);
558 
559 	team->siblings_next = parent->children;
560 	parent->children = team;
561 	team->parent = parent;
562 }
563 
564 
565 /*!	Note: must have team lock held */
566 static void
567 remove_team_from_parent(struct team *parent, struct team *team)
568 {
569 	struct team *child, *last = NULL;
570 
571 	for (child = parent->children; child != NULL; child = child->siblings_next) {
572 		if (child == team) {
573 			if (last == NULL)
574 				parent->children = child->siblings_next;
575 			else
576 				last->siblings_next = child->siblings_next;
577 
578 			team->parent = NULL;
579 			break;
580 		}
581 		last = child;
582 	}
583 }
584 
585 
586 /*!	Reparent each of our children
587 	Note: must have team lock held
588 */
589 static void
590 reparent_children(struct team *team)
591 {
592 	struct team *child;
593 
594 	while ((child = team->children) != NULL) {
595 		// remove the child from the current proc and add to the parent
596 		remove_team_from_parent(team, child);
597 		insert_team_into_parent(sKernelTeam, child);
598 	}
599 
600 	// move job control entries too
601 	sKernelTeam->stopped_children->entries.MoveFrom(
602 		&team->stopped_children->entries);
603 	sKernelTeam->continued_children->entries.MoveFrom(
604 		&team->continued_children->entries);
605 
606 	// Note, we don't move the dead children entries. Those will be deleted
607 	// when the team structure is deleted.
608 }
609 
610 
611 static bool
612 is_session_leader(struct team *team)
613 {
614 	return team->session_id == team->id;
615 }
616 
617 
618 static bool
619 is_process_group_leader(struct team *team)
620 {
621 	return team->group_id == team->id;
622 }
623 
624 
625 static void
626 deferred_delete_process_group(struct process_group *group)
627 {
628 	if (group == NULL)
629 		return;
630 
631 	// remove_group_from_session() keeps this pointer around
632 	// only if the session can be freed as well
633 	if (group->session) {
634 		TRACE(("deferred_delete_process_group(): frees session %ld\n",
635 			group->session->id));
636 		deferred_free(group->session);
637 	}
638 
639 	deferred_free(group);
640 }
641 
642 
643 /*!	Removes a group from its session; if it was the session's last group,
644 	the session pointer is kept around so the session is freed with the group.
645 	You must hold the team lock when calling this function.
646 */
647 static void
648 remove_group_from_session(struct process_group *group)
649 {
650 	struct process_session *session = group->session;
651 
652 	// the group must belong to a session for this function to have any effect
653 	if (session == NULL)
654 		return;
655 
656 	hash_remove(sGroupHash, group);
657 
658 	// we cannot free the session here, so we keep the pointer around - this
659 	// way it'll be freed when the group itself gets deleted
660 	if (--session->group_count > 0)
661 		group->session = NULL;
662 }
663 
664 
665 /*!	Team lock must be held.
666 */
667 static void
668 acquire_process_group_ref(pid_t groupID)
669 {
670 	process_group* group = team_get_process_group_locked(NULL, groupID);
671 	if (group == NULL) {
672 		panic("acquire_process_group_ref(): unknown group ID: %ld", groupID);
673 		return;
674 	}
675 
676 	group->refs++;
677 }
678 
679 
680 /*!	Team lock must be held.
681 */
682 static void
683 release_process_group_ref(pid_t groupID)
684 {
685 	process_group* group = team_get_process_group_locked(NULL, groupID);
686 	if (group == NULL) {
687 		panic("release_process_group_ref(): unknown group ID: %ld", groupID);
688 		return;
689 	}
690 
691 	if (group->refs <= 0) {
692 		panic("release_process_group_ref(%ld): ref count already 0", groupID);
693 		return;
694 	}
695 
696 	if (--group->refs > 0)
697 		return;
698 
699 	// group is no longer used
700 
701 	remove_group_from_session(group);
702 	deferred_delete_process_group(group);
703 }
704 
705 
706 /*!	You must hold the team lock when calling this function. */
707 static void
708 insert_group_into_session(struct process_session *session, struct process_group *group)
709 {
710 	if (group == NULL)
711 		return;
712 
713 	group->session = session;
714 	hash_insert(sGroupHash, group);
715 	session->group_count++;
716 }
717 
718 
719 /*!	You must hold the team lock when calling this function. */
720 static void
721 insert_team_into_group(struct process_group *group, struct team *team)
722 {
723 	team->group = group;
724 	team->group_id = group->id;
725 	team->session_id = group->session->id;
726 
727 	team->group_next = group->teams;
728 	group->teams = team;
729 	acquire_process_group_ref(group->id);
730 }
731 
732 
733 /*!	Removes the team from the group.
734 
735 	\param team the team that will be removed from its group
736 */
737 static void
738 remove_team_from_group(struct team *team)
739 {
740 	struct process_group *group = team->group;
741 	struct team *current, *last = NULL;
742 
743 	// the team must belong to a group for this function to have any effect
744 	if (group == NULL)
745 		return;
746 
747 	for (current = group->teams; current != NULL; current = current->group_next) {
748 		if (current == team) {
749 			if (last == NULL)
750 				group->teams = current->group_next;
751 			else
752 				last->group_next = current->group_next;
753 
754 			team->group = NULL;
755 			break;
756 		}
757 		last = current;
758 	}
759 
760 	team->group = NULL;
761 	team->group_next = NULL;
762 
763 	release_process_group_ref(group->id);
764 }
765 
766 
767 static struct process_group *
768 create_process_group(pid_t id)
769 {
770 	struct process_group *group = (struct process_group *)malloc(sizeof(struct process_group));
771 	if (group == NULL)
772 		return NULL;
773 
774 	group->id = id;
775 	group->refs = 0;
776 	group->session = NULL;
777 	group->teams = NULL;
778 	group->orphaned = true;
779 	return group;
780 }
781 
782 
783 static struct process_session *
784 create_process_session(pid_t id)
785 {
786 	struct process_session *session
787 		= (struct process_session *)malloc(sizeof(struct process_session));
788 	if (session == NULL)
789 		return NULL;
790 
791 	session->id = id;
792 	session->group_count = 0;
793 	session->controlling_tty = -1;
794 	session->foreground_group = -1;
795 
796 	return session;
797 }
798 
799 
800 static void
801 set_team_name(struct team* team, const char* name)
802 {
803 	if (const char* lastSlash = strrchr(name, '/'))
804 		name = lastSlash + 1;
805 
806 	strlcpy(team->name, name, B_OS_NAME_LENGTH);
807 }
808 
809 
810 static struct team *
811 create_team_struct(const char *name, bool kernel)
812 {
813 	struct team *team = (struct team *)malloc(sizeof(struct team));
814 	if (team == NULL)
815 		return NULL;
816 	MemoryDeleter teamDeleter(team);
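	// The deleters free the allocations automatically on any of the early
	// error returns below; they are Detach()ed at the very end, once the
	// team structure is fully set up.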
817 
818 	team->next = team->siblings_next = team->children = team->parent = NULL;
819 	team->id = allocate_thread_id();
820 	set_team_name(team, name);
821 	team->args[0] = '\0';
822 	team->num_threads = 0;
823 	team->io_context = NULL;
824 	team->address_space = NULL;
825 	team->thread_list = NULL;
826 	team->main_thread = NULL;
827 	team->loading_info = NULL;
828 	team->state = TEAM_STATE_BIRTH;
829 	team->flags = 0;
830 	team->death_sem = -1;
831 
832 	team->supplementary_groups = NULL;
833 	team->supplementary_group_count = 0;
834 
835 	team->dead_threads_kernel_time = 0;
836 	team->dead_threads_user_time = 0;
837 
838 	// dead threads
839 	list_init(&team->dead_threads);
840 	team->dead_threads_count = 0;
841 
842 	// dead children
843 	team->dead_children = new(nothrow) team_dead_children;
844 	if (team->dead_children == NULL)
845 		return NULL;
846 	ObjectDeleter<team_dead_children> deadChildrenDeleter(team->dead_children);
847 
848 	team->dead_children->count = 0;
849 	team->dead_children->kernel_time = 0;
850 	team->dead_children->user_time = 0;
851 
852 	// stopped children
853 	team->stopped_children = new(nothrow) team_job_control_children;
854 	if (team->stopped_children == NULL)
855 		return NULL;
856 	ObjectDeleter<team_job_control_children> stoppedChildrenDeleter(
857 		team->stopped_children);
858 
859 	// continued children
860 	team->continued_children = new(nothrow) team_job_control_children;
861 	if (team->continued_children == NULL)
862 		return NULL;
863 	ObjectDeleter<team_job_control_children> continuedChildrenDeleter(
864 		team->continued_children);
865 
866 	// job control entry
867 	team->job_control_entry = new(nothrow) job_control_entry;
868 	if (team->job_control_entry == NULL)
869 		return NULL;
870 	ObjectDeleter<job_control_entry> jobControlEntryDeleter(
871 		team->job_control_entry);
872 	team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
873 	team->job_control_entry->thread = team->id;
874 	team->job_control_entry->team = team;
875 
876 	list_init(&team->image_list);
877 	list_init(&team->watcher_list);
878 
879 	clear_team_debug_info(&team->debug_info, true);
880 
881 	if (arch_team_init_team_struct(team, kernel) < 0)
882 		return NULL;
883 
884 	// publish dead/stopped/continued children condition vars
885 	team->dead_children->condition_variable.Publish(team->dead_children,
886 		"team children");
887 
888 	// keep all allocated structures
889 	jobControlEntryDeleter.Detach();
890 	continuedChildrenDeleter.Detach();
891 	stoppedChildrenDeleter.Detach();
892 	deadChildrenDeleter.Detach();
893 	teamDeleter.Detach();
894 
895 	return team;
896 }
897 
898 
899 static void
900 delete_team_struct(struct team *team)
901 {
902 	team->dead_children->condition_variable.Unpublish();
903 
904 	while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
905 			&team->dead_threads)) {
906 		free(threadDeathEntry);
907 	}
908 
909 	while (job_control_entry* entry = team->dead_children->entries.RemoveHead())
910 		delete entry;
911 
912 	malloc_referenced_release(team->supplementary_groups);
913 
914 	delete team->job_control_entry;
915 		// usually already NULL and transferred to the parent
916 	delete team->continued_children;
917 	delete team->stopped_children;
918 	delete team->dead_children;
919 	free(team);
920 }
921 
922 
923 static uint32
924 get_arguments_data_size(char **args, int32 argc)
925 {
926 	uint32 size = 0;
927 	int32 count;
928 
929 	for (count = 0; count < argc; count++)
930 		size += strlen(args[count]) + 1;
931 
932 	return size + (argc + 1) * sizeof(char *) + sizeof(struct user_space_program_args);
933 }
934 
935 
936 static void
937 free_team_arg(struct team_arg *teamArg)
938 {
939 	free_strings_array(teamArg->args, teamArg->arg_count);
940 	free_strings_array(teamArg->env, teamArg->env_count);
941 
942 	free(teamArg);
943 }
944 
945 
946 static status_t
947 create_team_arg(struct team_arg **_teamArg, int32 argCount, char * const *args,
948 	int32 envCount, char * const *env, port_id port, uint32 token, bool kernel)
949 {
950 	status_t status;
951 	char **argsCopy;
952 	char **envCopy;
953 
954 	struct team_arg *teamArg = (struct team_arg *)malloc(sizeof(struct team_arg));
955 	if (teamArg == NULL)
956 		return B_NO_MEMORY;
957 
958 	// copy the args over
959 
960 	status = copy_strings_array(args, argCount, &argsCopy, kernel);
961 	if (status != B_OK)
962 		return status;
963 
964 	status = copy_strings_array(env, envCount, &envCopy, kernel);
965 	if (status != B_OK) {
966 		free_strings_array(argsCopy, argCount);
967 		return status;
968 	}
969 
970 	teamArg->arg_count = argCount;
971 	teamArg->args = argsCopy;
972 	teamArg->env_count = envCount;
973 	teamArg->env = envCopy;
974 	teamArg->error_port = port;
975 	teamArg->error_token = token;
976 
977 	*_teamArg = teamArg;
978 	return B_OK;
979 }
980 
981 
982 static int32
983 team_create_thread_start(void *args)
984 {
985 	status_t err;
986 	struct thread *t;
987 	struct team *team;
988 	struct team_arg *teamArgs = (struct team_arg*)args;
989 	const char *path;
990 	addr_t entry;
991 	char ustack_name[128];
992 	uint32 sizeLeft;
993 	char **userArgs;
994 	char **userEnv;
995 	char *userDest;
996 	struct user_space_program_args *programArgs;
997 	uint32 argCount, envCount, i;
998 
999 	t = thread_get_current_thread();
1000 	team = t->team;
1001 	cache_node_launched(teamArgs->arg_count, teamArgs->args);
1002 
1003 	TRACE(("team_create_thread_start: entry thread %ld\n", t->id));
1004 
1005 	// create an initial primary stack area
1006 
1007 	// Main stack area layout is currently as follows (starting from 0):
1008 	//
1009 	// size							| usage
1010 	// -----------------------------+--------------------------------
1011 	// USER_MAIN_THREAD_STACK_SIZE	| actual stack
1012 	// TLS_SIZE						| TLS data
1013 	// ENV_SIZE						| environment variables
1014 	// arguments size				| arguments passed to the team
1015 
1016 	// ToDo: ENV_SIZE is a) limited, and b) not used after libroot has copied it to the heap
1017 	// ToDo: we could reserve the whole USER_STACK_REGION upfront...
1018 
1019 	sizeLeft = PAGE_ALIGN(USER_MAIN_THREAD_STACK_SIZE + TLS_SIZE + ENV_SIZE +
1020 		get_arguments_data_size(teamArgs->args, teamArgs->arg_count));
1021 	t->user_stack_base = USER_STACK_REGION + USER_STACK_REGION_SIZE - sizeLeft;
1022 	t->user_stack_size = USER_MAIN_THREAD_STACK_SIZE;
1023 		// the exact location at the end of the user stack area
1024 
1025 	sprintf(ustack_name, "%s_main_stack", team->name);
1026 	t->user_stack_area = create_area_etc(team, ustack_name, (void **)&t->user_stack_base,
1027 		B_EXACT_ADDRESS, sizeLeft, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA);
1028 	if (t->user_stack_area < 0) {
1029 		dprintf("team_create_thread_start: could not create default user stack region\n");
1030 
1031 		free_team_arg(teamArgs);
1032 		return t->user_stack_area;
1033 	}
1034 
1035 	// now that the TLS area is allocated, initialize TLS
1036 	arch_thread_init_tls(t);
1037 
1038 	argCount = teamArgs->arg_count;
1039 	envCount = teamArgs->env_count;
1040 
1041 	programArgs = (struct user_space_program_args *)(t->user_stack_base
1042 		+ t->user_stack_size + TLS_SIZE + ENV_SIZE);
1043 	userArgs = (char **)(programArgs + 1);
1044 	userDest = (char *)(userArgs + argCount + 1);
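	// The argument block is laid out from bottom to top: the
	// user_space_program_args structure, the NULL-terminated argv pointer
	// array (argCount + 1 entries), and finally the argument strings.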
1045 
1046 	TRACE(("addr: stack base = 0x%lx, userArgs = %p, userDest = %p, sizeLeft = %lu\n",
1047 		t->user_stack_base, userArgs, userDest, sizeLeft));
1048 
1049 	sizeLeft = t->user_stack_base + sizeLeft - (addr_t)userDest;
1050 
1051 	for (i = 0; i < argCount; i++) {
1052 		ssize_t length = user_strlcpy(userDest, teamArgs->args[i], sizeLeft);
1053 		if (length < B_OK) {
1054 			argCount = 0;
1055 			break;
1056 		}
1057 
1058 		userArgs[i] = userDest;
1059 		userDest += ++length;
1060 		sizeLeft -= length;
1061 	}
1062 	userArgs[argCount] = NULL;
1063 
1064 	userEnv = (char **)(t->user_stack_base + t->user_stack_size + TLS_SIZE);
1065 	sizeLeft = ENV_SIZE;
1066 	userDest = (char *)userEnv + ENV_SIZE - 1;
1067 		// the environment variables are copied from back to front
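		// (the userEnv pointer array grows upwards while the string data
		// grows downwards from the end of ENV_SIZE; the loop below truncates
		// the environment if the two would meet)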
1068 
1069 	TRACE(("team_create_thread_start: envc: %ld, env: %p\n",
1070 		teamArgs->env_count, (void *)teamArgs->env));
1071 
1072 	for (i = 0; i < envCount; i++) {
1073 		ssize_t length = strlen(teamArgs->env[i]) + 1;
1074 		userDest -= length;
1075 		if (userDest < (char *)&userEnv[envCount]) {
1076 			envCount = i;
1077 			break;
1078 		}
1079 
1080 		userEnv[i] = userDest;
1081 
1082 		if (user_memcpy(userDest, teamArgs->env[i], length) < B_OK) {
1083 			envCount = 0;
1084 			break;
1085 		}
1086 
1087 		sizeLeft -= length;
1088 	}
1089 	userEnv[envCount] = NULL;
1090 
1091 	path = teamArgs->args[0];
1092 	if (user_memcpy(programArgs->program_path, path,
1093 				sizeof(programArgs->program_path)) < B_OK
1094 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1095 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char **)) < B_OK
1096 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1097 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char **)) < B_OK
1098 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1099 				sizeof(port_id)) < B_OK
1100 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1101 				sizeof(uint32)) < B_OK) {
1102 		// the team deletion process will clean up this mess
1103 		return B_BAD_ADDRESS;
1104 	}
1105 
1106 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1107 
1108 	// add args to info member
1109 	team->args[0] = 0;
1110 	strlcpy(team->args, path, sizeof(team->args));
1111 	for (i = 1; i < argCount; i++) {
1112 		strlcat(team->args, " ", sizeof(team->args));
1113 		strlcat(team->args, teamArgs->args[i], sizeof(team->args));
1114 	}
1115 
1116 	free_team_arg(teamArgs);
1117 		// the arguments are already on the user stack; we no longer need them in this form
1118 
1119 	// ToDo: don't use fixed paths!
1120 	err = elf_load_user_image("/boot/beos/system/runtime_loader", team, 0, &entry);
1121 	if (err < B_OK) {
1122 		// Luckily, we don't have to clean up the mess we created - that's
1123 		// done for us by the normal team deletion process
1124 		TRACE(("team_create_thread_start: error when elf_load_user_image() %s\n", strerror(err)));
1125 		return err;
1126 	}
1127 
1128 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1129 
1130 	team->state = TEAM_STATE_NORMAL;
1131 
1132 	// jump to the entry point in user space
1133 	return arch_thread_enter_userspace(t, entry, programArgs, NULL);
1134 		// only returns in case of error
1135 }
1136 
1137 
1138 /*!	The BeOS kernel exports a function with this name, but most probably with
1139 	different parameters; we should not make it public.
1140 */
1141 static thread_id
1142 load_image_etc(int32 argCount, char * const *args, int32 envCount,
1143 	char * const *env, int32 priority, uint32 flags,
1144 	port_id errorPort, uint32 errorToken, bool kernel)
1145 {
1146 	struct team *team, *parent;
1147 	const char *threadName;
1148 	thread_id thread;
1149 	status_t status;
1150 	cpu_status state;
1151 	struct team_arg *teamArgs;
1152 	struct team_loading_info loadingInfo;
1153 
1154 	if (args == NULL || argCount == 0)
1155 		return B_BAD_VALUE;
1156 
1157 	TRACE(("load_image_etc: name '%s', args = %p, argCount = %ld\n",
1158 		args[0], args, argCount));
1159 
1160 	team = create_team_struct(args[0], false);
1161 	if (team == NULL)
1162 		return B_NO_MEMORY;
1163 
1164 	parent = thread_get_current_thread()->team;
1165 
1166 	if (flags & B_WAIT_TILL_LOADED) {
1167 		loadingInfo.thread = thread_get_current_thread();
1168 		loadingInfo.result = B_ERROR;
1169 		loadingInfo.done = false;
1170 		team->loading_info = &loadingInfo;
1171 	}
1172 
1173 	// Inherit the parent's user/group, but also check the executable's
1174 	// set-user/group-id permission
1175 	inherit_parent_user_and_group(team, parent);
1176 	update_set_id_user_and_group(team, args[0]);
1177 
1178 	state = disable_interrupts();
1179 	GRAB_TEAM_LOCK();
1180 
1181 	hash_insert(sTeamHash, team);
1182 	insert_team_into_parent(parent, team);
1183 	insert_team_into_group(parent->group, team);
1184 	sUsedTeams++;
1185 
1186 	RELEASE_TEAM_LOCK();
1187 	restore_interrupts(state);
1188 
1189 	status = create_team_arg(&teamArgs, argCount, args, envCount, env,
1190 		errorPort, errorToken, kernel);
1191 	if (status != B_OK)
1192 		goto err1;
1193 
1194 	// create a new io_context for this team
1195 	team->io_context = vfs_new_io_context(parent->io_context);
1196 	if (!team->io_context) {
1197 		status = B_NO_MEMORY;
1198 		goto err2;
1199 	}
1200 
1201 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1202 	vfs_exec_io_context(team->io_context);
1203 
1204 	// create an address space for this team
1205 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1206 		&team->address_space);
1207 	if (status < B_OK)
1208 		goto err3;
1209 
1210 	// cut the path from the main thread name
1211 	threadName = strrchr(args[0], '/');
1212 	if (threadName != NULL)
1213 		threadName++;
1214 	else
1215 		threadName = args[0];
1216 
1217 	// Create a kernel thread, but under the context of the new team
1218 	// The new thread will take over ownership of teamArgs
1219 	thread = spawn_kernel_thread_etc(team_create_thread_start, threadName,
1220 		B_NORMAL_PRIORITY, teamArgs, team->id, team->id);
1221 	if (thread < 0) {
1222 		status = thread;
1223 		goto err4;
1224 	}
1225 
1226 	// wait for the loader of the new team to finish its work
1227 	if (flags & B_WAIT_TILL_LOADED) {
1228 		struct thread *mainThread;
1229 
1230 		state = disable_interrupts();
1231 		GRAB_THREAD_LOCK();
1232 
1233 		mainThread = thread_get_thread_struct_locked(thread);
1234 		if (mainThread) {
1235 			// resume the team's main thread
1236 			if (mainThread->state == B_THREAD_SUSPENDED)
1237 				scheduler_enqueue_in_run_queue(mainThread);
1238 
1239 			// Now suspend ourselves until loading is finished.
1240 			// We will be woken either by the thread, when it finished or
1241 			// aborted loading, or when the team is going to die (e.g. is
1242 			// killed). In either case the one setting `loadingInfo.done' is
1243 			// responsible for removing the info from the team structure.
1244 			while (!loadingInfo.done) {
1245 				thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
1246 				scheduler_reschedule();
1247 			}
1248 		} else {
1249 			// Impressive! Someone managed to kill the thread in this short
1250 			// time.
1251 		}
1252 
1253 		RELEASE_THREAD_LOCK();
1254 		restore_interrupts(state);
1255 
1256 		if (loadingInfo.result < B_OK)
1257 			return loadingInfo.result;
1258 	}
1259 
1260 	// notify the debugger
1261 	user_debug_team_created(team->id);
1262 
1263 	return thread;
1264 
1265 err4:
1266 	vm_put_address_space(team->address_space);
1267 err3:
1268 	vfs_free_io_context(team->io_context);
1269 err2:
1270 	free_team_arg(teamArgs);
1271 err1:
1272 	// remove the team from the hash table and delete the team structure
1273 	state = disable_interrupts();
1274 	GRAB_TEAM_LOCK();
1275 
1276 	remove_team_from_group(team);
1277 	remove_team_from_parent(parent, team);
1278 	hash_remove(sTeamHash, team);
1279 
1280 	RELEASE_TEAM_LOCK();
1281 	restore_interrupts(state);
1282 
1283 	delete_team_struct(team);
1284 
1285 	return status;
1286 }
1287 
1288 
1289 /*!	Almost shuts down the current team and loads a new image into it.
1290 	If successful, this function does not return and takes over ownership of
1291 	the arguments provided.
1292 	This function may only be called from user space.
1293 */
1294 static status_t
1295 exec_team(const char *path, int32 argCount, char * const *args,
1296 	int32 envCount, char * const *env)
1297 {
1298 	struct team *team = thread_get_current_thread()->team;
1299 	struct team_arg *teamArgs;
1300 	const char *threadName;
1301 	status_t status = B_OK;
1302 	cpu_status state;
1303 	struct thread *thread;
1304 	thread_id nubThreadID = -1;
1305 
1306 	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
1307 		args[0], argCount, envCount, team->id));
1308 
1309 	// switching the kernel at run time is probably not a good idea :)
1310 	if (team == team_get_kernel_team())
1311 		return B_NOT_ALLOWED;
1312 
1313 	// we currently need to be single threaded here
1314 	// ToDo: maybe we should just kill all other threads and
1315 	//	make the current thread the team's main thread?
1316 	if (team->main_thread != thread_get_current_thread())
1317 		return B_NOT_ALLOWED;
1318 
1319 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1320 	// We iterate through the thread list to make sure that there's no other
1321 	// thread.
1322 	state = disable_interrupts();
1323 	GRAB_TEAM_LOCK();
1324 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1325 
1326 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1327 		nubThreadID = team->debug_info.nub_thread;
1328 
1329 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1330 
1331 	for (thread = team->thread_list; thread; thread = thread->team_next) {
1332 		if (thread != team->main_thread && thread->id != nubThreadID) {
1333 			status = B_NOT_ALLOWED;
1334 			break;
1335 		}
1336 	}
1337 
1338 	RELEASE_TEAM_LOCK();
1339 	restore_interrupts(state);
1340 
1341 	if (status != B_OK)
1342 		return status;
1343 
1344 	status = create_team_arg(&teamArgs, argCount, args, envCount, env,
1345 		-1, 0, false);
1346 	if (status != B_OK)
1347 		return status;
1348 
1349 	T(ExecTeam(path, teamArgs->arg_count, teamArgs->args, envCount, env));
1350 		// trace here, so we don't have to deal with the user addresses
1351 
1352 	// replace args[0] with the path argument, just to be on the safe side
1353 	free(teamArgs->args[0]);
1354 	teamArgs->args[0] = strdup(path);
1355 
1356 	// ToDo: remove team resources if there are any left
1357 	// thread_atkernel_exit() might not be called at all
1358 
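	// From this point on there is no way back: the old program's areas,
	// ports, semaphores, and images are torn down below, before the new
	// image is loaded into the very same team.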
1359 	thread_reset_for_exec();
1360 
1361 	user_debug_prepare_for_exec();
1362 
1363 	vm_delete_areas(team->address_space);
1364 	delete_owned_ports(team->id);
1365 	sem_delete_owned_sems(team->id);
1366 	remove_images(team);
1367 	vfs_exec_io_context(team->io_context);
1368 
1369 	user_debug_finish_after_exec();
1370 
1371 	// rename the team
1372 
1373 	set_team_name(team, path);
1374 
1375 	// cut the path from the team name and rename the main thread, too
1376 	threadName = strrchr(path, '/');
1377 	if (threadName != NULL)
1378 		threadName++;
1379 	else
1380 		threadName = path;
1381 	rename_thread(thread_get_current_thread_id(), threadName);
1382 
1383 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1384 
1385 	// Update user/group according to the executable's set-user/group-id
1386 	// permission.
1387 	update_set_id_user_and_group(team, path);
1388 
1389 	status = team_create_thread_start(teamArgs);
1390 		// this one usually doesn't return...
1391 
1392 	// sorry, we have to kill ourselves, there is no way out anymore
1393 	// (without any areas left and all that)
1394 	exit_thread(status);
1395 
1396 	// we return a status here since the signal that is sent by the
1397 	// call above is not immediately handled
1398 	return B_ERROR;
1399 }
1400 
1401 
1402 /*! This is the first function to be called from the newly created
1403 	main child thread.
1404 	It fills in whatever is left to do from the fork_arg, and then
1405 	returns from the parent's fork() syscall into the child.
1406 */
1407 static int32
1408 fork_team_thread_start(void *_args)
1409 {
1410 	struct thread *thread = thread_get_current_thread();
1411 	struct fork_arg *forkArgs = (struct fork_arg *)_args;
1412 
1413 	struct arch_fork_arg archArgs = forkArgs->arch_info;
1414 		// we need a local copy of the arch dependent part
1415 
1416 	thread->user_stack_area = forkArgs->user_stack_area;
1417 	thread->user_stack_base = forkArgs->user_stack_base;
1418 	thread->user_stack_size = forkArgs->user_stack_size;
1419 	thread->user_local_storage = forkArgs->user_local_storage;
1420 	thread->sig_block_mask = forkArgs->sig_block_mask;
1421 
1422 	arch_thread_init_tls(thread);
1423 
1424 	free(forkArgs);
1425 
1426 	// set this thread's frame to the one stored by the parent thread, too
1427 
1428 	arch_restore_fork_frame(&archArgs);
1429 		// This one won't return here
1430 
1431 	return 0;
1432 }
1433 
1434 
1435 static thread_id
1436 fork_team(void)
1437 {
1438 	struct thread *parentThread = thread_get_current_thread();
1439 	struct team *parentTeam = parentThread->team, *team;
1440 	struct fork_arg *forkArgs;
1441 	struct area_info info;
1442 	thread_id threadID;
1443 	cpu_status state;
1444 	status_t status;
1445 	int32 cookie;
1446 
1447 	TRACE(("fork_team(): team %ld\n", parentTeam->id));
1448 
1449 	if (parentTeam == team_get_kernel_team())
1450 		return B_NOT_ALLOWED;
1451 
1452 	// create a new team
1453 	// ToDo: this is very similar to team_create_team() - maybe we can do something about it :)
1454 
1455 	team = create_team_struct(parentTeam->name, false);
1456 	if (team == NULL)
1457 		return B_NO_MEMORY;
1458 
1459 	strlcpy(team->args, parentTeam->args, sizeof(team->args));
1460 
1461 	// Inherit the parent's user/group.
1462 	inherit_parent_user_and_group(team, parentTeam);
1463 
1464 	state = disable_interrupts();
1465 	GRAB_TEAM_LOCK();
1466 
1467 	hash_insert(sTeamHash, team);
1468 	insert_team_into_parent(parentTeam, team);
1469 	insert_team_into_group(parentTeam->group, team);
1470 	sUsedTeams++;
1471 
1472 	RELEASE_TEAM_LOCK();
1473 	restore_interrupts(state);
1474 
1475 	forkArgs = (struct fork_arg *)malloc(sizeof(struct fork_arg));
1476 	if (forkArgs == NULL) {
1477 		status = B_NO_MEMORY;
1478 		goto err1;
1479 	}
1480 
1481 	// create a new io_context for this team
1482 	team->io_context = vfs_new_io_context(parentTeam->io_context);
1483 	if (!team->io_context) {
1484 		status = B_NO_MEMORY;
1485 		goto err2;
1486 	}
1487 
1488 	// create an address space for this team
1489 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1490 		&team->address_space);
1491 	if (status < B_OK)
1492 		goto err3;
1493 
1494 	// copy all areas of the team
1495 	// ToDo: should be able to handle stack areas differently (ie. don't have them copy-on-write)
1496 	// ToDo: all stacks of other threads than the current one could be left out
1497 
1498 	cookie = 0;
1499 	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
1500 		void *address;
1501 		area_id area = vm_copy_area(team->address_space->id, info.name,
1502 			&address, B_CLONE_ADDRESS, info.protection, info.area);
1503 		if (area < B_OK) {
1504 			status = area;
1505 			break;
1506 		}
1507 
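		// Remember the child's copy of the parent thread's stack area; the
		// fork trampoline (fork_team_thread_start()) installs it as the new
		// main thread's user stack.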
1508 		if (info.area == parentThread->user_stack_area)
1509 			forkArgs->user_stack_area = area;
1510 	}
1511 
1512 	if (status < B_OK)
1513 		goto err4;
1514 
1515 	forkArgs->user_stack_base = parentThread->user_stack_base;
1516 	forkArgs->user_stack_size = parentThread->user_stack_size;
1517 	forkArgs->user_local_storage = parentThread->user_local_storage;
1518 	forkArgs->sig_block_mask = parentThread->sig_block_mask;
1519 	arch_store_fork_frame(&forkArgs->arch_info);
1520 
1521 	// ToDo: copy image list
1522 
1523 	// create a kernel thread under the context of the new team
1524 	threadID = spawn_kernel_thread_etc(fork_team_thread_start,
1525 		parentThread->name, parentThread->priority, forkArgs,
1526 		team->id, team->id);
1527 	if (threadID < 0) {
1528 		status = threadID;
1529 		goto err4;
1530 	}
1531 
1532 	// notify the debugger
1533 	user_debug_team_created(team->id);
1534 
1535 	T(TeamForked(threadID));
1536 
1537 	resume_thread(threadID);
1538 	return threadID;
1539 
1540 err4:
1541 	vm_delete_address_space(team->address_space);
1542 err3:
1543 	vfs_free_io_context(team->io_context);
1544 err2:
1545 	free(forkArgs);
1546 err1:
1547 	// remove the team from the hash table and delete the team structure
1548 	state = disable_interrupts();
1549 	GRAB_TEAM_LOCK();
1550 
1551 	remove_team_from_group(team);
1552 	remove_team_from_parent(parentTeam, team);
1553 	hash_remove(sTeamHash, team);
1554 
1555 	RELEASE_TEAM_LOCK();
1556 	restore_interrupts(state);
1557 
1558 	delete_team_struct(team);
1559 
1560 	return status;
1561 }
1562 
1563 
1564 /*!	Returns whether the specified team \a parent has any children belonging
1565 	to the process group with ID \a groupID.
1566 	Must be called with the team lock held.
1567 */
1568 static bool
1569 has_children_in_group(struct team *parent, pid_t groupID)
1570 {
1571 	struct team *team;
1572 
1573 	struct process_group *group = team_get_process_group_locked(
1574 		parent->group->session, groupID);
1575 	if (group == NULL)
1576 		return false;
1577 
1578 	for (team = group->teams; team; team = team->group_next) {
1579 		if (team->parent == parent)
1580 			return true;
1581 	}
1582 
1583 	return false;
1584 }
1585 
1586 
1587 static job_control_entry*
1588 get_job_control_entry(team_job_control_children* children, pid_t id)
1589 {
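	// waitpid() semantics: id > 0 matches that specific child, id == -1
	// matches any child, and id < -1 matches any child in process group -id.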
1590 	for (JobControlEntryList::Iterator it = children->entries.GetIterator();
1591 		 job_control_entry* entry = it.Next();) {
1592 
1593 		if (id > 0) {
1594 			if (entry->thread == id)
1595 				return entry;
1596 		} else if (id == -1) {
1597 			return entry;
1598 		} else {
1599 			pid_t processGroup
1600 				= (entry->team ? entry->team->group_id : entry->group_id);
1601 			if (processGroup == -id)
1602 				return entry;
1603 		}
1604 	}
1605 
1606 	return NULL;
1607 }
1608 
1609 
1610 static job_control_entry*
1611 get_job_control_entry(struct team* team, pid_t id, uint32 flags)
1612 {
1613 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
1614 
1615 	if (entry == NULL && (flags & WCONTINUED) != 0)
1616 		entry = get_job_control_entry(team->continued_children, id);
1617 
1618 	if (entry == NULL && (flags & WUNTRACED) != 0)
1619 		entry = get_job_control_entry(team->stopped_children, id);
1620 
1621 	return entry;
1622 }
1623 
1624 
1625 job_control_entry::job_control_entry()
1626 	:
1627 	has_group_ref(false)
1628 {
1629 }
1630 
1631 
1632 job_control_entry::~job_control_entry()
1633 {
1634 	if (has_group_ref) {
1635 		InterruptsSpinLocker locker(team_spinlock);
1636 		release_process_group_ref(group_id);
1637 	}
1638 }
1639 
1640 
1641 /*!	Team and thread lock must be held.
1642 */
1643 void
1644 job_control_entry::InitDeadState()
1645 {
1646 	if (team != NULL) {
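		// The team structure is about to go away, so cache everything the
		// wait*() functions will need, and pin the process group with a
		// reference, so that waiting by process group ID keeps working.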
1647 		struct thread* thread = team->main_thread;
1648 		group_id = team->group_id;
1649 		this->thread = thread->id;
1650 		status = thread->exit.status;
1651 		reason = thread->exit.reason;
1652 		signal = thread->exit.signal;
1653 		team = NULL;
1654 		acquire_process_group_ref(group_id);
1655 		has_group_ref = true;
1656 	}
1657 }
1658 
1659 
1660 job_control_entry&
1661 job_control_entry::operator=(const job_control_entry& other)
1662 {
1663 	state = other.state;
1664 	thread = other.thread;
1665 	has_group_ref = false;
1666 	team = other.team;
1667 	group_id = other.group_id;
1668 	status = other.status;
1669 	reason = other.reason;
1670 	signal = other.signal;
1671 
1672 	return *this;
1673 }
1674 
1675 
1676 /*! This is the kernel backend for waitpid(). It is a bit more expressive
1677 	than waitpid() itself when it comes to the reason why a thread has died.
1678 */
1679 static thread_id
1680 wait_for_child(pid_t child, uint32 flags, int32 *_reason,
1681 	status_t *_returnCode)
1682 {
1683 	struct thread* thread = thread_get_current_thread();
1684 	struct team* team = thread->team;
1685 	struct job_control_entry foundEntry;
1686 	struct job_control_entry* freeDeathEntry = NULL;
1687 	status_t status = B_OK;
1688 
1689 	TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));
1690 
1691 	T(WaitForChild(child, flags));
1692 
1693 	if (child == 0) {
1694 		// wait for all children in the process group of the calling team
1695 		child = -team->group_id;
1696 	}
1697 
1698 	bool ignoreFoundEntries = false;
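	// If SIGCHLD is ignored or SA_NOCLDWAIT is set, we have to discard any
	// status we find and keep waiting until all children are gone (cf. the
	// check after the wait below); these flags implement that.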
1699 	bool ignoreFoundEntriesChecked = false;
1700 
1701 	while (true) {
1702 		InterruptsSpinLocker locker(team_spinlock);
1703 
1704 		// check whether any condition holds
1705 		job_control_entry* entry = get_job_control_entry(team, child, flags);
1706 
1707 		// If we don't have an entry yet, check whether there are any children
1708 		// complying to the process group specification at all.
1709 		if (entry == NULL) {
1710 			// No success yet -- check whether there are any children we could
1711 			// wait for.
1712 			bool childrenExist = false;
1713 			if (child == -1) {
1714 				childrenExist = team->children != NULL;
1715 			} else if (child < -1) {
1716 				childrenExist = has_children_in_group(team, -child);
1717 			} else {
1718 				if (struct team* childTeam = team_get_team_struct_locked(child))
1719 					childrenExist = childTeam->parent == team;
1720 			}
1721 
1722 			if (!childrenExist) {
1723 				// there is no child we could wait for
1724 				status = ECHILD;
1725 			} else {
1726 				// the children we're waiting for are still running
1727 				status = B_WOULD_BLOCK;
1728 			}
1729 		} else {
1730 			// got something
1731 			foundEntry = *entry;
1732 			if (entry->state == JOB_CONTROL_STATE_DEAD) {
1733 				// The child is dead. Reap its death entry.
1734 				freeDeathEntry = entry;
1735 				team->dead_children->entries.Remove(entry);
1736 				team->dead_children->count--;
1737 			} else {
1738 				// The child is well. Reset its job control state.
1739 				team_set_job_control_state(entry->team,
1740 					JOB_CONTROL_STATE_NONE, 0, false);
1741 			}
1742 		}
1743 
1744 		// If we haven't got anything yet, prepare to wait on the
1745 		// condition variable.
1746 		ConditionVariableEntry deadWaitEntry;
1747 
1748 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
1749 			deadWaitEntry.Add(team->dead_children, B_CAN_INTERRUPT);
1750 
1751 		locker.Unlock();
1752 
1753 		// we got our entry and can return to our caller
1754 		if (status == B_OK) {
1755 			if (ignoreFoundEntries) {
1756 				// ... unless we shall ignore found entries
1757 				delete freeDeathEntry;
1758 				freeDeathEntry = NULL;
1759 				continue;
1760 			}
1761 
1762 			break;
1763 		}
1764 
1765 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
1766 			T(WaitForChildDone(status));
1767 			return status;
1768 		}
1769 
1770 		status = deadWaitEntry.Wait();
1771 		if (status == B_INTERRUPTED) {
1772 			T(WaitForChildDone(status));
1773 			return status;
1774 		}
1775 
1776 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
1777 		// all our children are dead and fail with ECHILD. We check the
1778 		// condition at this point.
1779 		if (!ignoreFoundEntriesChecked) {
1780 			struct sigaction& handler = thread->sig_action[SIGCHLD - 1];
1781 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
1782 				|| handler.sa_handler == SIG_IGN) {
1783 				ignoreFoundEntries = true;
1784 			}
1785 
1786 			ignoreFoundEntriesChecked = true;
1787 		}
1788 	}
1789 
1790 	delete freeDeathEntry;
1791 
1792 	// When we get here, we have a valid death entry, and it has already
1793 	// been unregistered from the team or group.
1794 	int reason = 0;
1795 	switch (foundEntry.state) {
1796 		case JOB_CONTROL_STATE_DEAD:
1797 			reason = foundEntry.reason;
1798 			break;
1799 		case JOB_CONTROL_STATE_STOPPED:
1800 			reason = THREAD_STOPPED;
1801 			break;
1802 		case JOB_CONTROL_STATE_CONTINUED:
1803 			reason = THREAD_CONTINUED;
1804 			break;
1805 		case JOB_CONTROL_STATE_NONE:
1806 			// can't happen
1807 			break;
1808 	}
1809 
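	// Encode the result for the caller: the signal number goes into the
	// upper 16 bits of *_reason, the THREAD_* reason code into the lower 16,
	// i.e. signal = (reason >> 16) & 0xffff, code = reason & 0xffff.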
1810 	*_returnCode = foundEntry.status;
1811 	*_reason = (foundEntry.signal << 16) | reason;
1812 
1813 	// If SIGCHLD is blocked, we clear the pending SIGCHLD, provided no other
1814 	// child status is available.
1815 	if (is_signal_blocked(SIGCHLD)) {
1816 		InterruptsSpinLocker locker(team_spinlock);
1817 
1818 		if (get_job_control_entry(team, child, flags) == NULL)
1819 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));
1820 	}
1821 
1822 	T(WaitForChildDone(foundEntry));
1823 
1824 	return foundEntry.thread;
1825 }
1826 
1827 
1828 /*! Fills the team_info structure with information from the specified
1829 	team.
1830 	The team lock must be held when called.
1831 */
1832 static status_t
1833 fill_team_info(struct team *team, team_info *info, size_t size)
1834 {
1835 	if (size != sizeof(team_info))
1836 		return B_BAD_VALUE;
1837 
1838 	// ToDo: Set more information in team_info
1839 	memset(info, 0, size);
1840 
1841 	info->team = team->id;
1842 	info->thread_count = team->num_threads;
1843 	info->image_count = count_images(team);
1844 	//info->area_count =
1845 	info->debugger_nub_thread = team->debug_info.nub_thread;
1846 	info->debugger_nub_port = team->debug_info.nub_port;
1847 	//info->uid =
1848 	//info->gid =
1849 
1850 	strlcpy(info->args, team->args, sizeof(info->args));
1851 	info->argc = 1;
1852 
1853 	return B_OK;
1854 }
1855 
1856 
1857 /*!	Updates the \c orphaned field of a process_group and returns its new value.
1858 	Interrupts must be disabled and the team lock must be held.
1859 */
1860 static bool
1861 update_orphaned_process_group(process_group* group, pid_t dyingProcess)
1862 {
1863 	// Orphaned Process Group: "A process group in which the parent of every
1864 	// member is either itself a member of the group or is not a member of the
1865 	// group's session." (Open Group Base Specs Issue 6)
1866 
1867 	// once orphaned, things won't change (exception: cf. setpgid())
1868 	if (group->orphaned)
1869 		return true;
1870 
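	// A member whose parent is still alive (and not the dying process),
	// belongs to another group, but to the same session, keeps the group
	// from being orphaned.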
1871 	struct team* team = group->teams;
1872 	while (team != NULL) {
1873 		struct team* parent = team->parent;
1874 		if (team->id != dyingProcess && parent != NULL
1875 			&& parent->id != dyingProcess
1876 			&& parent->group_id != group->id
1877 			&& parent->session_id == group->session->id) {
1878 			return false;
1879 		}
1880 
1881 		team = team->group_next;
1882 	}
1883 
1884 	group->orphaned = true;
1885 	return true;
1886 }
1887 
1888 
1889 /*!	Returns whether the process group contains stopped processes.
1890 	Interrupts must be disabled and the team lock must be held.
1891 */
1892 static bool
1893 process_group_has_stopped_processes(process_group* group)
1894 {
1895 	SpinLocker _(thread_spinlock);
1896 
1897 	struct team* team = group->teams;
1898 	while (team != NULL) {
1899 		if (team->main_thread->state == B_THREAD_SUSPENDED)
1900 			return true;
1901 
1902 		team = team->group_next;
1903 	}
1904 
1905 	return false;
1906 }
1907 
1908 
1909 //	#pragma mark - Private kernel API
1910 
1911 
1912 status_t
1913 team_init(kernel_args *args)
1914 {
1915 	struct process_session *session;
1916 	struct process_group *group;
1917 
1918 	// create the team hash table
1919 	sTeamHash = hash_init(16, offsetof(struct team, next),
1920 		&team_struct_compare, &team_struct_hash);
1921 
1922 	sGroupHash = hash_init(16, offsetof(struct process_group, next),
1923 		&process_group_compare, &process_group_hash);
1924 
1925 	// create initial session and process groups
1926 
1927 	session = create_process_session(1);
1928 	if (session == NULL)
1929 		panic("Could not create initial session.\n");
1930 
1931 	group = create_process_group(1);
1932 	if (group == NULL)
1933 		panic("Could not create initial process group.\n");
1934 
1935 	insert_group_into_session(session, group);
1936 
1937 	// create the kernel team
1938 	sKernelTeam = create_team_struct("kernel_team", true);
1939 	if (sKernelTeam == NULL)
1940 		panic("could not create kernel team!\n");
1941 	strcpy(sKernelTeam->args, sKernelTeam->name);
1942 	sKernelTeam->state = TEAM_STATE_NORMAL;
1943 
1944 	sKernelTeam->saved_set_uid = 0;
1945 	sKernelTeam->real_uid = 0;
1946 	sKernelTeam->effective_uid = 0;
1947 	sKernelTeam->saved_set_gid = 0;
1948 	sKernelTeam->real_gid = 0;
1949 	sKernelTeam->effective_gid = 0;
1950 	sKernelTeam->supplementary_groups = NULL;
1951 	sKernelTeam->supplementary_group_count = 0;
1952 
1953 	insert_team_into_group(group, sKernelTeam);
1954 
1955 	sKernelTeam->io_context = vfs_new_io_context(NULL);
1956 	if (sKernelTeam->io_context == NULL)
1957 		panic("could not create io_context for kernel team!\n");
1958 
1959 	// stick it in the team hash
1960 	hash_insert(sTeamHash, sKernelTeam);
1961 
1962 	add_debugger_command_etc("team", &dump_team_info,
1963 		"Dump info about a particular team",
1964 		"[ <id> | <address> | <name> ]\n"
1965 		"Prints information about the specified team. If no argument is given\n"
1966 		"the current team is selected.\n"
1967 		"  <id>       - The ID of the team.\n"
1968 		"  <address>  - The address of the team structure.\n"
1969 		"  <name>     - The team's name.\n", 0);
1970 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
1971 		"\n"
1972 		"Prints a list of all existing teams.\n", 0);
1973 	return 0;
1974 }
1975 
1976 
1977 int32
1978 team_max_teams(void)
1979 {
1980 	return sMaxTeams;
1981 }
1982 
1983 
1984 int32
1985 team_used_teams(void)
1986 {
1987 	return sUsedTeams;
1988 }
1989 
1990 
1991 /*!	Returns the given child's death entry, if it is among the team's dead
1992 	children. The caller must hold the team lock.
1993 */
1994 job_control_entry*
1995 team_get_death_entry(struct team *team, thread_id child, bool* _deleteEntry)
1996 {
1997 	if (child <= 0)
1998 		return NULL;
1999 
2000 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2001 		child);
2002 	if (entry) {
2003 		// remove the entry only if the caller is the parent of the dead team
2004 		if (team_get_current_team_id() == entry->thread) {
2005 			team->dead_children->entries.Remove(entry);
2006 			team->dead_children->count--;
2007 			*_deleteEntry = true;
2008 		} else {
2009 			*_deleteEntry = false;
2010 		}
2011 	}
2012 
2013 	return entry;
2014 }
2015 
2016 
2017 /*! Quick check to see if we have a valid team ID. */
2018 bool
2019 team_is_valid(team_id id)
2020 {
2021 	struct team *team;
2022 	cpu_status state;
2023 
2024 	if (id <= 0)
2025 		return false;
2026 
2027 	state = disable_interrupts();
2028 	GRAB_TEAM_LOCK();
2029 
2030 	team = team_get_team_struct_locked(id);
2031 
2032 	RELEASE_TEAM_LOCK();
2033 	restore_interrupts(state);
2034 
2035 	return team != NULL;
2036 }
2037 
2038 
2039 struct team *
2040 team_get_team_struct_locked(team_id id)
2041 {
2042 	struct team_key key;
2043 	key.id = id;
2044 
2045 	return (struct team*)hash_lookup(sTeamHash, &key);
2046 }
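
/*	Usage sketch -- as in team_is_valid() above, callers must disable
	interrupts and hold the team lock around the lookup and any use of the
	returned team:

		cpu_status state = disable_interrupts();
		GRAB_TEAM_LOCK();

		struct team *team = team_get_team_struct_locked(id);
		// ... the team cannot go away while the lock is held ...

		RELEASE_TEAM_LOCK();
		restore_interrupts(state);
*/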
2047 
2048 
2049 /*!	Searches the given session for the process group with the specified ID;
2050 	if \a session is NULL, any session matches. The team lock must be held.
2051 */
2052 struct process_group *
2053 team_get_process_group_locked(struct process_session *session, pid_t id)
2054 {
2055 	struct process_group *group;
2056 	struct team_key key;
2057 	key.id = id;
2058 
2059 	group = (struct process_group *)hash_lookup(sGroupHash, &key);
2060 	if (group != NULL && (session == NULL || session == group->session))
2061 		return group;
2062 
2063 	return NULL;
2064 }
2065 
2066 
2067 void
2068 team_delete_process_group(struct process_group *group)
2069 {
2070 	if (group == NULL)
2071 		return;
2072 
2073 	TRACE(("team_delete_process_group(id = %ld)\n", group->id));
2074 
2075 	// remove_group_from_session() only leaves the session pointer set
2076 	// when the session can be freed together with the group
2077 	if (group->session) {
2078 		TRACE(("team_delete_process_group(): frees session %ld\n", group->session->id));
2079 		free(group->session);
2080 	}
2081 
2082 	free(group);
2083 }
2084 
2085 
2086 void
2087 team_set_controlling_tty(int32 ttyIndex)
2088 {
2089 	struct team* team = thread_get_current_thread()->team;
2090 
2091 	InterruptsSpinLocker _(team_spinlock);
2092 
2093 	team->group->session->controlling_tty = ttyIndex;
2094 	team->group->session->foreground_group = -1;
2095 }
2096 
2097 
2098 int32
2099 team_get_controlling_tty()
2100 {
2101 	struct team* team = thread_get_current_thread()->team;
2102 
2103 	InterruptsSpinLocker _(team_spinlock);
2104 
2105 	return team->group->session->controlling_tty;
2106 }
2107 
2108 
2109 status_t
2110 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2111 {
2112 	struct thread* thread = thread_get_current_thread();
2113 	struct team* team = thread->team;
2114 
2115 	InterruptsSpinLocker locker(team_spinlock);
2116 
2117 	process_session* session = team->group->session;
2118 
2119 	// must be the controlling tty of the calling process
2120 	if (session->controlling_tty != ttyIndex)
2121 		return ENOTTY;
2122 
2123 	// check process group -- must belong to our session
2124 	process_group* group = team_get_process_group_locked(session,
2125 		processGroupID);
2126 	if (group == NULL)
2127 		return B_BAD_VALUE;
2128 
2129 	// If we are in a background group, we may only do this unharmed if we
2130 	// ignore or block SIGTTOU; otherwise our group is sent a SIGTTOU.
2131 	if (session->foreground_group != -1
2132 		&& session->foreground_group != team->group_id
2133 		&& thread->sig_action[SIGTTOU - 1].sa_handler != SIG_IGN
2134 		&& !is_signal_blocked(SIGTTOU)) {
2135 		pid_t groupID = team->group->id;
2136 		locker.Unlock();
2137 		send_signal(-groupID, SIGTTOU);
2138 		return B_INTERRUPTED;
2139 	}
2140 
2141 	team->group->session->foreground_group = processGroupID;
2142 
2143 	return B_OK;
2144 }
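
/*	Note: this presumably backs tcsetpgrp()-style requests from the TTY
	layer. The SIGTTOU handling above follows POSIX: a process in a
	background group that neither blocks nor ignores SIGTTOU may not change
	the foreground process group; its group is signalled instead.
*/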
2145 
2146 
2147 /*!	Removes the specified team from the global team hash, and from its parent.
2148 	It also moves all of its children up to the parent.
2149 	You must hold the team lock when you call this function.
2150 */
2151 void
2152 team_remove_team(struct team *team)
2153 {
2154 	struct team *parent = team->parent;
2155 
2156 	// remember how long this team lasted
2157 	parent->dead_children->kernel_time += team->dead_threads_kernel_time
2158 		+ team->dead_children->kernel_time;
2159 	parent->dead_children->user_time += team->dead_threads_user_time
2160 		+ team->dead_children->user_time;
2161 
2162 	// Also grab the thread spinlock while removing the team from the hash.
2163 	// This makes the following sequence safe: grab teams lock, lookup team,
2164 	// grab threads lock, unlock teams lock,
2165 	// mutex_lock_threads_lock(<team related lock>), as used in the VFS code to
2166 	// lock another team's IO context.
2167 	GRAB_THREAD_LOCK();
2168 	hash_remove(sTeamHash, team);
2169 	RELEASE_THREAD_LOCK();
2170 	sUsedTeams--;
2171 
2172 	team->state = TEAM_STATE_DEATH;
2173 
2174 	// If we're a controlling process (i.e. a session leader with a
2175 	// controlling terminal), there's a bit of signalling we have to do.
2176 	if (team->session_id == team->id
2177 		&& team->group->session->controlling_tty >= 0) {
2178 		process_session* session = team->group->session;
2179 
2180 		session->controlling_tty = -1;
2181 
2182 		// send SIGHUP to the foreground
2183 		if (session->foreground_group >= 0) {
2184 			send_signal_etc(-session->foreground_group, SIGHUP,
2185 				SIGNAL_FLAG_TEAMS_LOCKED);
2186 		}
2187 
2188 		// send SIGHUP + SIGCONT to all newly-orphaned process groups with
2189 		// stopped processes
2190 		struct team* child = team->children;
2191 		while (child != NULL) {
2192 			process_group* childGroup = child->group;
2193 			if (!childGroup->orphaned
2194 				&& update_orphaned_process_group(childGroup, team->id)
2195 				&& process_group_has_stopped_processes(childGroup)) {
2196 				send_signal_etc(-childGroup->id, SIGHUP,
2197 					SIGNAL_FLAG_TEAMS_LOCKED);
2198 				send_signal_etc(-childGroup->id, SIGCONT,
2199 					SIGNAL_FLAG_TEAMS_LOCKED);
2200 			}
2201 
2202 			child = child->siblings_next;
2203 		}
2204 	} else {
2205 		// update "orphaned" flags of all children's process groups
2206 		struct team* child = team->children;
2207 		while (child != NULL) {
2208 			process_group* childGroup = child->group;
2209 			if (!childGroup->orphaned)
2210 				update_orphaned_process_group(childGroup, team->id);
2211 
2212 			child = child->siblings_next;
2213 		}
2214 
2215 		// update "orphaned" flag of this team's process group
2216 		update_orphaned_process_group(team->group, team->id);
2217 	}
2218 
2219 	// reparent each of the team's children
2220 	reparent_children(team);
2221 
2222 	// remove us from our process group
2223 	remove_team_from_group(team);
2224 
2225 	// remove us from our parent
2226 	remove_team_from_parent(parent, team);
2227 }
2228 
2229 
2230 void
2231 team_delete_team(struct team *team)
2232 {
2233 	team_id teamID = team->id;
2234 	port_id debuggerPort = -1;
2235 	cpu_status state;
2236 
2237 	if (team->num_threads > 0) {
2238 		// there are other threads still in this team -- cycle through and
2239 		// send a kill signal to each of them
2240 		// ToDo: this can be optimized. There's got to be a better solution.
2241 		struct thread *temp_thread;
2242 		char death_sem_name[B_OS_NAME_LENGTH];
2243 		sem_id deathSem;
2244 		int32 threadCount;
2245 
2246 		sprintf(death_sem_name, "team %ld death sem", teamID);
2247 		deathSem = create_sem(0, death_sem_name);
2248 		if (deathSem < 0)
2249 			panic("team_delete_team: cannot init death sem for team %ld\n", teamID);
2250 
2251 		state = disable_interrupts();
2252 		GRAB_TEAM_LOCK();
2253 
2254 		team->death_sem = deathSem;
2255 		threadCount = team->num_threads;
2256 
2257 		// If the team was being debugged, that will stop with the termination
2258 		// of the nub thread. The team structure has already been removed from
2259 		// the team hash table at this point, so no one can install a debugger
2260 		// anymore. We fetch the debugger's port to send it a message at the
2261 		// bitter end.
2262 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2263 
2264 		if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2265 			debuggerPort = team->debug_info.debugger_port;
2266 
2267 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2268 
2269 		// We can safely walk the list while holding the team lock; no new
2270 		// threads can be created, since the team is in TEAM_STATE_DEATH.
2271 		temp_thread = team->thread_list;
2272 		while (temp_thread) {
2273 			struct thread *next = temp_thread->team_next;
2274 
2275 			send_signal_etc(temp_thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2276 			temp_thread = next;
2277 		}
2278 
2279 		RELEASE_TEAM_LOCK();
2280 		restore_interrupts(state);
2281 
2282 		// wait until all threads in the team are dead.
2283 		acquire_sem_etc(team->death_sem, threadCount, 0, 0);
2284 		delete_sem(team->death_sem);
2285 	}
2286 
2287 	// If someone is waiting for this team to be loaded, but it dies
2288 	// unexpectedly before being done, we need to notify the waiting
2289 	// thread now.
2290 
2291 	state = disable_interrupts();
2292 	GRAB_TEAM_LOCK();
2293 
2294 	if (team->loading_info) {
2295 		// there's indeed someone waiting
2296 		struct team_loading_info *loadingInfo = team->loading_info;
2297 		team->loading_info = NULL;
2298 
2299 		loadingInfo->result = B_ERROR;
2300 		loadingInfo->done = true;
2301 
2302 		GRAB_THREAD_LOCK();
2303 
2304 		// wake up the waiting thread
2305 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
2306 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
2307 
2308 		RELEASE_THREAD_LOCK();
2309 	}
2310 
2311 	RELEASE_TEAM_LOCK();
2312 	restore_interrupts(state);
2313 
2314 	// notify team watchers
2315 
2316 	{
2317 		// we're not reachable from anyone anymore at this point, so we
2318 		// can safely access the list without any locking
2319 		struct team_watcher *watcher;
2320 		while ((watcher = (struct team_watcher*)list_remove_head_item(
2321 				&team->watcher_list)) != NULL) {
2322 			watcher->hook(teamID, watcher->data);
2323 			free(watcher);
2324 		}
2325 	}
2326 
2327 	// free team resources
2328 
2329 	vfs_free_io_context(team->io_context);
2330 	delete_owned_ports(teamID);
2331 	sem_delete_owned_sems(teamID);
2332 	remove_images(team);
2333 	vm_delete_address_space(team->address_space);
2334 
2335 	delete_team_struct(team);
2336 
2337 	// notify the debugger that the team is gone
2338 	user_debug_team_deleted(teamID, debuggerPort);
2339 }
2340 
2341 
2342 struct team *
2343 team_get_kernel_team(void)
2344 {
2345 	return sKernelTeam;
2346 }
2347 
2348 
2349 team_id
2350 team_get_kernel_team_id(void)
2351 {
2352 	if (!sKernelTeam)
2353 		return 0;
2354 
2355 	return sKernelTeam->id;
2356 }
2357 
2358 
2359 team_id
2360 team_get_current_team_id(void)
2361 {
2362 	return thread_get_current_thread()->team->id;
2363 }
2364 
2365 
2366 status_t
2367 team_get_address_space(team_id id, vm_address_space **_addressSpace)
2368 {
2369 	cpu_status state;
2370 	struct team *team;
2371 	status_t status;
2372 
2373 	// ToDo: we need to do something about B_SYSTEM_TEAM vs. its real ID (1)
2374 	if (id == 1) {
2375 		// we're the kernel team, so we don't have to go through all
2376 		// the hassle (locking and hash lookup)
2377 		*_addressSpace = vm_get_kernel_address_space();
2378 		return B_OK;
2379 	}
2380 
2381 	state = disable_interrupts();
2382 	GRAB_TEAM_LOCK();
2383 
2384 	team = team_get_team_struct_locked(id);
2385 	if (team != NULL) {
2386 		atomic_add(&team->address_space->ref_count, 1);
2387 		*_addressSpace = team->address_space;
2388 		status = B_OK;
2389 	} else
2390 		status = B_BAD_VALUE;
2391 
2392 	RELEASE_TEAM_LOCK();
2393 	restore_interrupts(state);
2394 
2395 	return status;
2396 }
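
/*	Note: on success the caller is left holding a reference to the address
	space (the ref_count is incremented above) and must release it again
	when done, presumably via vm_put_address_space().
*/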
2397 
2398 
2399 /*!	Sets the team's job control state.
2400 	Interrupts must be disabled and the team lock be held.
2401 	\a threadsLocked indicates whether the thread lock is being held, too.
2402 */
2403 void
2404 team_set_job_control_state(struct team* team, job_control_state newState,
2405 	int signal, bool threadsLocked)
2406 {
2407 	if (team == NULL || team->job_control_entry == NULL)
2408 		return;
2409 
2410 	// don't touch anything if the state stays the same or the team is
2411 	// already dead
2412 	job_control_entry* entry = team->job_control_entry;
2413 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
2414 		return;
2415 
2416 	T(SetJobControlState(team->id, newState, signal));
2417 
2418 	// remove from the old list
2419 	switch (entry->state) {
2420 		case JOB_CONTROL_STATE_NONE:
2421 			// entry is in no list ATM
2422 			break;
2423 		case JOB_CONTROL_STATE_DEAD:
2424 			// can't get here
2425 			break;
2426 		case JOB_CONTROL_STATE_STOPPED:
2427 			team->parent->stopped_children->entries.Remove(entry);
2428 			break;
2429 		case JOB_CONTROL_STATE_CONTINUED:
2430 			team->parent->continued_children->entries.Remove(entry);
2431 			break;
2432 	}
2433 
2434 	entry->state = newState;
2435 	entry->signal = signal;
2436 
2437 	// add to new list
2438 	team_job_control_children* childList = NULL;
2439 	switch (entry->state) {
2440 		case JOB_CONTROL_STATE_NONE:
2441 			// entry doesn't get into any list
2442 			break;
2443 		case JOB_CONTROL_STATE_DEAD:
2444 			childList = team->parent->dead_children;
2445 			team->parent->dead_children->count++;
2446 			break;
2447 		case JOB_CONTROL_STATE_STOPPED:
2448 			childList = team->parent->stopped_children;
2449 			break;
2450 		case JOB_CONTROL_STATE_CONTINUED:
2451 			childList = team->parent->continued_children;
2452 			break;
2453 	}
2454 
2455 	if (childList != NULL) {
2456 		childList->entries.Add(entry);
2457 		team->parent->dead_children->condition_variable.NotifyAll(
2458 			threadsLocked);
2459 	}
2460 }
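
/*	Example transition: when a stopped child receives SIGCONT, it moves from
	its parent's stopped_children list to the continued_children list, and
	any parent sleeping in wait_for_child() is woken up via the
	dead_children condition variable to re-evaluate its wait.
*/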
2461 
2462 
2463 /*!	Adds a hook to the team that is called as soon as the team goes away.
2464 	The hook is called from team_delete_team() with the team's ID and the
2465 	given \a data. This call might become public in the future.
2466 */
2467 status_t
2468 start_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2469 {
2470 	struct team_watcher *watcher;
2471 	struct team *team;
2472 	cpu_status state;
2473 
2474 	if (hook == NULL || teamID < B_OK)
2475 		return B_BAD_VALUE;
2476 
2477 	watcher = (struct team_watcher*)malloc(sizeof(struct team_watcher));
2478 	if (watcher == NULL)
2479 		return B_NO_MEMORY;
2480 
2481 	watcher->hook = hook;
2482 	watcher->data = data;
2483 
2484 	// find team and add watcher
2485 
2486 	state = disable_interrupts();
2487 	GRAB_TEAM_LOCK();
2488 
2489 	team = team_get_team_struct_locked(teamID);
2490 	if (team != NULL)
2491 		list_add_item(&team->watcher_list, watcher);
2492 
2493 	RELEASE_TEAM_LOCK();
2494 	restore_interrupts(state);
2495 
2496 	if (team == NULL) {
2497 		free(watcher);
2498 		return B_BAD_TEAM_ID;
2499 	}
2500 
2501 	return B_OK;
2502 }
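
/*	Usage sketch (hypothetical watcher):

		static void
		my_team_gone(team_id id, void *data)
		{
			dprintf("team %ld is gone\n", id);
		}

		status_t error = start_watching_team(teamID, &my_team_gone, NULL);
		...
		stop_watching_team(teamID, &my_team_gone, NULL);
*/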
2503 
2504 
2505 status_t
2506 stop_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2507 {
2508 	struct team_watcher *watcher = NULL;
2509 	struct team *team;
2510 	cpu_status state;
2511 
2512 	if (hook == NULL || teamID < B_OK)
2513 		return B_BAD_VALUE;
2514 
2515 	// find team and remove watcher (if present)
2516 
2517 	state = disable_interrupts();
2518 	GRAB_TEAM_LOCK();
2519 
2520 	team = team_get_team_struct_locked(teamID);
2521 	if (team != NULL) {
2522 		// search for watcher
2523 		while ((watcher = (struct team_watcher*)list_get_next_item(
2524 				&team->watcher_list, watcher)) != NULL) {
2525 			if (watcher->hook == hook && watcher->data == data) {
2526 				// got it!
2527 				list_remove_item(&team->watcher_list, watcher);
2528 				break;
2529 			}
2530 		}
2531 	}
2532 
2533 	RELEASE_TEAM_LOCK();
2534 	restore_interrupts(state);
2535 
2536 	if (watcher == NULL)
2537 		return B_ENTRY_NOT_FOUND;
2538 
2539 	free(watcher);
2540 	return B_OK;
2541 }
2542 
2543 
2544 //	#pragma mark - Public kernel API
2545 
2546 
2547 thread_id
2548 load_image(int32 argCount, const char **args, const char **env)
2549 {
2550 	int32 envCount = 0;
2551 
2552 	// count env variables
2553 	while (env && env[envCount] != NULL)
2554 		envCount++;
2555 
2556 	return load_image_etc(argCount, (char * const *)args, envCount,
2557 		(char * const *)env, B_NORMAL_PRIORITY, B_WAIT_TILL_LOADED,
2558 		-1, 0, true);
2559 }
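
/*	Usage sketch (hypothetical arguments; as with the userland counterpart,
	the new team's main thread is expected to start suspended and must be
	resumed):

		const char *args[] = { "/bin/sh", "-c", "sync", NULL };
		const char *env[] = { NULL };
		thread_id thread = load_image(3, args, env);
		if (thread >= B_OK)
			resume_thread(thread);
*/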
2560 
2561 
2562 status_t
2563 wait_for_team(team_id id, status_t *_returnCode)
2564 {
2565 	struct team *team;
2566 	thread_id thread;
2567 	cpu_status state;
2568 
2569 	// find the main thread and wait for it
2570 
2571 	state = disable_interrupts();
2572 	GRAB_TEAM_LOCK();
2573 
2574 	team = team_get_team_struct_locked(id);
2575 	if (team != NULL && team->main_thread != NULL)
2576 		thread = team->main_thread->id;
2577 	else
2578 		thread = B_BAD_THREAD_ID;
2579 
2580 	RELEASE_TEAM_LOCK();
2581 	restore_interrupts(state);
2582 
2583 	if (thread < 0)
2584 		return thread;
2585 
2586 	return wait_for_thread(thread, _returnCode);
2587 }
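
/*	Usage sketch (teamID being a valid team ID):

		status_t returnCode;
		status_t error = wait_for_team(teamID, &returnCode);
		if (error == B_OK)
			dprintf("team exited with code %ld\n", returnCode);
*/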
2588 
2589 
2590 status_t
2591 kill_team(team_id id)
2592 {
2593 	status_t status = B_OK;
2594 	thread_id threadID = -1;
2595 	struct team *team;
2596 	cpu_status state;
2597 
2598 	state = disable_interrupts();
2599 	GRAB_TEAM_LOCK();
2600 
2601 	team = team_get_team_struct_locked(id);
2602 	if (team != NULL) {
2603 		if (team != sKernelTeam) {
2604 			threadID = team->id;
2605 				// the team ID is the same as the ID of its main thread
2606 		} else
2607 			status = B_NOT_ALLOWED;
2608 	} else
2609 		status = B_BAD_THREAD_ID;
2610 
2611 	RELEASE_TEAM_LOCK();
2612 	restore_interrupts(state);
2613 
2614 	if (status < B_OK)
2615 		return status;
2616 
2617 	// just kill the main thread in the team. The cleanup code there will
2618 	// take care of the team
2619 	return kill_thread(threadID);
2620 }
2621 
2622 
2623 status_t
2624 _get_team_info(team_id id, team_info *info, size_t size)
2625 {
2626 	cpu_status state;
2627 	status_t status = B_OK;
2628 	struct team *team;
2629 
2630 	state = disable_interrupts();
2631 	GRAB_TEAM_LOCK();
2632 
2633 	if (id == B_CURRENT_TEAM)
2634 		team = thread_get_current_thread()->team;
2635 	else
2636 		team = team_get_team_struct_locked(id);
2637 
2638 	if (team == NULL) {
2639 		status = B_BAD_TEAM_ID;
2640 		goto err;
2641 	}
2642 
2643 	status = fill_team_info(team, info, size);
2644 
2645 err:
2646 	RELEASE_TEAM_LOCK();
2647 	restore_interrupts(state);
2648 
2649 	return status;
2650 }
2651 
2652 
2653 status_t
2654 _get_next_team_info(int32 *cookie, team_info *info, size_t size)
2655 {
2656 	status_t status = B_BAD_TEAM_ID;
2657 	struct team *team = NULL;
2658 	int32 slot = *cookie;
2659 	team_id lastTeamID;
2660 	cpu_status state;
2661 
2662 	if (slot < 1)
2663 		slot = 1;
2664 
2665 	state = disable_interrupts();
2666 	GRAB_TEAM_LOCK();
2667 
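	// A team's ID equals the ID of its main thread, i.e. team IDs are
	// allocated from the thread ID space, so the next unused thread ID is
	// an upper bound for valid team slots.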
2668 	lastTeamID = peek_next_thread_id();
2669 	if (slot >= lastTeamID)
2670 		goto err;
2671 
2672 	// get next valid team
2673 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
2674 		slot++;
2675 
2676 	if (team) {
2677 		status = fill_team_info(team, info, size);
2678 		*cookie = ++slot;
2679 	}
2680 
2681 err:
2682 	RELEASE_TEAM_LOCK();
2683 	restore_interrupts(state);
2684 
2685 	return status;
2686 }
2687 
2688 
2689 status_t
2690 _get_team_usage_info(team_id id, int32 who, team_usage_info *info, size_t size)
2691 {
2692 	bigtime_t kernelTime = 0, userTime = 0;
2693 	status_t status = B_OK;
2694 	struct team *team;
2695 	cpu_status state;
2696 
2697 	if (size != sizeof(team_usage_info)
2698 		|| (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN))
2699 		return B_BAD_VALUE;
2700 
2701 	state = disable_interrupts();
2702 	GRAB_TEAM_LOCK();
2703 
2704 	if (id == B_CURRENT_TEAM)
2705 		team = thread_get_current_thread()->team;
2706 	else
2707 		team = team_get_team_struct_locked(id);
2708 
2709 	if (team == NULL) {
2710 		status = B_BAD_TEAM_ID;
2711 		goto out;
2712 	}
2713 
2714 	switch (who) {
2715 		case B_TEAM_USAGE_SELF:
2716 		{
2717 			struct thread *thread = team->thread_list;
2718 
2719 			for (; thread != NULL; thread = thread->team_next) {
2720 				kernelTime += thread->kernel_time;
2721 				userTime += thread->user_time;
2722 			}
2723 
2724 			kernelTime += team->dead_threads_kernel_time;
2725 			userTime += team->dead_threads_user_time;
2726 			break;
2727 		}
2728 
2729 		case B_TEAM_USAGE_CHILDREN:
2730 		{
2731 			struct team *child = team->children;
2732 			for (; child != NULL; child = child->siblings_next) {
2733 				struct thread *thread = child->thread_list;
					// sum up the child's threads, not our own
2734 
2735 				for (; thread != NULL; thread = thread->team_next) {
2736 					kernelTime += thread->kernel_time;
2737 					userTime += thread->user_time;
2738 				}
2739 
2740 				kernelTime += child->dead_threads_kernel_time;
2741 				userTime += child->dead_threads_user_time;
2742 			}
2743 
2744 			kernelTime += team->dead_children->kernel_time;
2745 			userTime += team->dead_children->user_time;
2746 			break;
2747 		}
2748 	}
2749 
2750 out:
2751 	RELEASE_TEAM_LOCK();
2752 	restore_interrupts(state);
2753 
2754 	if (status == B_OK) {
2755 		info->kernel_time = kernelTime;
2756 		info->user_time = userTime;
2757 	}
2758 
2759 	return status;
2760 }
2761 
2762 
2763 pid_t
2764 getpid(void)
2765 {
2766 	return thread_get_current_thread()->team->id;
2767 }
2768 
2769 
2770 pid_t
2771 getppid(void)
2772 {
2773 	struct team *team = thread_get_current_thread()->team;
2774 	cpu_status state;
2775 	pid_t parent;
2776 
2777 	state = disable_interrupts();
2778 	GRAB_TEAM_LOCK();
2779 
2780 	parent = team->parent->id;
2781 
2782 	RELEASE_TEAM_LOCK();
2783 	restore_interrupts(state);
2784 
2785 	return parent;
2786 }
2787 
2788 
2789 pid_t
2790 getpgid(pid_t process)
2791 {
2792 	struct thread *thread;
2793 	pid_t result = -1;
2794 	cpu_status state;
2795 
2796 	if (process == 0)
2797 		process = thread_get_current_thread()->team->id;
2798 
2799 	state = disable_interrupts();
2800 	GRAB_THREAD_LOCK();
2801 
2802 	thread = thread_get_thread_struct_locked(process);
2803 	if (thread != NULL)
2804 		result = thread->team->group_id;
2805 
2806 	RELEASE_THREAD_LOCK();
2807 	restore_interrupts(state);
2808 
2809 	return thread != NULL ? result : B_BAD_VALUE;
2810 }
2811 
2812 
2813 pid_t
2814 getsid(pid_t process)
2815 {
2816 	struct thread *thread;
2817 	pid_t result = -1;
2818 	cpu_status state;
2819 
2820 	if (process == 0)
2821 		process = thread_get_current_thread()->team->id;
2822 
2823 	state = disable_interrupts();
2824 	GRAB_THREAD_LOCK();
2825 
2826 	thread = thread_get_thread_struct_locked(process);
2827 	if (thread != NULL)
2828 		result = thread->team->session_id;
2829 
2830 	RELEASE_THREAD_LOCK();
2831 	restore_interrupts(state);
2832 
2833 	return thread != NULL ? result : B_BAD_VALUE;
2834 }
2835 
2836 
2837 //	#pragma mark - User syscalls
2838 
2839 
2840 status_t
2841 _user_exec(const char *userPath, int32 argCount, char * const *userArgs,
2842 	int32 envCount, char * const *userEnvironment)
2843 {
2844 	char path[B_PATH_NAME_LENGTH];
2845 
2846 	if (argCount < 1)
2847 		return B_BAD_VALUE;
2848 
2849 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userArgs)
2850 		|| !IS_USER_ADDRESS(userEnvironment)
2851 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
2852 		return B_BAD_ADDRESS;
2853 
2854 	return exec_team(path, argCount, userArgs, envCount, userEnvironment);
2855 		// this one only returns in case of error
2856 }
2857 
2858 
2859 thread_id
2860 _user_fork(void)
2861 {
2862 	return fork_team();
2863 }
2864 
2865 
2866 thread_id
2867 _user_wait_for_child(thread_id child, uint32 flags, int32 *_userReason, status_t *_userReturnCode)
2868 {
2869 	status_t returnCode;
2870 	int32 reason;
2871 	thread_id deadChild;
2872 
2873 	if ((_userReason != NULL && !IS_USER_ADDRESS(_userReason))
2874 		|| (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode)))
2875 		return B_BAD_ADDRESS;
2876 
2877 	deadChild = wait_for_child(child, flags, &reason, &returnCode);
2878 
2879 	if (deadChild >= B_OK) {
2880 		// copy result data on successful completion
2881 		if ((_userReason != NULL
2882 				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
2883 			|| (_userReturnCode != NULL
2884 				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
2885 					< B_OK)) {
2886 			return B_BAD_ADDRESS;
2887 		}
2888 
2889 		return deadChild;
2890 	}
2891 
2892 	return syscall_restart_handle_post(deadChild);
2893 }
2894 
2895 
2896 pid_t
2897 _user_process_info(pid_t process, int32 which)
2898 {
2899 	// we only allow returning the parent of the current process
2900 	if (which == PARENT_ID
2901 		&& process != 0 && process != thread_get_current_thread()->team->id)
2902 		return B_BAD_VALUE;
2903 
2904 	switch (which) {
2905 		case SESSION_ID:
2906 			return getsid(process);
2907 		case GROUP_ID:
2908 			return getpgid(process);
2909 		case PARENT_ID:
2910 			return getppid();
2911 	}
2912 
2913 	return B_BAD_VALUE;
2914 }
2915 
2916 
2917 pid_t
2918 _user_setpgid(pid_t processID, pid_t groupID)
2919 {
2920 	struct thread *thread = thread_get_current_thread();
2921 	struct team *currentTeam = thread->team;
2922 	struct team *team;
2923 
2924 	if (groupID < 0)
2925 		return B_BAD_VALUE;
2926 
2927 	if (processID == 0)
2928 		processID = currentTeam->id;
2929 
2930 	// if the group ID is not specified, use the target process' ID
2931 	if (groupID == 0)
2932 		groupID = processID;
2933 
2934 	if (processID == currentTeam->id) {
2935 		// we set our own group
2936 
2937 		// we must not change our process group ID if we're a session leader
2938 		if (is_session_leader(currentTeam))
2939 			return B_NOT_ALLOWED;
2940 	} else {
2941 		// another team is the target of the call -- check it out
2942 		InterruptsSpinLocker _(team_spinlock);
2943 
2944 		team = team_get_team_struct_locked(processID);
2945 		if (team == NULL)
2946 			return ESRCH;
2947 
2948 		// The team must be a child of the calling team and in the same session.
2949 		// (If that's the case it isn't a session leader either.)
2950 		if (team->parent != currentTeam
2951 			|| team->session_id != currentTeam->session_id) {
2952 			return B_NOT_ALLOWED;
2953 		}
2954 
2955 		if (team->group_id == groupID)
2956 			return groupID;
2957 
2958 		// The call is also supposed to fail on a child that has already
2959 		// executed exec*() [EACCES].
2960 		if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
2961 			return EACCES;
2962 	}
2963 
2964 	struct process_group *group = NULL;
2965 	if (groupID == processID) {
2966 		// A new process group might be needed.
2967 		group = create_process_group(groupID);
2968 		if (group == NULL)
2969 			return B_NO_MEMORY;
2970 
2971 		// Assume orphaned. We consider the situation of the team's parent
2972 		// below.
2973 		group->orphaned = true;
2974 	}
2975 
2976 	status_t status = B_OK;
2977 	struct process_group *freeGroup = NULL;
2978 
2979 	InterruptsSpinLocker locker(team_spinlock);
2980 
2981 	team = team_get_team_struct_locked(processID);
2982 	if (team != NULL) {
2983 		// check the conditions again -- they might have changed in the meantime
2984 		if (is_session_leader(team)
2985 			|| team->session_id != currentTeam->session_id) {
2986 			status = B_NOT_ALLOWED;
2987 		} else if (team != currentTeam
2988 				&& (team->flags & TEAM_FLAG_EXEC_DONE) != 0) {
2989 			status = EACCES;
2990 		} else if (team->group_id == groupID) {
2991 			// the team is already in the desired process group
2992 			freeGroup = group;
2993 		} else {
2994 			// Check if a process group with the requested ID already exists.
2995 			struct process_group *targetGroup
2996 				= team_get_process_group_locked(team->group->session, groupID);
2997 			if (targetGroup != NULL) {
2998 				// In case of processID == groupID we have to free the
2999 				// allocated group.
3000 				freeGroup = group;
3001 			} else if (processID == groupID) {
3002 				// We created a new process group, let us insert it into the
3003 				// team's session.
3004 				insert_group_into_session(team->group->session, group);
3005 				targetGroup = group;
3006 			}
3007 
3008 			if (targetGroup != NULL) {
3009 				// we got a group, let's move the team there
3010 				process_group* oldGroup = team->group;
3011 
3012 				remove_team_from_group(team);
3013 				insert_team_into_group(targetGroup, team);
3014 
3015 				// Update the "orphaned" flag of all potentially affected
3016 				// groups.
3017 
3018 				// the team's old group
3019 				if (oldGroup->teams != NULL) {
3020 					oldGroup->orphaned = false;
3021 					update_orphaned_process_group(oldGroup, -1);
3022 				}
3023 
3024 				// the team's new group
3025 				struct team* parent = team->parent;
3026 				targetGroup->orphaned &= parent == NULL
3027 					|| parent->group == targetGroup
3028 					|| parent->session_id != team->session_id;
3029 
3030 				// children's groups
3031 				struct team* child = team->children;
3032 				while (child != NULL) {
3033 					child->group->orphaned = false;
3034 					update_orphaned_process_group(child->group, -1);
3035 
3036 					child = child->siblings_next;
3037 				}
3038 			} else
3039 				status = B_NOT_ALLOWED;
3040 		}
3041 	} else
3042 		status = B_NOT_ALLOWED;
3043 
3044 	// Changing the process group might have changed the situation for a parent
3045 	// waiting in wait_for_child(). Hence we notify it.
3046 	if (status == B_OK)
3047 		team->parent->dead_children->condition_variable.NotifyAll(false);
3048 
3049 	locker.Unlock();
3050 
3051 	if (status != B_OK) {
3052 		// in case of error, the group hasn't been added to the hash
3053 		team_delete_process_group(group);
3054 	}
3055 
3056 	team_delete_process_group(freeGroup);
3057 
3058 	return status == B_OK ? groupID : status;
3059 }
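
/*	Example: the shell job-control idiom this syscall supports -- both sides
	put the forked child into its own new group to avoid a race
	(hypothetical userland sketch):

		pid_t child = fork();
		if (child == 0) {
			setpgid(0, 0);
				// child: become leader of a new group with our own ID
			execl("/bin/ls", "ls", NULL);
		} else
			setpgid(child, child);
				// parent: same call; whichever runs first wins
*/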
3060 
3061 
3062 pid_t
3063 _user_setsid(void)
3064 {
3065 	struct team *team = thread_get_current_thread()->team;
3066 	struct process_session *session;
3067 	struct process_group *group;
3068 	cpu_status state;
3069 	bool failed = false;
3070 
3071 	// the team must not already be a process group leader
3072 	if (is_process_group_leader(team))
3073 		return B_NOT_ALLOWED;
3074 
3075 	group = create_process_group(team->id);
3076 	if (group == NULL)
3077 		return B_NO_MEMORY;
3078 
3079 	session = create_process_session(group->id);
3080 	if (session == NULL) {
3081 		team_delete_process_group(group);
3082 		return B_NO_MEMORY;
3083 	}
3084 
3085 	state = disable_interrupts();
3086 	GRAB_TEAM_LOCK();
3087 
3088 	// this may have changed since the check above
3089 	if (!is_process_group_leader(team)) {
3090 		remove_team_from_group(team);
3091 
3092 		insert_group_into_session(session, group);
3093 		insert_team_into_group(group, team);
3094 	} else
3095 		failed = true;
3096 
3097 	RELEASE_TEAM_LOCK();
3098 	restore_interrupts(state);
3099 
3100 	if (failed) {
3101 		team_delete_process_group(group);
3102 		free(session);
3103 		return B_NOT_ALLOWED;
3104 	}
3105 
3106 	return team->group_id;
3107 }
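
/*	Example: the traditional daemonization sequence relies on this syscall
	(hypothetical userland sketch):

		if (fork() > 0)
			exit(0);
		// the child cannot be a process group leader, hence setsid()
		// succeeds, creating a new session without a controlling tty
		setsid();
*/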
3108 
3109 
3110 status_t
3111 _user_wait_for_team(team_id id, status_t *_userReturnCode)
3112 {
3113 	status_t returnCode;
3114 	status_t status;
3115 
3116 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
3117 		return B_BAD_ADDRESS;
3118 
3119 	status = wait_for_team(id, &returnCode);
3120 	if (status >= B_OK && _userReturnCode != NULL) {
3121 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode)) < B_OK)
3122 			return B_BAD_ADDRESS;
3123 		return B_OK;
3124 	}
3125 
3126 	return syscall_restart_handle_post(status);
3127 }
3128 
3129 
3130 team_id
3131 _user_load_image(int32 argCount, const char **userArgs, int32 envCount,
3132 	const char **userEnv, int32 priority, uint32 flags, port_id errorPort,
3133 	uint32 errorToken)
3134 {
3135 	TRACE(("_user_load_image: argc = %ld\n", argCount));
3136 
3137 	if (argCount < 1 || userArgs == NULL || userEnv == NULL)
3138 		return B_BAD_VALUE;
3139 
3140 	if (!IS_USER_ADDRESS(userArgs) || !IS_USER_ADDRESS(userEnv))
3141 		return B_BAD_ADDRESS;
3142 
3143 	return load_image_etc(argCount, (char * const *)userArgs,
3144 		envCount, (char * const *)userEnv, priority, flags, errorPort,
3145 		errorToken, false);
3146 }
3147 
3148 
3149 void
3150 _user_exit_team(status_t returnValue)
3151 {
3152 	struct thread *thread = thread_get_current_thread();
3153 
3154 	thread->exit.status = returnValue;
3155 	thread->exit.reason = THREAD_RETURN_EXIT;
3156 
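	// Note: SIGKILL (unlike the per-thread SIGKILLTHR used in
	// team_delete_team()) takes down the whole team; the thread exit code
	// picks up the status and reason stored above.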
3157 	send_signal(thread->id, SIGKILL);
3158 }
3159 
3160 
3161 status_t
3162 _user_kill_team(team_id team)
3163 {
3164 	return kill_team(team);
3165 }
3166 
3167 
3168 status_t
3169 _user_get_team_info(team_id id, team_info *userInfo)
3170 {
3171 	status_t status;
3172 	team_info info;
3173 
3174 	if (!IS_USER_ADDRESS(userInfo))
3175 		return B_BAD_ADDRESS;
3176 
3177 	status = _get_team_info(id, &info, sizeof(team_info));
3178 	if (status == B_OK) {
3179 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3180 			return B_BAD_ADDRESS;
3181 	}
3182 
3183 	return status;
3184 }
3185 
3186 
3187 status_t
3188 _user_get_next_team_info(int32 *userCookie, team_info *userInfo)
3189 {
3190 	status_t status;
3191 	team_info info;
3192 	int32 cookie;
3193 
3194 	if (!IS_USER_ADDRESS(userCookie)
3195 		|| !IS_USER_ADDRESS(userInfo)
3196 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3197 		return B_BAD_ADDRESS;
3198 
3199 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
3200 	if (status != B_OK)
3201 		return status;
3202 
3203 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3204 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3205 		return B_BAD_ADDRESS;
3206 
3207 	return status;
3208 }
3209 
3210 
3211 team_id
3212 _user_get_current_team(void)
3213 {
3214 	return team_get_current_team_id();
3215 }
3216 
3217 
3218 status_t
3219 _user_get_team_usage_info(team_id team, int32 who, team_usage_info *userInfo, size_t size)
3220 {
3221 	team_usage_info info;
3222 	status_t status;
3223 
3224 	if (!IS_USER_ADDRESS(userInfo))
3225 		return B_BAD_ADDRESS;
3226 
3227 	status = _get_team_usage_info(team, who, &info, size);
3228 	if (status != B_OK)
3229 		return status;
3230 
3231 	if (user_memcpy(userInfo, &info, size) < B_OK)
3232 		return B_BAD_ADDRESS;
3233 
3234 	return status;
3235 }
3236 
3237