xref: /haiku/src/system/kernel/team.cpp (revision 89755088d790ff4fe36f8aa77dacb2bd15507108)
1 /*
2  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*!	Team functions */
10 
11 #include <stdio.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <sys/wait.h>
15 
16 #include <OS.h>
17 
18 #include <AutoDeleter.h>
19 
20 #include <elf.h>
21 #include <file_cache.h>
22 #include <heap.h>
23 #include <int.h>
24 #include <kernel.h>
25 #include <kimage.h>
26 #include <kscheduler.h>
27 #include <ksignal.h>
28 #include <port.h>
29 #include <sem.h>
30 #include <syscall_process_info.h>
31 #include <syscall_restart.h>
32 #include <syscalls.h>
33 #include <team.h>
34 #include <tls.h>
35 #include <tracing.h>
36 #include <user_runtime.h>
37 #include <usergroup.h>
38 #include <vfs.h>
39 #include <vm.h>
40 #include <vm_address_space.h>
41 #include <util/AutoLock.h>
42 #include <util/khash.h>
43 
44 //#define TRACE_TEAM
45 #ifdef TRACE_TEAM
46 #	define TRACE(x) dprintf x
47 #else
48 #	define TRACE(x) ;
49 #endif
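
// TRACE() takes its argument list in an extra set of parentheses so that the
// whole list can be forwarded to dprintf() without variadic macros, e.g.:
// TRACE(("team_create_thread_start: entry thread %ld\n", t->id));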
50 
51 
52 struct team_key {
53 	team_id id;
54 };
55 
56 struct team_arg {
57 	uint32	arg_count;
58 	char	**args;
59 	uint32	env_count;
60 	char	**env;
61 	port_id	error_port;
62 	uint32	error_token;
63 };
64 
65 struct fork_arg {
66 	area_id		user_stack_area;
67 	addr_t		user_stack_base;
68 	size_t		user_stack_size;
69 	addr_t		user_local_storage;
70 	sigset_t	sig_block_mask;
71 
72 	struct arch_fork_arg arch_info;
73 };
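
// fork_arg is filled in by fork_team() with the parent thread's user stack
// info, TLS pointer, and signal block mask, plus the saved register frame;
// it is consumed (and freed) by fork_team_thread_start() in the child.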
74 
75 
76 static hash_table *sTeamHash = NULL;
77 static hash_table *sGroupHash = NULL;
78 static struct team *sKernelTeam = NULL;
79 
// some arbitrarily chosen limits - should probably depend on the available
// memory (the limit is not yet enforced)
82 static int32 sMaxTeams = 2048;
83 static int32 sUsedTeams = 1;
84 
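// Guards the team and process group hash tables as well as the teams'
// parent/child and group links. It must be acquired with interrupts
// disabled, e.g. via GRAB_TEAM_LOCK() or an InterruptsSpinLocker.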
85 spinlock team_spinlock = 0;
86 
87 
88 // #pragma mark - Tracing
89 
90 
91 #ifdef TEAM_TRACING
92 namespace TeamTracing {
93 
94 class TeamForked : public AbstractTraceEntry {
95 	public:
96 		TeamForked(thread_id forkedThread)
97 			:
98 			fForkedThread(forkedThread)
99 		{
100 			Initialized();
101 		}
102 
103 		virtual void AddDump(TraceOutput& out)
104 		{
105 			out.Print("team forked, new thread %ld", fForkedThread);
106 		}
107 
108 	private:
109 		thread_id			fForkedThread;
110 };
111 
112 
113 class ExecTeam : public AbstractTraceEntry {
114 	public:
115 		ExecTeam(const char* path, int32 argCount, const char* const* args,
116 				int32 envCount, const char* const* env)
117 			:
118 			fArgCount(argCount),
119 			fArgs(NULL)
120 		{
121 			fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
122 				false);
123 
124 			// determine the buffer size we need for the args
125 			size_t argBufferSize = 0;
126 			for (int32 i = 0; i < argCount; i++)
127 				argBufferSize += strlen(args[i]) + 1;
128 
129 			// allocate a buffer
130 			fArgs = (char*)alloc_tracing_buffer(argBufferSize);
131 			if (fArgs) {
132 				char* buffer = fArgs;
133 				for (int32 i = 0; i < argCount; i++) {
134 					size_t argSize = strlen(args[i]) + 1;
135 					memcpy(buffer, args[i], argSize);
136 					buffer += argSize;
137 				}
138 			}
139 
140 			// ignore env for the time being
141 			(void)envCount;
142 			(void)env;
143 
144 			Initialized();
145 		}
146 
147 		virtual void AddDump(TraceOutput& out)
148 		{
			out.Print("team exec, \"%s\", args:", fPath);
150 
151 			char* args = fArgs;
152 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
153 				out.Print(" \"%s\"", args);
154 				args += strlen(args) + 1;
155 			}
156 		}
157 
158 	private:
159 		char*	fPath;
160 		int32	fArgCount;
161 		char*	fArgs;
162 };
163 
164 
165 static const char*
166 job_control_state_name(job_control_state state)
167 {
168 	switch (state) {
169 		case JOB_CONTROL_STATE_NONE:
170 			return "none";
171 		case JOB_CONTROL_STATE_STOPPED:
172 			return "stopped";
173 		case JOB_CONTROL_STATE_CONTINUED:
174 			return "continued";
175 		case JOB_CONTROL_STATE_DEAD:
176 			return "dead";
177 		default:
178 			return "invalid";
179 	}
180 }
181 
182 
183 class SetJobControlState : public AbstractTraceEntry {
184 	public:
185 		SetJobControlState(team_id team, job_control_state newState, int signal)
186 			:
187 			fTeam(team),
188 			fNewState(newState),
189 			fSignal(signal)
190 		{
191 			Initialized();
192 		}
193 
194 		virtual void AddDump(TraceOutput& out)
195 		{
196 			out.Print("team set job control state, team %ld, "
197 				"new state: %s, signal: %d",
198 				fTeam, job_control_state_name(fNewState), fSignal);
199 		}
200 
201 	private:
202 		team_id				fTeam;
203 		job_control_state	fNewState;
204 		int					fSignal;
205 };
206 
207 
208 class WaitForChild : public AbstractTraceEntry {
209 	public:
210 		WaitForChild(pid_t child, uint32 flags)
211 			:
212 			fChild(child),
213 			fFlags(flags)
214 		{
215 			Initialized();
216 		}
217 
218 		virtual void AddDump(TraceOutput& out)
219 		{
220 			out.Print("team wait for child, child: %ld, "
221 				"flags: 0x%lx", fChild, fFlags);
222 		}
223 
224 	private:
225 		pid_t	fChild;
226 		uint32	fFlags;
227 };
228 
229 
230 class WaitForChildDone : public AbstractTraceEntry {
231 	public:
232 		WaitForChildDone(const job_control_entry& entry)
233 			:
234 			fState(entry.state),
235 			fTeam(entry.thread),
236 			fStatus(entry.status),
237 			fReason(entry.reason),
238 			fSignal(entry.signal)
239 		{
240 			Initialized();
241 		}
242 
243 		WaitForChildDone(status_t error)
244 			:
245 			fTeam(error)
246 		{
247 			Initialized();
248 		}
249 
250 		virtual void AddDump(TraceOutput& out)
251 		{
			if (fTeam >= 0) {
				out.Print("team wait for child done, team: %ld, "
					"state: %s, status: 0x%lx, reason: 0x%x, signal: %d",
					fTeam, job_control_state_name(fState), fStatus, fReason,
					fSignal);
			} else {
				out.Print("team wait for child failed, error: 0x%lx",
					fTeam);
			}
261 		}
262 
263 	private:
264 		job_control_state	fState;
265 		team_id				fTeam;
266 		status_t			fStatus;
267 		uint16				fReason;
268 		uint16				fSignal;
269 };
270 
271 }	// namespace TeamTracing
272 
273 #	define T(x) new(std::nothrow) TeamTracing::x;
274 #else
275 #	define T(x) ;
276 #endif
277 
278 
279 
280 //	#pragma mark - Private functions
281 
282 
283 static void
284 _dump_team_info(struct team *team)
285 {
286 	kprintf("TEAM: %p\n", team);
287 	kprintf("id:          %ld (%#lx)\n", team->id, team->id);
288 	kprintf("name:        '%s'\n", team->name);
289 	kprintf("args:        '%s'\n", team->args);
290 	kprintf("next:        %p\n", team->next);
291 	kprintf("parent:      %p", team->parent);
292 	if (team->parent != NULL) {
293 		kprintf(" (id = %ld)\n", team->parent->id);
294 	} else
295 		kprintf("\n");
296 
297 	kprintf("children:    %p\n", team->children);
298 	kprintf("num_threads: %d\n", team->num_threads);
299 	kprintf("state:       %d\n", team->state);
300 	kprintf("flags:       0x%lx\n", team->flags);
301 	kprintf("io_context:  %p\n", team->io_context);
302 	if (team->address_space)
303 		kprintf("address_space: %p\n", team->address_space);
304 	kprintf("main_thread: %p\n", team->main_thread);
305 	kprintf("thread_list: %p\n", team->thread_list);
306 	kprintf("group_id:    %ld\n", team->group_id);
307 	kprintf("session_id:  %ld\n", team->session_id);
308 }
309 
310 
311 static int
312 dump_team_info(int argc, char **argv)
313 {
314 	struct hash_iterator iterator;
315 	struct team *team;
316 	team_id id = -1;
317 	bool found = false;
318 
319 	if (argc < 2) {
320 		struct thread* thread = thread_get_current_thread();
321 		if (thread != NULL && thread->team != NULL)
322 			_dump_team_info(thread->team);
323 		else
324 			kprintf("No current team!\n");
325 		return 0;
326 	}
327 
328 	id = strtoul(argv[1], NULL, 0);
329 	if (IS_KERNEL_ADDRESS(id)) {
330 		// semi-hack
331 		_dump_team_info((struct team *)id);
332 		return 0;
333 	}
334 
	// walk through the team list, trying to match name or id
336 	hash_open(sTeamHash, &iterator);
337 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
338 		if ((team->name && strcmp(argv[1], team->name) == 0) || team->id == id) {
339 			_dump_team_info(team);
340 			found = true;
341 			break;
342 		}
343 	}
344 	hash_close(sTeamHash, &iterator, false);
345 
346 	if (!found)
347 		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
348 	return 0;
349 }
350 
351 
352 static int
353 dump_teams(int argc, char **argv)
354 {
355 	struct hash_iterator iterator;
356 	struct team *team;
357 
358 	kprintf("team           id  parent      name\n");
359 	hash_open(sTeamHash, &iterator);
360 
361 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
362 		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->name);
363 	}
364 
365 	hash_close(sTeamHash, &iterator, false);
366 	return 0;
367 }
368 
369 
370 /*!	Frees an array of strings in kernel space.
371 
372 	\param strings strings array
373 	\param count number of strings in array
374 */
375 static void
376 free_strings_array(char **strings, int32 count)
377 {
378 	int32 i;
379 
380 	if (strings == NULL)
381 		return;
382 
383 	for (i = 0; i < count; i++)
384 		free(strings[i]);
385 
	free(strings);
387 }
388 
389 
/*!	Copies an array of strings within kernel space.

	\param in the string array to copy
	\param count number of strings in the array
	\param _strings pointer that will be set to the kernel copy
	\return \c B_OK on success, or an appropriate error code on
		failure.
*/
398 static status_t
399 kernel_copy_strings_array(char * const *in, int32 count, char ***_strings)
400 {
401 	status_t status;
402 	char **strings;
403 	int32 i = 0;
404 
405 	strings = (char **)malloc((count + 1) * sizeof(char *));
406 	if (strings == NULL)
407 		return B_NO_MEMORY;
408 
409 	for (; i < count; i++) {
410 		strings[i] = strdup(in[i]);
411 		if (strings[i] == NULL) {
412 			status = B_NO_MEMORY;
413 			goto error;
414 		}
415 	}
416 
417 	strings[count] = NULL;
418 	*_strings = strings;
419 
420 	return B_OK;
421 
422 error:
423 	free_strings_array(strings, i);
424 	return status;
425 }
426 
427 
/*!	Copies an array of strings from user space to kernel space.

	\param userStrings userspace string array
	\param count number of strings in the array
	\param _strings pointer that will be set to the kernel copy
	\return \c B_OK on success, or an appropriate error code on
		failure.
*/
436 static status_t
437 user_copy_strings_array(char * const *userStrings, int32 count, char ***_strings)
438 {
439 	char *buffer;
440 	char **strings;
441 	status_t err;
442 	int32 i = 0;
443 
444 	if (!IS_USER_ADDRESS(userStrings))
445 		return B_BAD_ADDRESS;
446 
447 	// buffer for safely accessing the user string
448 	// TODO: maybe have a user_strdup() instead?
449 	buffer = (char *)malloc(4 * B_PAGE_SIZE);
450 	if (buffer == NULL)
451 		return B_NO_MEMORY;
452 
453 	strings = (char **)malloc((count + 1) * sizeof(char *));
454 	if (strings == NULL) {
455 		err = B_NO_MEMORY;
456 		goto error;
457 	}
458 
459 	if ((err = user_memcpy(strings, userStrings, count * sizeof(char *))) < B_OK)
460 		goto error;
461 
462 	// scan all strings and copy to kernel space
463 
464 	for (; i < count; i++) {
465 		err = user_strlcpy(buffer, strings[i], 4 * B_PAGE_SIZE);
466 		if (err < B_OK)
467 			goto error;
468 
469 		strings[i] = strdup(buffer);
470 		if (strings[i] == NULL) {
471 			err = B_NO_MEMORY;
472 			goto error;
473 		}
474 	}
475 
476 	strings[count] = NULL;
477 	*_strings = strings;
478 	free(buffer);
479 
480 	return B_OK;
481 
482 error:
483 	free_strings_array(strings, i);
484 	free(buffer);
485 
486 	TRACE(("user_copy_strings_array failed %ld\n", err));
487 	return err;
488 }
489 
490 
491 static status_t
492 copy_strings_array(char * const *strings, int32 count, char ***_strings,
493 	bool kernel)
494 {
495 	if (kernel)
496 		return kernel_copy_strings_array(strings, count, _strings);
497 
498 	return user_copy_strings_array(strings, count, _strings);
499 }
500 
501 
502 static int
503 team_struct_compare(void *_p, const void *_key)
504 {
505 	struct team *p = (struct team*)_p;
506 	const struct team_key *key = (const struct team_key*)_key;
507 
508 	if (p->id == key->id)
509 		return 0;
510 
511 	return 1;
512 }
513 
514 
515 static uint32
516 team_struct_hash(void *_p, const void *_key, uint32 range)
517 {
518 	struct team *p = (struct team*)_p;
519 	const struct team_key *key = (const struct team_key*)_key;
520 
521 	if (p != NULL)
522 		return p->id % range;
523 
524 	return (uint32)key->id % range;
525 }
526 
527 
528 static int
529 process_group_compare(void *_group, const void *_key)
530 {
531 	struct process_group *group = (struct process_group*)_group;
532 	const struct team_key *key = (const struct team_key*)_key;
533 
534 	if (group->id == key->id)
535 		return 0;
536 
537 	return 1;
538 }
539 
540 
541 static uint32
542 process_group_hash(void *_group, const void *_key, uint32 range)
543 {
544 	struct process_group *group = (struct process_group*)_group;
545 	const struct team_key *key = (const struct team_key*)_key;
546 
547 	if (group != NULL)
548 		return group->id % range;
549 
550 	return (uint32)key->id % range;
551 }
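
// Both pairs of hash callbacks above follow the util/khash convention: each
// is called either with an existing table element (non-NULL first argument)
// or with just a lookup key, and therefore has to handle both cases.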
552 
553 
554 static void
555 insert_team_into_parent(struct team *parent, struct team *team)
556 {
557 	ASSERT(parent != NULL);
558 
559 	team->siblings_next = parent->children;
560 	parent->children = team;
561 	team->parent = parent;
562 }
563 
564 
565 /*!	Note: must have team lock held */
566 static void
567 remove_team_from_parent(struct team *parent, struct team *team)
568 {
569 	struct team *child, *last = NULL;
570 
571 	for (child = parent->children; child != NULL; child = child->siblings_next) {
572 		if (child == team) {
573 			if (last == NULL)
574 				parent->children = child->siblings_next;
575 			else
576 				last->siblings_next = child->siblings_next;
577 
578 			team->parent = NULL;
579 			break;
580 		}
581 		last = child;
582 	}
583 }
584 
585 
586 /*!	Reparent each of our children
587 	Note: must have team lock held
588 */
589 static void
590 reparent_children(struct team *team)
591 {
592 	struct team *child;
593 
594 	while ((child = team->children) != NULL) {
595 		// remove the child from the current proc and add to the parent
596 		remove_team_from_parent(team, child);
597 		insert_team_into_parent(sKernelTeam, child);
598 	}
599 
600 	// move job control entries too
601 	sKernelTeam->stopped_children->entries.MoveFrom(
602 		&team->stopped_children->entries);
603 	sKernelTeam->continued_children->entries.MoveFrom(
604 		&team->continued_children->entries);
605 
606 	// Note, we don't move the dead children entries. Those will be deleted
607 	// when the team structure is deleted.
608 }
609 
610 
611 static bool
612 is_session_leader(struct team *team)
613 {
614 	return team->session_id == team->id;
615 }
616 
617 
618 static bool
619 is_process_group_leader(struct team *team)
620 {
621 	return team->group_id == team->id;
622 }
623 
624 
625 static void
626 deferred_delete_process_group(struct process_group *group)
627 {
628 	if (group == NULL)
629 		return;
630 
631 	// remove_group_from_session() keeps this pointer around
632 	// only if the session can be freed as well
633 	if (group->session) {
634 		TRACE(("deferred_delete_process_group(): frees session %ld\n",
635 			group->session->id));
636 		deferred_free(group->session);
637 	}
638 
639 	deferred_free(group);
640 }
641 
642 
643 /*!	Removes a group from a session, and puts the session object
644 	back into the session cache, if it's not used anymore.
645 	You must hold the team lock when calling this function.
646 */
647 static void
648 remove_group_from_session(struct process_group *group)
649 {
650 	struct process_session *session = group->session;
651 
	// the group must belong to a session for this function to have any effect
653 	if (session == NULL)
654 		return;
655 
656 	hash_remove(sGroupHash, group);
657 
658 	// we cannot free the resource here, so we're keeping the group link
	// We cannot free the session here, as we might be in the wrong context.
	// If other groups remain, we clear the session link so that it won't be
	// freed; otherwise we keep it set, and deferred_delete_process_group()
	// will free the session together with the group.
	if (--session->group_count > 0)
		group->session = NULL;
663 
664 
665 /*!	Team lock must be held.
666 */
667 static void
668 acquire_process_group_ref(pid_t groupID)
669 {
670 	process_group* group = team_get_process_group_locked(NULL, groupID);
671 	if (group == NULL) {
672 		panic("acquire_process_group_ref(): unknown group ID: %ld", groupID);
673 		return;
674 	}
675 
676 	group->refs++;
677 }
678 
679 
680 /*!	Team lock must be held.
681 */
682 static void
683 release_process_group_ref(pid_t groupID)
684 {
685 	process_group* group = team_get_process_group_locked(NULL, groupID);
686 	if (group == NULL) {
687 		panic("release_process_group_ref(): unknown group ID: %ld", groupID);
688 		return;
689 	}
690 
691 	if (group->refs <= 0) {
692 		panic("release_process_group_ref(%ld): ref count already 0", groupID);
693 		return;
694 	}
695 
696 	if (--group->refs > 0)
697 		return;
698 
699 	// group is no longer used
700 
701 	remove_group_from_session(group);
702 	deferred_delete_process_group(group);
703 }
704 
705 
706 /*!	You must hold the team lock when calling this function. */
707 static void
708 insert_group_into_session(struct process_session *session, struct process_group *group)
709 {
710 	if (group == NULL)
711 		return;
712 
713 	group->session = session;
714 	hash_insert(sGroupHash, group);
715 	session->group_count++;
716 }
717 
718 
719 /*!	You must hold the team lock when calling this function. */
720 static void
721 insert_team_into_group(struct process_group *group, struct team *team)
722 {
723 	team->group = group;
724 	team->group_id = group->id;
725 	team->session_id = group->session->id;
726 
727 	team->group_next = group->teams;
728 	group->teams = team;
729 	acquire_process_group_ref(group->id);
730 }
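
// Note: every team in a group holds a reference to that group; it is
// acquired in insert_team_into_group() and released again in
// remove_team_from_group().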
731 
732 
733 /*!	Removes the team from the group.
734 
	\param team the team that will be removed from its group
736 */
737 static void
738 remove_team_from_group(struct team *team)
739 {
740 	struct process_group *group = team->group;
741 	struct team *current, *last = NULL;
742 
	// the team must belong to a group for this function to have any effect
	if (group == NULL)
		return;
746 
747 	for (current = group->teams; current != NULL; current = current->group_next) {
748 		if (current == team) {
749 			if (last == NULL)
750 				group->teams = current->group_next;
751 			else
752 				last->group_next = current->group_next;
753 
754 			team->group = NULL;
755 			break;
756 		}
757 		last = current;
758 	}
759 
760 	team->group = NULL;
761 	team->group_next = NULL;
762 
763 	release_process_group_ref(group->id);
764 }
765 
766 
767 static struct process_group *
768 create_process_group(pid_t id)
769 {
770 	struct process_group *group = (struct process_group *)malloc(sizeof(struct process_group));
771 	if (group == NULL)
772 		return NULL;
773 
774 	group->id = id;
775 	group->refs = 0;
776 	group->session = NULL;
777 	group->teams = NULL;
778 	group->orphaned = true;
779 	return group;
780 }
781 
782 
783 static struct process_session *
784 create_process_session(pid_t id)
785 {
786 	struct process_session *session
787 		= (struct process_session *)malloc(sizeof(struct process_session));
788 	if (session == NULL)
789 		return NULL;
790 
791 	session->id = id;
792 	session->group_count = 0;
793 	session->controlling_tty = -1;
794 	session->foreground_group = -1;
795 
796 	return session;
797 }
798 
799 
800 static void
801 set_team_name(struct team* team, const char* name)
802 {
803 	if (const char* lastSlash = strrchr(name, '/'))
804 		name = lastSlash + 1;
805 
806 	strlcpy(team->name, name, B_OS_NAME_LENGTH);
807 }
808 
809 
810 static struct team *
811 create_team_struct(const char *name, bool kernel)
812 {
813 	struct team *team = (struct team *)malloc(sizeof(struct team));
814 	if (team == NULL)
815 		return NULL;
816 	MemoryDeleter teamDeleter(team);
817 
818 	team->next = team->siblings_next = team->children = team->parent = NULL;
819 	team->id = allocate_thread_id();
820 	set_team_name(team, name);
821 	team->args[0] = '\0';
822 	team->num_threads = 0;
823 	team->io_context = NULL;
824 	team->address_space = NULL;
825 	team->thread_list = NULL;
826 	team->main_thread = NULL;
827 	team->loading_info = NULL;
828 	team->state = TEAM_STATE_BIRTH;
829 	team->flags = 0;
830 	team->death_sem = -1;
831 
832 	team->dead_threads_kernel_time = 0;
833 	team->dead_threads_user_time = 0;
834 
835 	// dead threads
836 	list_init(&team->dead_threads);
837 	team->dead_threads_count = 0;
838 
839 	// dead children
840 	team->dead_children = new(nothrow) team_dead_children;
841 	if (team->dead_children == NULL)
842 		return NULL;
843 	ObjectDeleter<team_dead_children> deadChildrenDeleter(team->dead_children);
844 
845 	team->dead_children->count = 0;
846 	team->dead_children->kernel_time = 0;
847 	team->dead_children->user_time = 0;
848 
849 	// stopped children
850 	team->stopped_children = new(nothrow) team_job_control_children;
851 	if (team->stopped_children == NULL)
852 		return NULL;
853 	ObjectDeleter<team_job_control_children> stoppedChildrenDeleter(
854 		team->stopped_children);
855 
856 	// continued children
857 	team->continued_children = new(nothrow) team_job_control_children;
858 	if (team->continued_children == NULL)
859 		return NULL;
860 	ObjectDeleter<team_job_control_children> continuedChildrenDeleter(
861 		team->continued_children);
862 
863 	// job control entry
864 	team->job_control_entry = new(nothrow) job_control_entry;
865 	if (team->job_control_entry == NULL)
866 		return NULL;
867 	ObjectDeleter<job_control_entry> jobControlEntryDeleter(
868 		team->job_control_entry);
869 	team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
870 	team->job_control_entry->thread = team->id;
871 	team->job_control_entry->team = team;
872 
873 	list_init(&team->image_list);
874 	list_init(&team->watcher_list);
875 
876 	clear_team_debug_info(&team->debug_info, true);
877 
878 	if (arch_team_init_team_struct(team, kernel) < 0)
879 		return NULL;
880 
881 	// publish dead/stopped/continued children condition vars
882 	team->dead_children->condition_variable.Publish(team->dead_children,
883 		"dead children");
884 	team->stopped_children->condition_variable.Publish(team->stopped_children,
885 		"stopped children");
886 	team->continued_children->condition_variable.Publish(
887 		team->continued_children, "continued children");
888 
889 	// keep all allocated structures
890 	jobControlEntryDeleter.Detach();
891 	continuedChildrenDeleter.Detach();
892 	stoppedChildrenDeleter.Detach();
893 	deadChildrenDeleter.Detach();
894 	teamDeleter.Detach();
895 
896 	return team;
897 }
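
// Note: the deleter objects above make the error paths of
// create_team_struct() safe - any early return automatically frees whatever
// has been allocated so far; only on success are the deleters detached.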
898 
899 
900 static void
901 delete_team_struct(struct team *team)
902 {
903 	team->stopped_children->condition_variable.Unpublish();
904 	team->continued_children->condition_variable.Unpublish();
905 
906 	team->dead_children->condition_variable.Unpublish();
907 
908 	while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
909 			&team->dead_threads)) {
910 		free(threadDeathEntry);
911 	}
912 
913 	while (job_control_entry* entry = team->dead_children->entries.RemoveHead())
914 		delete entry;
915 
916 	delete team->job_control_entry;
917 		// usually already NULL and transferred to the parent
918 	delete team->continued_children;
919 	delete team->stopped_children;
920 	delete team->dead_children;
921 	free(team);
922 }
923 
924 
925 static uint32
926 get_arguments_data_size(char **args, int32 argc)
927 {
928 	uint32 size = 0;
929 	int32 count;
930 
931 	for (count = 0; count < argc; count++)
932 		size += strlen(args[count]) + 1;
933 
934 	return size + (argc + 1) * sizeof(char *) + sizeof(struct user_space_program_args);
935 }
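
// The size returned above covers the flattened argument strings, the
// NULL-terminated argv pointer vector, and the user_space_program_args
// structure that precedes them in the main stack area.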
936 
937 
938 static void
939 free_team_arg(struct team_arg *teamArg)
940 {
941 	free_strings_array(teamArg->args, teamArg->arg_count);
942 	free_strings_array(teamArg->env, teamArg->env_count);
943 
944 	free(teamArg);
945 }
946 
947 
948 static status_t
949 create_team_arg(struct team_arg **_teamArg, int32 argCount, char * const *args,
950 	int32 envCount, char * const *env, port_id port, uint32 token, bool kernel)
951 {
952 	status_t status;
953 	char **argsCopy;
954 	char **envCopy;
955 
956 	struct team_arg *teamArg = (struct team_arg *)malloc(sizeof(struct team_arg));
957 	if (teamArg == NULL)
958 		return B_NO_MEMORY;
959 
960 	// copy the args over
961 
	status = copy_strings_array(args, argCount, &argsCopy, kernel);
	if (status != B_OK) {
		free(teamArg);
		return status;
	}

	status = copy_strings_array(env, envCount, &envCopy, kernel);
	if (status != B_OK) {
		free_strings_array(argsCopy, argCount);
		free(teamArg);
		return status;
	}
971 
972 	teamArg->arg_count = argCount;
973 	teamArg->args = argsCopy;
974 	teamArg->env_count = envCount;
975 	teamArg->env = envCopy;
976 	teamArg->error_port = port;
977 	teamArg->error_token = token;
978 
979 	*_teamArg = teamArg;
980 	return B_OK;
981 }
982 
983 
984 static int32
985 team_create_thread_start(void *args)
986 {
987 	status_t err;
988 	struct thread *t;
989 	struct team *team;
990 	struct team_arg *teamArgs = (struct team_arg*)args;
991 	const char *path;
992 	addr_t entry;
993 	char ustack_name[128];
994 	uint32 sizeLeft;
995 	char **userArgs;
996 	char **userEnv;
997 	char *userDest;
998 	struct user_space_program_args *programArgs;
999 	uint32 argCount, envCount, i;
1000 
1001 	t = thread_get_current_thread();
1002 	team = t->team;
1003 	cache_node_launched(teamArgs->arg_count, teamArgs->args);
1004 
1005 	TRACE(("team_create_thread_start: entry thread %ld\n", t->id));
1006 
1007 	// create an initial primary stack area
1008 
1009 	// Main stack area layout is currently as follows (starting from 0):
1010 	//
1011 	// size							| usage
1012 	// -----------------------------+--------------------------------
1013 	// USER_MAIN_THREAD_STACK_SIZE	| actual stack
1014 	// TLS_SIZE						| TLS data
1015 	// ENV_SIZE						| environment variables
1016 	// arguments size				| arguments passed to the team
1017 
1018 	// ToDo: ENV_SIZE is a) limited, and b) not used after libroot copied it to the heap
1019 	// ToDo: we could reserve the whole USER_STACK_REGION upfront...
1020 
1021 	sizeLeft = PAGE_ALIGN(USER_MAIN_THREAD_STACK_SIZE + TLS_SIZE + ENV_SIZE +
1022 		get_arguments_data_size(teamArgs->args, teamArgs->arg_count));
1023 	t->user_stack_base = USER_STACK_REGION + USER_STACK_REGION_SIZE - sizeLeft;
1024 	t->user_stack_size = USER_MAIN_THREAD_STACK_SIZE;
1025 		// the exact location at the end of the user stack area
1026 
1027 	sprintf(ustack_name, "%s_main_stack", team->name);
1028 	t->user_stack_area = create_area_etc(team, ustack_name, (void **)&t->user_stack_base,
1029 		B_EXACT_ADDRESS, sizeLeft, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA);
1030 	if (t->user_stack_area < 0) {
1031 		dprintf("team_create_thread_start: could not create default user stack region\n");
1032 
1033 		free_team_arg(teamArgs);
1034 		return t->user_stack_area;
1035 	}
1036 
1037 	// now that the TLS area is allocated, initialize TLS
1038 	arch_thread_init_tls(t);
1039 
1040 	argCount = teamArgs->arg_count;
1041 	envCount = teamArgs->env_count;
1042 
1043 	programArgs = (struct user_space_program_args *)(t->user_stack_base
1044 		+ t->user_stack_size + TLS_SIZE + ENV_SIZE);
1045 	userArgs = (char **)(programArgs + 1);
1046 	userDest = (char *)(userArgs + argCount + 1);
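	// With the layout above, programArgs sits just past the stack, TLS, and
	// environment blocks; it is followed by the argv pointer vector and then
	// by the argument strings themselves.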
1047 
1048 	TRACE(("addr: stack base = 0x%lx, userArgs = %p, userDest = %p, sizeLeft = %lu\n",
1049 		t->user_stack_base, userArgs, userDest, sizeLeft));
1050 
1051 	sizeLeft = t->user_stack_base + sizeLeft - (addr_t)userDest;
1052 
1053 	for (i = 0; i < argCount; i++) {
1054 		ssize_t length = user_strlcpy(userDest, teamArgs->args[i], sizeLeft);
1055 		if (length < B_OK) {
1056 			argCount = 0;
1057 			break;
1058 		}
1059 
1060 		userArgs[i] = userDest;
1061 		userDest += ++length;
1062 		sizeLeft -= length;
1063 	}
1064 	userArgs[argCount] = NULL;
1065 
1066 	userEnv = (char **)(t->user_stack_base + t->user_stack_size + TLS_SIZE);
1067 	sizeLeft = ENV_SIZE;
1068 	userDest = (char *)userEnv + ENV_SIZE - 1;
1069 		// the environment variables are copied from back to front
1070 
1071 	TRACE(("team_create_thread_start: envc: %ld, env: %p\n",
1072 		teamArgs->env_count, (void *)teamArgs->env));
1073 
1074 	for (i = 0; i < envCount; i++) {
1075 		ssize_t length = strlen(teamArgs->env[i]) + 1;
1076 		userDest -= length;
1077 		if (userDest < (char *)&userEnv[envCount]) {
1078 			envCount = i;
1079 			break;
1080 		}
1081 
1082 		userEnv[i] = userDest;
1083 
1084 		if (user_memcpy(userDest, teamArgs->env[i], length) < B_OK) {
1085 			envCount = 0;
1086 			break;
1087 		}
1088 
1089 		sizeLeft -= length;
1090 	}
1091 	userEnv[envCount] = NULL;
1092 
1093 	path = teamArgs->args[0];
	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
1096 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1097 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char **)) < B_OK
1098 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1099 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char **)) < B_OK
1100 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1101 				sizeof(port_id)) < B_OK
1102 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1103 				sizeof(uint32)) < B_OK) {
1104 		// the team deletion process will clean this mess
1105 		return B_BAD_ADDRESS;
1106 	}
1107 
1108 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1109 
1110 	// add args to info member
1111 	team->args[0] = 0;
1112 	strlcpy(team->args, path, sizeof(team->args));
1113 	for (i = 1; i < argCount; i++) {
1114 		strlcat(team->args, " ", sizeof(team->args));
1115 		strlcat(team->args, teamArgs->args[i], sizeof(team->args));
1116 	}
1117 
1118 	free_team_arg(teamArgs);
1119 		// the arguments are already on the user stack, we no longer need them in this form
1120 
1121 	// ToDo: don't use fixed paths!
1122 	err = elf_load_user_image("/boot/beos/system/runtime_loader", team, 0, &entry);
1123 	if (err < B_OK) {
1124 		// Luckily, we don't have to clean up the mess we created - that's
1125 		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: %s\n",
			strerror(err)));
1127 		return err;
1128 	}
1129 
1130 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1131 
1132 	team->state = TEAM_STATE_NORMAL;
1133 
1134 	// jump to the entry point in user space
1135 	return arch_thread_enter_userspace(t, entry, programArgs, NULL);
1136 		// only returns in case of error
1137 }
1138 
1139 
1140 /*!	The BeOS kernel exports a function with this name, but most probably with
1141 	different parameters; we should not make it public.
1142 */
1143 static thread_id
1144 load_image_etc(int32 argCount, char * const *args, int32 envCount,
1145 	char * const *env, int32 priority, uint32 flags,
1146 	port_id errorPort, uint32 errorToken, bool kernel)
1147 {
1148 	struct team *team, *parent;
1149 	const char *threadName;
1150 	thread_id thread;
1151 	status_t status;
1152 	cpu_status state;
1153 	struct team_arg *teamArgs;
1154 	struct team_loading_info loadingInfo;
1155 
1156 	if (args == NULL || argCount == 0)
1157 		return B_BAD_VALUE;
1158 
1159 	TRACE(("load_image_etc: name '%s', args = %p, argCount = %ld\n",
1160 		args[0], args, argCount));
1161 
1162 	team = create_team_struct(args[0], false);
1163 	if (team == NULL)
1164 		return B_NO_MEMORY;
1165 
1166 	parent = thread_get_current_thread()->team;
1167 
1168 	if (flags & B_WAIT_TILL_LOADED) {
1169 		loadingInfo.thread = thread_get_current_thread();
1170 		loadingInfo.result = B_ERROR;
1171 		loadingInfo.done = false;
1172 		team->loading_info = &loadingInfo;
1173 	}
1174 
1175 	// Inherit the parent's user/group, but also check the executable's
1176 	// set-user/group-id permission
1177 	inherit_parent_user_and_group(team, parent);
1178 	update_set_id_user_and_group(team, args[0]);
1179 
1180 	state = disable_interrupts();
1181 	GRAB_TEAM_LOCK();
1182 
1183 	hash_insert(sTeamHash, team);
1184 	insert_team_into_parent(parent, team);
1185 	insert_team_into_group(parent->group, team);
1186 	sUsedTeams++;
1187 
1188 	RELEASE_TEAM_LOCK();
1189 	restore_interrupts(state);
1190 
1191 	status = create_team_arg(&teamArgs, argCount, args, envCount, env,
1192 		errorPort, errorToken, kernel);
1193 	if (status != B_OK)
1194 		goto err1;
1195 
1196 	// create a new io_context for this team
1197 	team->io_context = vfs_new_io_context(parent->io_context);
1198 	if (!team->io_context) {
1199 		status = B_NO_MEMORY;
1200 		goto err2;
1201 	}
1202 
1203 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1204 	vfs_exec_io_context(team->io_context);
1205 
1206 	// create an address space for this team
1207 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1208 		&team->address_space);
1209 	if (status < B_OK)
1210 		goto err3;
1211 
1212 	// cut the path from the main thread name
1213 	threadName = strrchr(args[0], '/');
1214 	if (threadName != NULL)
1215 		threadName++;
1216 	else
1217 		threadName = args[0];
1218 
1219 	// Create a kernel thread, but under the context of the new team
1220 	// The new thread will take over ownership of teamArgs
1221 	thread = spawn_kernel_thread_etc(team_create_thread_start, threadName,
1222 		B_NORMAL_PRIORITY, teamArgs, team->id, team->id);
1223 	if (thread < 0) {
1224 		status = thread;
1225 		goto err4;
1226 	}
1227 
1228 	// wait for the loader of the new team to finish its work
1229 	if (flags & B_WAIT_TILL_LOADED) {
1230 		struct thread *mainThread;
1231 
1232 		state = disable_interrupts();
1233 		GRAB_THREAD_LOCK();
1234 
1235 		mainThread = thread_get_thread_struct_locked(thread);
1236 		if (mainThread) {
1237 			// resume the team's main thread
1238 			if (mainThread->state == B_THREAD_SUSPENDED)
1239 				scheduler_enqueue_in_run_queue(mainThread);
1240 
1241 			// Now suspend ourselves until loading is finished.
1242 			// We will be woken either by the thread, when it finished or
1243 			// aborted loading, or when the team is going to die (e.g. is
1244 			// killed). In either case the one setting `loadingInfo.done' is
1245 			// responsible for removing the info from the team structure.
1246 			while (!loadingInfo.done) {
1247 				thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
1248 				scheduler_reschedule();
1249 			}
1250 		} else {
1251 			// Impressive! Someone managed to kill the thread in this short
1252 			// time.
1253 		}
1254 
1255 		RELEASE_THREAD_LOCK();
1256 		restore_interrupts(state);
1257 
1258 		if (loadingInfo.result < B_OK)
1259 			return loadingInfo.result;
1260 	}
1261 
1262 	// notify the debugger
1263 	user_debug_team_created(team->id);
1264 
1265 	return thread;
1266 
1267 err4:
1268 	vm_put_address_space(team->address_space);
1269 err3:
1270 	vfs_free_io_context(team->io_context);
1271 err2:
1272 	free_team_arg(teamArgs);
1273 err1:
	// remove the team from the hash table and delete the team structure
1275 	state = disable_interrupts();
1276 	GRAB_TEAM_LOCK();
1277 
1278 	remove_team_from_group(team);
1279 	remove_team_from_parent(parent, team);
1280 	hash_remove(sTeamHash, team);
1281 
1282 	RELEASE_TEAM_LOCK();
1283 	restore_interrupts(state);
1284 
1285 	delete_team_struct(team);
1286 
1287 	return status;
1288 }
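
// A minimal usage sketch (the concrete arguments here are made up): load a
// userland program from kernel strings and start its main thread explicitly,
// since without B_WAIT_TILL_LOADED the function leaves it suspended.
//
//	const char* args[] = { "/bin/sh", "-c", "ls" };
//	thread_id thread = load_image_etc(3, (char* const*)args, 0, NULL,
//		B_NORMAL_PRIORITY, 0, -1, 0, true);
//	if (thread >= B_OK)
//		resume_thread(thread);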
1289 
1290 
/*!	Almost shuts down the current team and loads a new image into it.
	If successful, this function does not return and takes over ownership
	of the arguments provided.
	This function may only be called from user space.
*/
1296 static status_t
1297 exec_team(const char *path, int32 argCount, char * const *args,
1298 	int32 envCount, char * const *env)
1299 {
1300 	struct team *team = thread_get_current_thread()->team;
1301 	struct team_arg *teamArgs;
1302 	const char *threadName;
1303 	status_t status = B_OK;
1304 	cpu_status state;
1305 	struct thread *thread;
1306 	thread_id nubThreadID = -1;
1307 
1308 	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
1309 		args[0], argCount, envCount, team->id));
1310 
1311 	T(ExecTeam(path, argCount, args, envCount, env));
1312 
1313 	// switching the kernel at run time is probably not a good idea :)
1314 	if (team == team_get_kernel_team())
1315 		return B_NOT_ALLOWED;
1316 
1317 	// we currently need to be single threaded here
1318 	// ToDo: maybe we should just kill all other threads and
1319 	//	make the current thread the team's main thread?
1320 	if (team->main_thread != thread_get_current_thread())
1321 		return B_NOT_ALLOWED;
1322 
1323 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1324 	// We iterate through the thread list to make sure that there's no other
1325 	// thread.
1326 	state = disable_interrupts();
1327 	GRAB_TEAM_LOCK();
1328 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1329 
1330 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1331 		nubThreadID = team->debug_info.nub_thread;
1332 
1333 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1334 
1335 	for (thread = team->thread_list; thread; thread = thread->team_next) {
1336 		if (thread != team->main_thread && thread->id != nubThreadID) {
1337 			status = B_NOT_ALLOWED;
1338 			break;
1339 		}
1340 	}
1341 
1342 	RELEASE_TEAM_LOCK();
1343 	restore_interrupts(state);
1344 
1345 	if (status != B_OK)
1346 		return status;
1347 
1348 	status = create_team_arg(&teamArgs, argCount, args, envCount, env,
1349 		-1, 0, false);
1350 	if (status != B_OK)
1351 		return status;
1352 
1353 	// replace args[0] with the path argument, just to be on the safe side
1354 	free(teamArgs->args[0]);
1355 	teamArgs->args[0] = strdup(path);
1356 
1357 	// ToDo: remove team resources if there are any left
1358 	// thread_atkernel_exit() might not be called at all
1359 
1360 	thread_reset_for_exec();
1361 
1362 	user_debug_prepare_for_exec();
1363 
1364 	vm_delete_areas(team->address_space);
1365 	delete_owned_ports(team->id);
1366 	sem_delete_owned_sems(team->id);
1367 	remove_images(team);
1368 	vfs_exec_io_context(team->io_context);
1369 
1370 	user_debug_finish_after_exec();
1371 
1372 	// rename the team
1373 
1374 	set_team_name(team, path);
1375 
1376 	// cut the path from the team name and rename the main thread, too
1377 	threadName = strrchr(path, '/');
1378 	if (threadName != NULL)
1379 		threadName++;
1380 	else
1381 		threadName = path;
1382 	rename_thread(thread_get_current_thread_id(), threadName);
1383 
1384 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1385 
1386 	// Update user/group according to the executable's set-user/group-id
1387 	// permission.
1388 	update_set_id_user_and_group(team, path);
1389 
1390 	status = team_create_thread_start(teamArgs);
1391 		// this one usually doesn't return...
1392 
	// sorry, we have to kill ourselves - there is no way out anymore
	// (without any areas left and all that)
1395 	exit_thread(status);
1396 
1397 	// we return a status here since the signal that is sent by the
1398 	// call above is not immediately handled
1399 	return B_ERROR;
1400 }
1401 
1402 
/*!	This is the first function to be called from the newly created
	main child thread.
	It fills in everything that's still left to do from the fork_arg, and
	returns from the parent's fork() syscall to the child.
*/
1408 static int32
1409 fork_team_thread_start(void *_args)
1410 {
1411 	struct thread *thread = thread_get_current_thread();
1412 	struct fork_arg *forkArgs = (struct fork_arg *)_args;
1413 
1414 	struct arch_fork_arg archArgs = forkArgs->arch_info;
1415 		// we need a local copy of the arch dependent part
1416 
1417 	thread->user_stack_area = forkArgs->user_stack_area;
1418 	thread->user_stack_base = forkArgs->user_stack_base;
1419 	thread->user_stack_size = forkArgs->user_stack_size;
1420 	thread->user_local_storage = forkArgs->user_local_storage;
1421 	thread->sig_block_mask = forkArgs->sig_block_mask;
1422 
1423 	arch_thread_init_tls(thread);
1424 
1425 	free(forkArgs);
1426 
	// Restore the frame that fork_team() stored for the parent thread;
	// this makes us return to the child side of the fork() syscall.
1428 
1429 	arch_restore_fork_frame(&archArgs);
1430 		// This one won't return here
1431 
1432 	return 0;
1433 }
1434 
1435 
1436 static thread_id
1437 fork_team(void)
1438 {
1439 	struct thread *parentThread = thread_get_current_thread();
1440 	struct team *parentTeam = parentThread->team, *team;
1441 	struct fork_arg *forkArgs;
1442 	struct area_info info;
1443 	thread_id threadID;
1444 	cpu_status state;
1445 	status_t status;
1446 	int32 cookie;
1447 
1448 	TRACE(("fork_team(): team %ld\n", parentTeam->id));
1449 
1450 	if (parentTeam == team_get_kernel_team())
1451 		return B_NOT_ALLOWED;
1452 
1453 	// create a new team
	// ToDo: this is very similar to load_image_etc() - maybe we can do
	// something about it :)
1455 
1456 	team = create_team_struct(parentTeam->name, false);
1457 	if (team == NULL)
1458 		return B_NO_MEMORY;
1459 
1460 	strlcpy(team->args, parentTeam->args, sizeof(team->args));
1461 
1462 	// Inherit the parent's user/group.
1463 	inherit_parent_user_and_group(team, parentTeam);
1464 
1465 	state = disable_interrupts();
1466 	GRAB_TEAM_LOCK();
1467 
1468 	hash_insert(sTeamHash, team);
1469 	insert_team_into_parent(parentTeam, team);
1470 	insert_team_into_group(parentTeam->group, team);
1471 	sUsedTeams++;
1472 
1473 	RELEASE_TEAM_LOCK();
1474 	restore_interrupts(state);
1475 
1476 	forkArgs = (struct fork_arg *)malloc(sizeof(struct fork_arg));
1477 	if (forkArgs == NULL) {
1478 		status = B_NO_MEMORY;
1479 		goto err1;
1480 	}
1481 
1482 	// create a new io_context for this team
1483 	team->io_context = vfs_new_io_context(parentTeam->io_context);
1484 	if (!team->io_context) {
1485 		status = B_NO_MEMORY;
1486 		goto err2;
1487 	}
1488 
1489 	// create an address space for this team
1490 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1491 		&team->address_space);
1492 	if (status < B_OK)
1493 		goto err3;
1494 
1495 	// copy all areas of the team
1496 	// ToDo: should be able to handle stack areas differently (ie. don't have them copy-on-write)
1497 	// ToDo: all stacks of other threads than the current one could be left out
1498 
1499 	cookie = 0;
1500 	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
1501 		void *address;
1502 		area_id area = vm_copy_area(team->address_space->id, info.name,
1503 			&address, B_CLONE_ADDRESS, info.protection, info.area);
1504 		if (area < B_OK) {
1505 			status = area;
1506 			break;
1507 		}
1508 
1509 		if (info.area == parentThread->user_stack_area)
1510 			forkArgs->user_stack_area = area;
1511 	}
1512 
1513 	if (status < B_OK)
1514 		goto err4;
1515 
1516 	forkArgs->user_stack_base = parentThread->user_stack_base;
1517 	forkArgs->user_stack_size = parentThread->user_stack_size;
1518 	forkArgs->user_local_storage = parentThread->user_local_storage;
1519 	forkArgs->sig_block_mask = parentThread->sig_block_mask;
1520 	arch_store_fork_frame(&forkArgs->arch_info);
1521 
1522 	// ToDo: copy image list
1523 
1524 	// create a kernel thread under the context of the new team
1525 	threadID = spawn_kernel_thread_etc(fork_team_thread_start,
1526 		parentThread->name, parentThread->priority, forkArgs,
1527 		team->id, team->id);
1528 	if (threadID < 0) {
1529 		status = threadID;
1530 		goto err4;
1531 	}
1532 
1533 	// notify the debugger
1534 	user_debug_team_created(team->id);
1535 
1536 	T(TeamForked(threadID));
1537 
1538 	resume_thread(threadID);
1539 	return threadID;
1540 
1541 err4:
1542 	vm_delete_address_space(team->address_space);
1543 err3:
1544 	vfs_free_io_context(team->io_context);
1545 err2:
1546 	free(forkArgs);
1547 err1:
	// remove the team from the hash table and delete the team structure
1549 	state = disable_interrupts();
1550 	GRAB_TEAM_LOCK();
1551 
1552 	remove_team_from_group(team);
1553 	remove_team_from_parent(parentTeam, team);
1554 	hash_remove(sTeamHash, team);
1555 
1556 	RELEASE_TEAM_LOCK();
1557 	restore_interrupts(state);
1558 
1559 	delete_team_struct(team);
1560 
1561 	return status;
1562 }
1563 
1564 
/*!	Returns whether the specified \a team has any children belonging to the
	process group with the specified \a groupID.
	Must be called with the team lock held.
*/
1569 static bool
1570 has_children_in_group(struct team *parent, pid_t groupID)
1571 {
1572 	struct team *team;
1573 
1574 	struct process_group *group = team_get_process_group_locked(
1575 		parent->group->session, groupID);
1576 	if (group == NULL)
1577 		return false;
1578 
1579 	for (team = group->teams; team; team = team->group_next) {
1580 		if (team->parent == parent)
1581 			return true;
1582 	}
1583 
1584 	return false;
1585 }
1586 
1587 
1588 static job_control_entry*
1589 get_job_control_entry(team_job_control_children* children, pid_t id)
1590 {
1591 	for (JobControlEntryList::Iterator it = children->entries.GetIterator();
1592 		 job_control_entry* entry = it.Next();) {
1593 
1594 		if (id > 0) {
1595 			if (entry->thread == id)
1596 				return entry;
1597 		} else if (id == -1) {
1598 			return entry;
1599 		} else {
1600 			pid_t processGroup
1601 				= (entry->team ? entry->team->group_id : entry->group_id);
1602 			if (processGroup == -id)
1603 				return entry;
1604 		}
1605 	}
1606 
1607 	return NULL;
1608 }
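
// The id argument above follows waitpid() semantics: a positive value
// matches the entry of that particular child, -1 matches any entry, and a
// value < -1 matches any entry whose team belongs to the process group -id.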
1609 
1610 
1611 static job_control_entry*
1612 get_job_control_entry(struct team* team, pid_t id, uint32 flags)
1613 {
1614 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
1615 
1616 	if (entry == NULL && (flags & WCONTINUED) != 0)
1617 		entry = get_job_control_entry(team->continued_children, id);
1618 
1619 	if (entry == NULL && (flags & WUNTRACED) != 0)
1620 		entry = get_job_control_entry(team->stopped_children, id);
1621 
1622 	return entry;
1623 }
1624 
1625 
1626 job_control_entry::job_control_entry()
1627 	:
1628 	has_group_ref(false)
1629 {
1630 }
1631 
1632 
1633 job_control_entry::~job_control_entry()
1634 {
1635 	if (has_group_ref) {
1636 		InterruptsSpinLocker locker(team_spinlock);
1637 		release_process_group_ref(group_id);
1638 	}
1639 }
1640 
1641 
1642 /*!	Team and thread lock must be held.
1643 */
1644 void
1645 job_control_entry::InitDeadState()
1646 {
1647 	if (team != NULL) {
1648 		struct thread* thread = team->main_thread;
1649 		group_id = team->group_id;
1650 		this->thread = thread->id;
1651 		status = thread->exit.status;
1652 		reason = thread->exit.reason;
1653 		signal = thread->exit.signal;
1654 		team = NULL;
1655 		acquire_process_group_ref(group_id);
1656 		has_group_ref = true;
1657 	}
1658 }
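
// After InitDeadState() the entry no longer references the (soon to be
// deleted) team structure; the acquired group reference keeps the recorded
// group_id resolvable until the entry itself is destroyed.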
1659 
1660 
1661 job_control_entry&
1662 job_control_entry::operator=(const job_control_entry& other)
1663 {
1664 	state = other.state;
1665 	thread = other.thread;
1666 	has_group_ref = false;
1667 	team = other.team;
1668 	group_id = other.group_id;
1669 	status = other.status;
1670 	reason = other.reason;
1671 	signal = other.signal;
1672 
1673 	return *this;
1674 }
1675 
1676 
/*!	This is the kernel backend for waitpid(). It reports the reason for a
	child's death in more detail than waitpid() itself can.
*/
1680 static thread_id
1681 wait_for_child(pid_t child, uint32 flags, int32 *_reason,
1682 	status_t *_returnCode)
1683 {
1684 	struct thread* thread = thread_get_current_thread();
1685 	struct team* team = thread->team;
1686 	struct job_control_entry foundEntry;
1687 	struct job_control_entry* freeDeathEntry = NULL;
1688 	status_t status = B_OK;
1689 
1690 	TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));
1691 
1692 	T(WaitForChild(child, flags));
1693 
1694 	if (child == 0) {
1695 		// wait for all children in the process group of the calling team
1696 		child = -team->group_id;
1697 	}
1698 
1699 	bool ignoreFoundEntries = false;
1700 	bool ignoreFoundEntriesChecked = false;
1701 
1702 	while (true) {
1703 		InterruptsSpinLocker locker(team_spinlock);
1704 
1705 		// check whether any condition holds
1706 		job_control_entry* entry = get_job_control_entry(team, child, flags);
1707 
1708 		// If we don't have an entry yet, check whether there are any children
1709 		// complying to the process group specification at all.
1710 		if (entry == NULL) {
1711 			// No success yet -- check whether there are any children we could
1712 			// wait for.
1713 			bool childrenExist = false;
1714 			if (child == -1) {
1715 				childrenExist = team->children != NULL;
1716 			} else if (child < -1) {
1717 				childrenExist = has_children_in_group(team, -child);
1718 			} else {
1719 				if (struct team* childTeam = team_get_team_struct_locked(child))
1720 					childrenExist = childTeam->parent == team;
1721 			}
1722 
1723 			if (!childrenExist) {
1724 				// there is no child we could wait for
1725 				status = ECHILD;
1726 			} else {
1727 				// the children we're waiting for are still running
1728 				status = B_WOULD_BLOCK;
1729 			}
1730 		} else {
1731 			// got something
1732 			foundEntry = *entry;
1733 			if (entry->state == JOB_CONTROL_STATE_DEAD) {
1734 				// The child is dead. Reap its death entry.
1735 				freeDeathEntry = entry;
1736 				team->dead_children->entries.Remove(entry);
1737 				team->dead_children->count--;
1738 			} else {
1739 				// The child is well. Reset its job control state.
1740 				team_set_job_control_state(entry->team,
1741 					JOB_CONTROL_STATE_NONE, 0, false);
1742 			}
1743 		}
1744 
		// If we haven't got anything yet, prepare for waiting on the
		// condition variables.
1747 		ConditionVariableEntry<team_dead_children> deadWaitEntry;
1748 		ConditionVariableEntry<team_job_control_children> continuedWaitEntry;
1749 		ConditionVariableEntry<team_job_control_children> stoppedWaitEntry;
1750 
1751 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0) {
1752 			deadWaitEntry.Add(team->dead_children);
1753 
1754 			if ((flags & WCONTINUED) != 0) {
1755 				continuedWaitEntry.Add(team->continued_children,
1756 					&deadWaitEntry);
1757 			}
1758 
1759 			if ((flags & WUNTRACED) != 0)
1760 				stoppedWaitEntry.Add(team->stopped_children, &deadWaitEntry);
1761 		}
1762 
1763 		locker.Unlock();
1764 
1765 		// we got our entry and can return to our caller
1766 		if (status == B_OK) {
1767 			if (ignoreFoundEntries) {
1768 				// ... unless we shall ignore found entries
1769 				delete freeDeathEntry;
1770 				freeDeathEntry = NULL;
1771 				continue;
1772 			}
1773 
1774 			break;
1775 		}
1776 
1777 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
1778 			T(WaitForChildDone(status));
1779 			return status;
1780 		}
1781 
1782 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
1783 		if (status == B_INTERRUPTED) {
1784 			T(WaitForChildDone(status));
1785 			return status;
1786 		}
1787 
1788 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
1789 		// all our children are dead and fail with ECHILD. We check the
1790 		// condition at this point.
1791 		if (!ignoreFoundEntriesChecked) {
1792 			struct sigaction& handler = thread->sig_action[SIGCHLD - 1];
1793 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
1794 				|| handler.sa_handler == SIG_IGN) {
1795 				ignoreFoundEntries = true;
1796 			}
1797 
1798 			ignoreFoundEntriesChecked = true;
1799 		}
1800 	}
1801 
1802 	delete freeDeathEntry;
1803 
	// When we get here, we have a valid entry, and the child has already
	// been unregistered from its team or group as necessary.
1806 	int reason = 0;
1807 	switch (foundEntry.state) {
1808 		case JOB_CONTROL_STATE_DEAD:
1809 			reason = foundEntry.reason;
1810 			break;
1811 		case JOB_CONTROL_STATE_STOPPED:
1812 			reason = THREAD_STOPPED;
1813 			break;
1814 		case JOB_CONTROL_STATE_CONTINUED:
1815 			reason = THREAD_CONTINUED;
1816 			break;
1817 		case JOB_CONTROL_STATE_NONE:
1818 			// can't happen
1819 			break;
1820 	}
1821 
1822 	*_returnCode = foundEntry.status;
1823 	*_reason = (foundEntry.signal << 16) | reason;
1824 
1825 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
1826 	// status is available.
1827 	if (is_signal_blocked(SIGCHLD)) {
1828 		InterruptsSpinLocker locker(team_spinlock);
1829 
1830 		if (get_job_control_entry(team, child, flags) == NULL)
1831 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));
1832 	}
1833 
1834 	T(WaitForChildDone(foundEntry));
1835 
1836 	return foundEntry.thread;
1837 }
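
// A sketch of how a waitpid() style caller could decode the out parameters
// (the variable names are illustrative only):
//
//	status_t returnCode;
//	int32 reason;
//	pid_t pid = wait_for_child(-1, WUNTRACED, &reason, &returnCode);
//	if (pid >= 0) {
//		uint16 why = reason & 0xffff;	// THREAD_* reason code
//		uint16 signal = reason >> 16;	// signal that caused the event
//	}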
1838 
1839 
1840 /*! Fills the team_info structure with information from the specified
1841 	team.
1842 	The team lock must be held when called.
1843 */
1844 static status_t
1845 fill_team_info(struct team *team, team_info *info, size_t size)
1846 {
1847 	if (size != sizeof(team_info))
1848 		return B_BAD_VALUE;
1849 
	// ToDo: set more information for team_info
1851 	memset(info, 0, size);
1852 
1853 	info->team = team->id;
1854 	info->thread_count = team->num_threads;
1855 	info->image_count = count_images(team);
1856 	//info->area_count =
1857 	info->debugger_nub_thread = team->debug_info.nub_thread;
1858 	info->debugger_nub_port = team->debug_info.nub_port;
1859 	//info->uid =
1860 	//info->gid =
1861 
1862 	strlcpy(info->args, team->args, sizeof(info->args));
1863 	info->argc = 1;
1864 
1865 	return B_OK;
1866 }
1867 
1868 
1869 /*!	Updates the \c orphaned field of a process_group and returns its new value.
1870 	Interrupts must be disabled and team lock be held.
1871 */
1872 static bool
1873 update_orphaned_process_group(process_group* group, pid_t dyingProcess)
1874 {
1875 	// Orphaned Process Group: "A process group in which the parent of every
1876 	// member is either itself a member of the group or is not a member of the
1877 	// group's session." (Open Group Base Specs Issue 6)
1878 
1879 	// once orphaned, things won't change (exception: cf. setpgid())
1880 	if (group->orphaned)
1881 		return true;
1882 
1883 	struct team* team = group->teams;
1884 	while (team != NULL) {
1885 		struct team* parent = team->parent;
1886 		if (team->id != dyingProcess && parent != NULL
1887 			&& parent->id != dyingProcess
1888 			&& parent->group_id != group->id
1889 			&& parent->session_id == group->session->id) {
1890 			return false;
1891 		}
1892 
1893 		team = team->group_next;
1894 	}
1895 
1896 	group->orphaned = true;
1897 	return true;
1898 }
1899 
1900 
1901 /*!	Returns whether the process group contains stopped processes.
1902 	Interrupts must be disabled and team lock be held.
1903 */
1904 static bool
1905 process_group_has_stopped_processes(process_group* group)
1906 {
1907 	SpinLocker _(thread_spinlock);
1908 
1909 	struct team* team = group->teams;
1910 	while (team != NULL) {
1911 		if (team->main_thread->state == B_THREAD_SUSPENDED)
1912 			return true;
1913 
1914 		team = team->group_next;
1915 	}
1916 
1917 	return false;
1918 }
1919 
1920 
1921 //	#pragma mark - Private kernel API
1922 
1923 
1924 status_t
1925 team_init(kernel_args *args)
1926 {
1927 	struct process_session *session;
1928 	struct process_group *group;
1929 
	// create the team and process group hash tables
1931 	sTeamHash = hash_init(16, offsetof(struct team, next),
1932 		&team_struct_compare, &team_struct_hash);
1933 
1934 	sGroupHash = hash_init(16, offsetof(struct process_group, next),
1935 		&process_group_compare, &process_group_hash);
1936 
1937 	// create initial session and process groups
1938 
1939 	session = create_process_session(1);
1940 	if (session == NULL)
1941 		panic("Could not create initial session.\n");
1942 
1943 	group = create_process_group(1);
1944 	if (group == NULL)
1945 		panic("Could not create initial process group.\n");
1946 
1947 	insert_group_into_session(session, group);
1948 
1949 	// create the kernel team
1950 	sKernelTeam = create_team_struct("kernel_team", true);
1951 	if (sKernelTeam == NULL)
1952 		panic("could not create kernel team!\n");
1953 	strcpy(sKernelTeam->args, sKernelTeam->name);
1954 	sKernelTeam->state = TEAM_STATE_NORMAL;
1955 
1956 	sKernelTeam->saved_set_uid = 0;
1957 	sKernelTeam->real_uid = 0;
1958 	sKernelTeam->effective_uid = 0;
1959 	sKernelTeam->saved_set_gid = 0;
1960 	sKernelTeam->real_gid = 0;
1961 	sKernelTeam->effective_gid = 0;
1962 
1963 	insert_team_into_group(group, sKernelTeam);
1964 
1965 	sKernelTeam->io_context = vfs_new_io_context(NULL);
1966 	if (sKernelTeam->io_context == NULL)
1967 		panic("could not create io_context for kernel team!\n");
1968 
1969 	// stick it in the team hash
1970 	hash_insert(sTeamHash, sKernelTeam);
1971 
1972 	add_debugger_command_etc("team", &dump_team_info,
1973 		"Dump info about a particular team",
1974 		"[ <id> | <address> | <name> ]\n"
1975 		"Prints information about the specified team. If no argument is given\n"
1976 		"the current team is selected.\n"
1977 		"  <id>       - The ID of the team.\n"
1978 		"  <address>  - The address of the team structure.\n"
1979 		"  <name>     - The team's name.\n", 0);
1980 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
1981 		"\n"
1982 		"Prints a list of all existing teams.\n", 0);
1983 	return B_OK;
1984 }
1985 
1986 
1987 int32
1988 team_max_teams(void)
1989 {
1990 	return sMaxTeams;
1991 }
1992 
1993 
1994 int32
1995 team_used_teams(void)
1996 {
1997 	return sUsedTeams;
1998 }
1999 
2000 
2001 /*!	Returns the death entry for the given dead child, if any; \a _deleteEntry
2002 	is set to whether the caller must delete it. The team lock must be held.
2003 */
2004 job_control_entry*
2005 team_get_death_entry(struct team *team, thread_id child, bool* _deleteEntry)
2006 {
2007 	if (child <= 0)
2008 		return NULL;
2009 
2010 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2011 		child);
2012 	if (entry) {
2013 		// only remove the entry if the caller is the parent of the found team
2014 		if (team_get_current_team_id() == entry->thread) {
2015 			team->dead_children->entries.Remove(entry);
2016 			team->dead_children->count--;
2017 			*_deleteEntry = true;
2018 		} else {
2019 			*_deleteEntry = false;
2020 		}
2021 	}
2022 
2023 	return entry;
2024 }
2025 
2026 
2027 /*! Quick check to see if we have a valid team ID. */
2028 bool
2029 team_is_valid(team_id id)
2030 {
2031 	struct team *team;
2032 	cpu_status state;
2033 
2034 	if (id <= 0)
2035 		return false;
2036 
2037 	state = disable_interrupts();
2038 	GRAB_TEAM_LOCK();
2039 
2040 	team = team_get_team_struct_locked(id);
2041 
2042 	RELEASE_TEAM_LOCK();
2043 	restore_interrupts(state);
2044 
2045 	return team != NULL;
2046 }
2047 
2048 
2049 struct team *
2050 team_get_team_struct_locked(team_id id)
2051 {
2052 	struct team_key key;
2053 	key.id = id;
2054 
2055 	return (struct team*)hash_lookup(sTeamHash, &key);
2056 }
2057 
2058 
2059 /*! This searches the session of the team for the specified group ID.
2060 	You must hold the team lock when you call this function.
2061 */
2062 struct process_group *
2063 team_get_process_group_locked(struct process_session *session, pid_t id)
2064 {
2065 	struct process_group *group;
2066 	struct team_key key;
2067 	key.id = id;
2068 
2069 	group = (struct process_group *)hash_lookup(sGroupHash, &key);
2070 	if (group != NULL && (session == NULL || session == group->session))
2071 		return group;
2072 
2073 	return NULL;
2074 }
2075 
2076 
2077 void
2078 team_delete_process_group(struct process_group *group)
2079 {
2080 	if (group == NULL)
2081 		return;
2082 
2083 	TRACE(("team_delete_process_group(id = %ld)\n", group->id));
2084 
2085 	// remove_group_from_session() keeps this pointer around
2086 	// only if the session can be freed as well
2087 	if (group->session) {
2088 		TRACE(("team_delete_process_group(): frees session %ld\n", group->session->id));
2089 		free(group->session);
2090 	}
2091 
2092 	free(group);
2093 }
2094 
2095 
2096 void
2097 team_set_controlling_tty(int32 ttyIndex)
2098 {
2099 	struct team* team = thread_get_current_thread()->team;
2100 
2101 	InterruptsSpinLocker _(team_spinlock);
2102 
2103 	team->group->session->controlling_tty = ttyIndex;
2104 	team->group->session->foreground_group = -1;
2105 }
2106 
2107 
2108 int32
2109 team_get_controlling_tty()
2110 {
2111 	struct team* team = thread_get_current_thread()->team;
2112 
2113 	InterruptsSpinLocker _(team_spinlock);
2114 
2115 	return team->group->session->controlling_tty;
2116 }
2117 
2118 
2119 status_t
2120 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2121 {
2122 	struct thread* thread = thread_get_current_thread();
2123 	struct team* team = thread->team;
2124 
2125 	InterruptsSpinLocker locker(team_spinlock);
2126 
2127 	process_session* session = team->group->session;
2128 
2129 	// must be the controlling tty of the calling process
2130 	if (session->controlling_tty != ttyIndex)
2131 		return ENOTTY;
2132 
2133 	// check process group -- must belong to our session
2134 	process_group* group = team_get_process_group_locked(session,
2135 		processGroupID);
2136 	if (group == NULL)
2137 		return B_BAD_VALUE;
2138 
2139 	// If we are a background group, we can only do this unharmed if we
2140 	// ignore or block SIGTTOU. Otherwise the group is sent a SIGTTOU.
2141 	if (session->foreground_group != -1
2142 		&& session->foreground_group != team->group_id
2143 		&& thread->sig_action[SIGTTOU - 1].sa_handler != SIG_IGN
2144 		&& !is_signal_blocked(SIGTTOU)) {
2145 		pid_t groupID = team->group->id;
2146 		locker.Unlock();
2147 		send_signal(-groupID, SIGTTOU);
2148 		return B_INTERRUPTED;
2149 	}
2150 
2151 	team->group->session->foreground_group = processGroupID;
2152 
2153 	return B_OK;
2154 }
2155 
2156 
2157 /*!	Removes the specified team from the global team hash, and from its parent.
2158 	It also moves all of its children up to the parent.
2159 	You must hold the team lock when you call this function.
2160 */
2161 void
2162 team_remove_team(struct team *team)
2163 {
2164 	struct team *parent = team->parent;
2165 
2166 	// remember how long this team lasted
2167 	parent->dead_children->kernel_time += team->dead_threads_kernel_time
2168 		+ team->dead_children->kernel_time;
2169 	parent->dead_children->user_time += team->dead_threads_user_time
2170 		+ team->dead_children->user_time;
2171 
2172 	hash_remove(sTeamHash, team);
2173 	sUsedTeams--;
2174 
2175 	team->state = TEAM_STATE_DEATH;
2176 
2177 	// If we're a controlling process (i.e. a session leader with a controlling
2178 	// terminal), there's a bit of signalling we have to do.
2179 	if (team->session_id == team->id
2180 		&& team->group->session->controlling_tty >= 0) {
2181 		process_session* session = team->group->session;
2182 
2183 		session->controlling_tty = -1;
2184 
2185 		// send SIGHUP to the foreground
2186 		if (session->foreground_group >= 0) {
2187 			send_signal_etc(-session->foreground_group, SIGHUP,
2188 				SIGNAL_FLAG_TEAMS_LOCKED);
2189 		}
2190 
2191 		// send SIGHUP + SIGCONT to all newly-orphaned process groups with
2192 		// stopped processes
2193 		struct team* child = team->children;
2194 		while (child != NULL) {
2195 			process_group* childGroup = child->group;
2196 			if (!childGroup->orphaned
2197 				&& update_orphaned_process_group(childGroup, team->id)
2198 				&& process_group_has_stopped_processes(childGroup)) {
2199 				send_signal_etc(-childGroup->id, SIGHUP,
2200 					SIGNAL_FLAG_TEAMS_LOCKED);
2201 				send_signal_etc(-childGroup->id, SIGCONT,
2202 					SIGNAL_FLAG_TEAMS_LOCKED);
2203 			}
2204 
2205 			child = child->siblings_next;
2206 		}
2207 	} else {
2208 		// update "orphaned" flags of all children's process groups
2209 		struct team* child = team->children;
2210 		while (child != NULL) {
2211 			process_group* childGroup = child->group;
2212 			if (!childGroup->orphaned)
2213 				update_orphaned_process_group(childGroup, team->id);
2214 
2215 			child = child->siblings_next;
2216 		}
2217 
2218 		// update "orphaned" flag of this team's process group
2219 		update_orphaned_process_group(team->group, team->id);
2220 	}
2221 
2222 	// reparent each of the team's children
2223 	reparent_children(team);
2224 
2225 	// remove us from our process group
2226 	remove_team_from_group(team);
2227 
2228 	// remove us from our parent
2229 	remove_team_from_parent(parent, team);
2230 }
2231 
2232 
2233 void
2234 team_delete_team(struct team *team)
2235 {
2236 	team_id teamID = team->id;
2237 	port_id debuggerPort = -1;
2238 	cpu_status state;
2239 
2240 	if (team->num_threads > 0) {
2241 		// There are still other threads in this team; cycle through and
2242 		// send a kill signal to each of them.
2243 		// ToDo: this can be optimized. There's got to be a better solution.
2244 		struct thread *temp_thread;
2245 		char death_sem_name[B_OS_NAME_LENGTH];
2246 		sem_id deathSem;
2247 		int32 threadCount;
2248 
2249 		sprintf(death_sem_name, "team %ld death sem", teamID);
2250 		deathSem = create_sem(0, death_sem_name);
2251 		if (deathSem < 0)
2252 			panic("team_delete_team: cannot init death sem for team %ld\n", teamID);
2253 
2254 		state = disable_interrupts();
2255 		GRAB_TEAM_LOCK();
2256 
2257 		team->death_sem = deathSem;
2258 		threadCount = team->num_threads;
2259 
2260 		// If the team was being debugged, that will stop with the termination
2261 		// of the nub thread. The team structure has already been removed from
2262 	// the team hash table at this point, so no one can install a debugger
2263 		// anymore. We fetch the debugger's port to send it a message at the
2264 		// bitter end.
2265 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2266 
2267 		if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2268 			debuggerPort = team->debug_info.debugger_port;
2269 
2270 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2271 
2272 		// We can safely walk the list because of the lock. No new threads can be
2273 		// created because the team is in the TEAM_STATE_DEATH state.
2274 		temp_thread = team->thread_list;
2275 		while (temp_thread) {
2276 			struct thread *next = temp_thread->team_next;
2277 
2278 			send_signal_etc(temp_thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2279 			temp_thread = next;
2280 		}
2281 
2282 		RELEASE_TEAM_LOCK();
2283 		restore_interrupts(state);
2284 
2285 		// wait until all threads in the team are dead.
2286 		acquire_sem_etc(team->death_sem, threadCount, 0, 0);
2287 		delete_sem(team->death_sem);
2288 	}
2289 
2290 	// If someone is waiting for this team to be loaded, but it dies
2291 	// unexpectedly before being done, we need to notify the waiting
2292 	// thread now.
2293 
2294 	state = disable_interrupts();
2295 	GRAB_TEAM_LOCK();
2296 
2297 	if (team->loading_info) {
2298 		// there's indeed someone waiting
2299 		struct team_loading_info *loadingInfo = team->loading_info;
2300 		team->loading_info = NULL;
2301 
2302 		loadingInfo->result = B_ERROR;
2303 		loadingInfo->done = true;
2304 
2305 		GRAB_THREAD_LOCK();
2306 
2307 		// wake up the waiting thread
2308 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
2309 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
2310 
2311 		RELEASE_THREAD_LOCK();
2312 	}
2313 
2314 	RELEASE_TEAM_LOCK();
2315 	restore_interrupts(state);
2316 
2317 	// notify team watchers
2318 
2319 	{
2320 		// we're not reachable from anyone anymore at this point, so we
2321 		// can safely access the list without any locking
2322 		struct team_watcher *watcher;
2323 		while ((watcher = (struct team_watcher*)list_remove_head_item(
2324 				&team->watcher_list)) != NULL) {
2325 			watcher->hook(teamID, watcher->data);
2326 			free(watcher);
2327 		}
2328 	}
2329 
2330 	// free team resources
2331 
2332 	vfs_free_io_context(team->io_context);
2333 	delete_owned_ports(teamID);
2334 	sem_delete_owned_sems(teamID);
2335 	remove_images(team);
2336 	vm_delete_address_space(team->address_space);
2337 
2338 	delete_team_struct(team);
2339 
2340 	// notify the debugger that the team is gone
2341 	user_debug_team_deleted(teamID, debuggerPort);
2342 }
2343 
2344 
2345 struct team *
2346 team_get_kernel_team(void)
2347 {
2348 	return sKernelTeam;
2349 }
2350 
2351 
2352 team_id
2353 team_get_kernel_team_id(void)
2354 {
2355 	if (!sKernelTeam)
2356 		return 0;
2357 
2358 	return sKernelTeam->id;
2359 }
2360 
2361 
2362 team_id
2363 team_get_current_team_id(void)
2364 {
2365 	return thread_get_current_thread()->team->id;
2366 }
2367 
2368 
2369 status_t
2370 team_get_address_space(team_id id, vm_address_space **_addressSpace)
2371 {
2372 	cpu_status state;
2373 	struct team *team;
2374 	status_t status;
2375 
2376 	// ToDo: we need to do something about B_SYSTEM_TEAM vs. its real ID (1)
2377 	if (id == 1) {
2378 		// we're the kernel team, so we don't have to go through all
2379 		// the hassle (locking and hash lookup)
2380 		*_addressSpace = vm_get_kernel_address_space();
2381 		return B_OK;
2382 	}
2383 
2384 	state = disable_interrupts();
2385 	GRAB_TEAM_LOCK();
2386 
2387 	team = team_get_team_struct_locked(id);
2388 	if (team != NULL) {
2389 		atomic_add(&team->address_space->ref_count, 1);
2390 		*_addressSpace = team->address_space;
2391 		status = B_OK;
2392 	} else
2393 		status = B_BAD_VALUE;
2394 
2395 	RELEASE_TEAM_LOCK();
2396 	restore_interrupts(state);
2397 
2398 	return status;
2399 }
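

/*!	A minimal usage sketch for team_get_address_space(): the address space is
	returned with an extra reference that the caller must release again;
	vm_put_address_space() is assumed here to be the matching release call.
*/
static void
example_with_address_space(team_id id)
{
	vm_address_space *addressSpace;
	if (team_get_address_space(id, &addressSpace) != B_OK)
		return;

	// ... inspect or manipulate the address space ...

	vm_put_address_space(addressSpace);
}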
2400 
2401 
2402 /*!	Sets the team's job control state.
2403 	Interrupts must be disabled and the team lock be held.
2404 	\a threadsLocked indicates whether the thread lock is being held, too.
2405 */
2406 void
2407 team_set_job_control_state(struct team* team, job_control_state newState,
2408 	int signal, bool threadsLocked)
2409 {
2410 	if (team == NULL || team->job_control_entry == NULL)
2411 		return;
2412 
2413 	// don't touch anything, if the state stays the same or the team is already
2414 	// dead
2415 	job_control_entry* entry = team->job_control_entry;
2416 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
2417 		return;
2418 
2419 	T(SetJobControlState(team->id, newState, signal));
2420 
2421 	// remove from the old list
2422 	switch (entry->state) {
2423 		case JOB_CONTROL_STATE_NONE:
2424 			// entry is in no list ATM
2425 			break;
2426 		case JOB_CONTROL_STATE_DEAD:
2427 			// can't get here
2428 			break;
2429 		case JOB_CONTROL_STATE_STOPPED:
2430 			team->parent->stopped_children->entries.Remove(entry);
2431 			break;
2432 		case JOB_CONTROL_STATE_CONTINUED:
2433 			team->parent->continued_children->entries.Remove(entry);
2434 			break;
2435 	}
2436 
2437 	entry->state = newState;
2438 	entry->signal = signal;
2439 
2440 	// add to new list
2441 	team_job_control_children* childList = NULL;
2442 	switch (entry->state) {
2443 		case JOB_CONTROL_STATE_NONE:
2444 			// entry doesn't get into any list
2445 			break;
2446 		case JOB_CONTROL_STATE_DEAD:
2447 			childList = team->parent->dead_children;
2448 			team->parent->dead_children->count++;
2449 			// When a child dies, we need to notify all lists, since that might
2450 			// have been the last of the parent's children, and a waiting
2451 			// parent thread wouldn't wake up otherwise.
2452 			team->parent->stopped_children->condition_variable.NotifyAll(
2453 				threadsLocked);
2454 			team->parent->continued_children->condition_variable.NotifyAll(
2455 				threadsLocked);
2456 			break;
2457 		case JOB_CONTROL_STATE_STOPPED:
2458 			childList = team->parent->stopped_children;
2459 			break;
2460 		case JOB_CONTROL_STATE_CONTINUED:
2461 			childList = team->parent->continued_children;
2462 			break;
2463 	}
2464 
2465 	if (childList != NULL) {
2466 		childList->entries.Add(entry);
2467 		childList->condition_variable.NotifyAll(threadsLocked);
2468 	}
2469 }
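

/*	A hedged caller sketch: the signal handling code is the typical user of
	team_set_job_control_state(), e.g. when a team is stopped by SIGSTOP (the
	exact call sites live in the signal code and may differ in detail):

		InterruptsSpinLocker locker(team_spinlock);
		team_set_job_control_state(team, JOB_CONTROL_STATE_STOPPED, SIGSTOP,
			false);
*/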
2470 
2471 
2472 /*! Adds a hook to the team that is called as soon as this
2473 	team goes away.
2474 	This call might become public in the future.
2475 */
2476 status_t
2477 start_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2478 {
2479 	struct team_watcher *watcher;
2480 	struct team *team;
2481 	cpu_status state;
2482 
2483 	if (hook == NULL || teamID < B_OK)
2484 		return B_BAD_VALUE;
2485 
2486 	watcher = (struct team_watcher*)malloc(sizeof(struct team_watcher));
2487 	if (watcher == NULL)
2488 		return B_NO_MEMORY;
2489 
2490 	watcher->hook = hook;
2491 	watcher->data = data;
2492 
2493 	// find team and add watcher
2494 
2495 	state = disable_interrupts();
2496 	GRAB_TEAM_LOCK();
2497 
2498 	team = team_get_team_struct_locked(teamID);
2499 	if (team != NULL)
2500 		list_add_item(&team->watcher_list, watcher);
2501 
2502 	RELEASE_TEAM_LOCK();
2503 	restore_interrupts(state);
2504 
2505 	if (team == NULL) {
2506 		free(watcher);
2507 		return B_BAD_TEAM_ID;
2508 	}
2509 
2510 	return B_OK;
2511 }
2512 
2513 
2514 status_t
2515 stop_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2516 {
2517 	struct team_watcher *watcher = NULL;
2518 	struct team *team;
2519 	cpu_status state;
2520 
2521 	if (hook == NULL || teamID < B_OK)
2522 		return B_BAD_VALUE;
2523 
2524 	// find team and remove watcher (if present)
2525 
2526 	state = disable_interrupts();
2527 	GRAB_TEAM_LOCK();
2528 
2529 	team = team_get_team_struct_locked(teamID);
2530 	if (team != NULL) {
2531 		// search for watcher
2532 		while ((watcher = (struct team_watcher*)list_get_next_item(
2533 				&team->watcher_list, watcher)) != NULL) {
2534 			if (watcher->hook == hook && watcher->data == data) {
2535 				// got it!
2536 				list_remove_item(&team->watcher_list, watcher);
2537 				break;
2538 			}
2539 		}
2540 	}
2541 
2542 	RELEASE_TEAM_LOCK();
2543 	restore_interrupts(state);
2544 
2545 	if (watcher == NULL)
2546 		return B_ENTRY_NOT_FOUND;
2547 
2548 	free(watcher);
2549 	return B_OK;
2550 }
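

/*!	A minimal sketch of how the watcher API above might be used: log a line
	when the watched team goes away. The hook is invoked from
	team_delete_team() when the team is torn down.
*/
static void
example_team_gone(team_id id, void *data)
{
	dprintf("example: team %ld has gone away (cookie %p)\n", id, data);
}

static status_t
example_watch_team(team_id id)
{
	status_t status = start_watching_team(id, &example_team_gone, NULL);
	if (status != B_OK)
		return status;

	// ... if we lose interest before the team dies:
	return stop_watching_team(id, &example_team_gone, NULL);
}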
2551 
2552 
2553 //	#pragma mark - Public kernel API
2554 
2555 
2556 thread_id
2557 load_image(int32 argCount, const char **args, const char **env)
2558 {
2559 	int32 envCount = 0;
2560 
2561 	// count env variables
2562 	while (env && env[envCount] != NULL)
2563 		envCount++;
2564 
2565 	return load_image_etc(argCount, (char * const *)args, envCount,
2566 		(char * const *)env, B_NORMAL_PRIORITY, B_WAIT_TILL_LOADED,
2567 		-1, 0, true);
2568 }
2569 
2570 
2571 status_t
2572 wait_for_team(team_id id, status_t *_returnCode)
2573 {
2574 	struct team *team;
2575 	thread_id thread;
2576 	cpu_status state;
2577 
2578 	// find main thread and wait for that
2579 
2580 	state = disable_interrupts();
2581 	GRAB_TEAM_LOCK();
2582 
2583 	team = team_get_team_struct_locked(id);
2584 	if (team != NULL && team->main_thread != NULL)
2585 		thread = team->main_thread->id;
2586 	else
2587 		thread = B_BAD_THREAD_ID;
2588 
2589 	RELEASE_TEAM_LOCK();
2590 	restore_interrupts(state);
2591 
2592 	if (thread < 0)
2593 		return thread;
2594 
2595 	return wait_for_thread(thread, _returnCode);
2596 }
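

/*!	A minimal sketch combining the two calls above: spawn a userland program
	and wait for it to finish. The path is illustrative, and error handling
	is reduced to the bare minimum.
*/
static status_t
example_run_and_wait(const char *path)
{
	const char *args[] = { path, NULL };
	const char *env[] = { NULL };

	thread_id thread = load_image(1, args, env);
	if (thread < B_OK)
		return thread;

	// the main thread is returned in suspended state; start it
	resume_thread(thread);

	// a team's ID equals the ID of its main thread
	status_t returnCode;
	return wait_for_team(thread, &returnCode);
}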
2597 
2598 
2599 status_t
2600 kill_team(team_id id)
2601 {
2602 	status_t status = B_OK;
2603 	thread_id threadID = -1;
2604 	struct team *team;
2605 	cpu_status state;
2606 
2607 	state = disable_interrupts();
2608 	GRAB_TEAM_LOCK();
2609 
2610 	team = team_get_team_struct_locked(id);
2611 	if (team != NULL) {
2612 		if (team != sKernelTeam) {
2613 			threadID = team->id;
2614 				// the team ID is the same as the ID of its main thread
2615 		} else
2616 			status = B_NOT_ALLOWED;
2617 	} else
2618 		status = B_BAD_THREAD_ID;
2619 
2620 	RELEASE_TEAM_LOCK();
2621 	restore_interrupts(state);
2622 
2623 	if (status < B_OK)
2624 		return status;
2625 
2626 	// just kill the main thread in the team. The cleanup code there will
2627 	// take care of the team
2628 	return kill_thread(threadID);
2629 }
2630 
2631 
2632 status_t
2633 _get_team_info(team_id id, team_info *info, size_t size)
2634 {
2635 	cpu_status state;
2636 	status_t status = B_OK;
2637 	struct team *team;
2638 
2639 	state = disable_interrupts();
2640 	GRAB_TEAM_LOCK();
2641 
2642 	if (id == B_CURRENT_TEAM)
2643 		team = thread_get_current_thread()->team;
2644 	else
2645 		team = team_get_team_struct_locked(id);
2646 
2647 	if (team == NULL) {
2648 		status = B_BAD_TEAM_ID;
2649 		goto err;
2650 	}
2651 
2652 	status = fill_team_info(team, info, size);
2653 
2654 err:
2655 	RELEASE_TEAM_LOCK();
2656 	restore_interrupts(state);
2657 
2658 	return status;
2659 }
2660 
2661 
2662 status_t
2663 _get_next_team_info(int32 *cookie, team_info *info, size_t size)
2664 {
2665 	status_t status = B_BAD_TEAM_ID;
2666 	struct team *team = NULL;
2667 	int32 slot = *cookie;
2668 	team_id lastTeamID;
2669 	cpu_status state;
2670 
2671 	if (slot < 1)
2672 		slot = 1;
2673 
2674 	state = disable_interrupts();
2675 	GRAB_TEAM_LOCK();
2676 
2677 	lastTeamID = peek_next_thread_id();
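		// Team IDs are allocated from the thread ID space (a team's ID equals
		// its main thread's ID), so the next thread ID is a safe upper bound.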
2678 	if (slot >= lastTeamID)
2679 		goto err;
2680 
2681 	// get next valid team
2682 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
2683 		slot++;
2684 
2685 	if (team) {
2686 		status = fill_team_info(team, info, size);
2687 		*cookie = ++slot;
2688 	}
2689 
2690 err:
2691 	RELEASE_TEAM_LOCK();
2692 	restore_interrupts(state);
2693 
2694 	return status;
2695 }
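

/*	Userland sketch: the public get_next_team_info() API (see OS.h) funnels
	into the function above via the matching syscall. Listing all running
	teams:

		int32 cookie = 0;
		team_info info;
		while (get_next_team_info(&cookie, &info) == B_OK)
			printf("team %ld: %s\n", info.team, info.args);
*/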
2696 
2697 
2698 status_t
2699 _get_team_usage_info(team_id id, int32 who, team_usage_info *info, size_t size)
2700 {
2701 	bigtime_t kernelTime = 0, userTime = 0;
2702 	status_t status = B_OK;
2703 	struct team *team;
2704 	cpu_status state;
2705 
2706 	if (size != sizeof(team_usage_info)
2707 		|| (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN))
2708 		return B_BAD_VALUE;
2709 
2710 	state = disable_interrupts();
2711 	GRAB_TEAM_LOCK();
2712 
2713 	if (id == B_CURRENT_TEAM)
2714 		team = thread_get_current_thread()->team;
2715 	else
2716 		team = team_get_team_struct_locked(id);
2717 
2718 	if (team == NULL) {
2719 		status = B_BAD_TEAM_ID;
2720 		goto out;
2721 	}
2722 
2723 	switch (who) {
2724 		case B_TEAM_USAGE_SELF:
2725 		{
2726 			struct thread *thread = team->thread_list;
2727 
2728 			for (; thread != NULL; thread = thread->team_next) {
2729 				kernelTime += thread->kernel_time;
2730 				userTime += thread->user_time;
2731 			}
2732 
2733 			kernelTime += team->dead_threads_kernel_time;
2734 			userTime += team->dead_threads_user_time;
2735 			break;
2736 		}
2737 
2738 		case B_TEAM_USAGE_CHILDREN:
2739 		{
2740 			struct team *child = team->children;
2741 			for (; child != NULL; child = child->siblings_next) {
2742 				struct thread *thread = child->thread_list;
2743 
2744 				for (; thread != NULL; thread = thread->team_next) {
2745 					kernelTime += thread->kernel_time;
2746 					userTime += thread->user_time;
2747 				}
2748 
2749 				kernelTime += child->dead_threads_kernel_time;
2750 				userTime += child->dead_threads_user_time;
2751 			}
2752 
2753 			kernelTime += team->dead_children->kernel_time;
2754 			userTime += team->dead_children->user_time;
2755 			break;
2756 		}
2757 	}
2758 
2759 out:
2760 	RELEASE_TEAM_LOCK();
2761 	restore_interrupts(state);
2762 
2763 	if (status == B_OK) {
2764 		info->kernel_time = kernelTime;
2765 		info->user_time = userTime;
2766 	}
2767 
2768 	return status;
2769 }
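

/*	Userland sketch: the corresponding public call is get_team_usage_info()
	from OS.h. Printing the CPU time consumed by the calling team so far:

		team_usage_info usage;
		if (get_team_usage_info(B_CURRENT_TEAM, B_TEAM_USAGE_SELF, &usage)
				== B_OK) {
			printf("kernel: %lld us, user: %lld us\n", usage.kernel_time,
				usage.user_time);
		}
*/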
2770 
2771 
2772 pid_t
2773 getpid(void)
2774 {
2775 	return thread_get_current_thread()->team->id;
2776 }
2777 
2778 
2779 pid_t
2780 getppid(void)
2781 {
2782 	struct team *team = thread_get_current_thread()->team;
2783 	cpu_status state;
2784 	pid_t parent;
2785 
2786 	state = disable_interrupts();
2787 	GRAB_TEAM_LOCK();
2788 
2789 	parent = team->parent->id;
2790 
2791 	RELEASE_TEAM_LOCK();
2792 	restore_interrupts(state);
2793 
2794 	return parent;
2795 }
2796 
2797 
2798 pid_t
2799 getpgid(pid_t process)
2800 {
2801 	struct thread *thread;
2802 	pid_t result = -1;
2803 	cpu_status state;
2804 
2805 	if (process == 0)
2806 		process = thread_get_current_thread()->team->id;
2807 
2808 	state = disable_interrupts();
2809 	GRAB_THREAD_LOCK();
2810 
2811 	thread = thread_get_thread_struct_locked(process);
2812 	if (thread != NULL)
2813 		result = thread->team->group_id;
2814 
2815 	RELEASE_THREAD_LOCK();
2816 	restore_interrupts(state);
2817 
2818 	return thread != NULL ? result : B_BAD_VALUE;
2819 }
2820 
2821 
2822 pid_t
2823 getsid(pid_t process)
2824 {
2825 	struct thread *thread;
2826 	pid_t result = -1;
2827 	cpu_status state;
2828 
2829 	if (process == 0)
2830 		process = thread_get_current_thread()->team->id;
2831 
2832 	state = disable_interrupts();
2833 	GRAB_THREAD_LOCK();
2834 
2835 	thread = thread_get_thread_struct_locked(process);
2836 	if (thread != NULL)
2837 		result = thread->team->session_id;
2838 
2839 	RELEASE_THREAD_LOCK();
2840 	restore_interrupts(state);
2841 
2842 	return thread != NULL ? result : B_BAD_VALUE;
2843 }
2844 
2845 
2846 //	#pragma mark - User syscalls
2847 
2848 
2849 status_t
2850 _user_exec(const char *userPath, int32 argCount, char * const *userArgs,
2851 	int32 envCount, char * const *userEnvironment)
2852 {
2853 	char path[B_PATH_NAME_LENGTH];
2854 
2855 	if (argCount < 1)
2856 		return B_BAD_VALUE;
2857 
2858 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userArgs)
2859 		|| !IS_USER_ADDRESS(userEnvironment)
2860 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
2861 		return B_BAD_ADDRESS;
2862 
2863 	return exec_team(path, argCount, userArgs, envCount, userEnvironment);
2864 		// this one only returns in case of error
2865 }
2866 
2867 
2868 thread_id
2869 _user_fork(void)
2870 {
2871 	return fork_team();
2872 }
2873 
2874 
2875 thread_id
2876 _user_wait_for_child(thread_id child, uint32 flags, int32 *_userReason, status_t *_userReturnCode)
2877 {
2878 	status_t returnCode;
2879 	int32 reason;
2880 	thread_id deadChild;
2881 
2882 	if ((_userReason != NULL && !IS_USER_ADDRESS(_userReason))
2883 		|| (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode)))
2884 		return B_BAD_ADDRESS;
2885 
2886 	deadChild = wait_for_child(child, flags, &reason, &returnCode);
2887 
2888 	if (deadChild >= B_OK) {
2889 		// copy result data on successful completion
2890 		if ((_userReason != NULL
2891 				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
2892 			|| (_userReturnCode != NULL
2893 				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
2894 					< B_OK)) {
2895 			return B_BAD_ADDRESS;
2896 		}
2897 
2898 		return deadChild;
2899 	}
2900 
2901 	return syscall_restart_handle_post(deadChild);
2902 }
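

/*	Userland sketch: the POSIX waitpid() wrapper ends up here. A typical
	non-blocking reaper loop in a parent process:

		int status;
		pid_t pid;
		while ((pid = waitpid(-1, &status, WNOHANG)) > 0) {
			if (WIFEXITED(status))
				printf("child %ld exited with %d\n", (long)pid,
					WEXITSTATUS(status));
		}
*/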
2903 
2904 
2905 pid_t
2906 _user_process_info(pid_t process, int32 which)
2907 {
2908 	// we only allow returning the parent of the current process
2909 	if (which == PARENT_ID
2910 		&& process != 0 && process != thread_get_current_thread()->team->id)
2911 		return B_BAD_VALUE;
2912 
2913 	switch (which) {
2914 		case SESSION_ID:
2915 			return getsid(process);
2916 		case GROUP_ID:
2917 			return getpgid(process);
2918 		case PARENT_ID:
2919 			return getppid();
2920 	}
2921 
2922 	return B_BAD_VALUE;
2923 }
2924 
2925 
2926 pid_t
2927 _user_setpgid(pid_t processID, pid_t groupID)
2928 {
2929 	struct thread *thread = thread_get_current_thread();
2930 	struct team *currentTeam = thread->team;
2931 	struct team *team;
2932 
2933 	if (groupID < 0)
2934 		return B_BAD_VALUE;
2935 
2936 	if (processID == 0)
2937 		processID = currentTeam->id;
2938 
2939 	// if the group ID is not specified, use the target process' ID
2940 	if (groupID == 0)
2941 		groupID = processID;
2942 
2943 	if (processID == currentTeam->id) {
2944 		// we set our own group
2945 
2946 		// we must not change our process group ID if we're a session leader
2947 		if (is_session_leader(currentTeam))
2948 			return B_NOT_ALLOWED;
2949 	} else {
2950 		// another team is the target of the call -- check it out
2951 		InterruptsSpinLocker _(team_spinlock);
2952 
2953 		team = team_get_team_struct_locked(processID);
2954 		if (team == NULL)
2955 			return ESRCH;
2956 
2957 		// The team must be a child of the calling team and in the same session.
2958 		// (If that's the case it isn't a session leader either.)
2959 		if (team->parent != currentTeam
2960 			|| team->session_id != currentTeam->session_id) {
2961 			return B_NOT_ALLOWED;
2962 		}
2963 
2964 		if (team->group_id == groupID)
2965 			return groupID;
2966 
2967 		// The call is also supposed to fail on a child that has already
2968 		// executed exec*() [EACCES].
2969 		if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
2970 			return EACCES;
2971 	}
2972 
2973 	struct process_group *group = NULL;
2974 	if (groupID == processID) {
2975 		// A new process group might be needed.
2976 		group = create_process_group(groupID);
2977 		if (group == NULL)
2978 			return B_NO_MEMORY;
2979 
2980 		// Assume orphaned. We consider the situation of the team's parent
2981 		// below.
2982 		group->orphaned = true;
2983 	}
2984 
2985 	status_t status = B_OK;
2986 	struct process_group *freeGroup = NULL;
2987 
2988 	InterruptsSpinLocker locker(team_spinlock);
2989 
2990 	team = team_get_team_struct_locked(processID);
2991 	if (team != NULL) {
2992 		// check the conditions again -- they might have changed in the meantime
2993 		if (is_session_leader(team)
2994 			|| team->session_id != currentTeam->session_id) {
2995 			status = B_NOT_ALLOWED;
2996 		} else if (team != currentTeam
2997 				&& (team->flags & TEAM_FLAG_EXEC_DONE) != 0) {
2998 			status = EACCES;
2999 		} else if (team->group_id == groupID) {
3000 			// the team is already in the desired process group
3001 			freeGroup = group;
3002 		} else {
3003 			// Check if a process group with the requested ID already exists.
3004 			struct process_group *targetGroup
3005 				= team_get_process_group_locked(team->group->session, groupID);
3006 			if (targetGroup != NULL) {
3007 				// In the processID == groupID case we have to free the
3008 				// group we allocated above.
3009 				freeGroup = group;
3010 			} else if (processID == groupID) {
3011 				// We created a new process group, so insert it into the
3012 				// team's session.
3013 				insert_group_into_session(team->group->session, group);
3014 				targetGroup = group;
3015 			}
3016 
3017 			if (targetGroup != NULL) {
3018 				// we got a group, let's move the team there
3019 				process_group* oldGroup = team->group;
3020 
3021 				remove_team_from_group(team);
3022 				insert_team_into_group(targetGroup, team);
3023 
3024 				// Update the "orphaned" flag of all potentially affected
3025 				// groups.
3026 
3027 				// the team's old group
3028 				if (oldGroup->teams != NULL) {
3029 					oldGroup->orphaned = false;
3030 					update_orphaned_process_group(oldGroup, -1);
3031 				}
3032 
3033 				// the team's new group
3034 				struct team* parent = team->parent;
3035 				targetGroup->orphaned &= parent == NULL
3036 					|| parent->group == targetGroup
3037 					|| parent->session_id != team->session_id;
3038 
3039 				// children's groups
3040 				struct team* child = team->children;
3041 				while (child != NULL) {
3042 					child->group->orphaned = false;
3043 					update_orphaned_process_group(child->group, -1);
3044 
3045 					child = child->siblings_next;
3046 				}
3047 			} else
3048 				status = B_NOT_ALLOWED;
3049 		}
3050 	} else
3051 		status = B_NOT_ALLOWED;
3052 
3053 	// Changing the process group might have changed the situation for a parent
3054 	// waiting in wait_for_child(). Hence we notify it.
3055 	if (status == B_OK) {
3056 		team->parent->dead_children->condition_variable.NotifyAll(false);
3057 		team->parent->stopped_children->condition_variable.NotifyAll(false);
3058 		team->parent->continued_children->condition_variable.NotifyAll(false);
3059 	}
3060 
3061 	locker.Unlock();
3062 
3063 	if (status != B_OK) {
3064 		// in case of error, the group hasn't been added to the hash
3065 		team_delete_process_group(group);
3066 	}
3067 
3068 	team_delete_process_group(freeGroup);
3069 
3070 	return status == B_OK ? groupID : status;
3071 }
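

/*	Userland sketch: the classic job control idiom this syscall enables. A
	shell moves a forked child into its own process group, calling setpgid()
	in both parent and child so the group exists no matter which process runs
	first (the EACCES check above is why the parent's call must happen before
	the child exec()s):

		pid_t child = fork();
		if (child == 0) {
			setpgid(0, 0);				// child: new group with its own ID
			execl("/bin/app", "app", NULL);	// path is illustrative
			_exit(1);
		} else if (child > 0)
			setpgid(child, child);		// parent: same call, race-free
*/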
3072 
3073 
3074 pid_t
3075 _user_setsid(void)
3076 {
3077 	struct team *team = thread_get_current_thread()->team;
3078 	struct process_session *session;
3079 	struct process_group *group;
3080 	cpu_status state;
3081 	bool failed = false;
3082 
3083 	// the team must not already be a process group leader
3084 	if (is_process_group_leader(team))
3085 		return B_NOT_ALLOWED;
3086 
3087 	group = create_process_group(team->id);
3088 	if (group == NULL)
3089 		return B_NO_MEMORY;
3090 
3091 	session = create_process_session(group->id);
3092 	if (session == NULL) {
3093 		team_delete_process_group(group);
3094 		return B_NO_MEMORY;
3095 	}
3096 
3097 	state = disable_interrupts();
3098 	GRAB_TEAM_LOCK();
3099 
3100 	// this may have changed since the check above
3101 	if (!is_process_group_leader(team)) {
3102 		remove_team_from_group(team);
3103 
3104 		insert_group_into_session(session, group);
3105 		insert_team_into_group(group, team);
3106 	} else
3107 		failed = true;
3108 
3109 	RELEASE_TEAM_LOCK();
3110 	restore_interrupts(state);
3111 
3112 	if (failed) {
3113 		team_delete_process_group(group);
3114 		free(session);
3115 		return B_NOT_ALLOWED;
3116 	}
3117 
3118 	return team->group_id;
3119 }
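

/*	Userland sketch: setsid() as used when daemonizing -- fork first, so the
	child is guaranteed not to be a process group leader (which would hit the
	B_NOT_ALLOWED check above), then create the new session:

		if (fork() > 0)
			_exit(0);			// parent exits
		if (setsid() < 0) {
			// still a process group leader, or out of memory
		}
*/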
3120 
3121 
3122 status_t
3123 _user_wait_for_team(team_id id, status_t *_userReturnCode)
3124 {
3125 	status_t returnCode;
3126 	status_t status;
3127 
3128 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
3129 		return B_BAD_ADDRESS;
3130 
3131 	status = wait_for_team(id, &returnCode);
3132 	if (status >= B_OK && _userReturnCode != NULL) {
3133 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode)) < B_OK)
3134 			return B_BAD_ADDRESS;
3135 		return B_OK;
3136 	}
3137 
3138 	return syscall_restart_handle_post(status);
3139 }
3140 
3141 
3142 team_id
3143 _user_load_image(int32 argCount, const char **userArgs, int32 envCount,
3144 	const char **userEnv, int32 priority, uint32 flags, port_id errorPort,
3145 	uint32 errorToken)
3146 {
3147 	TRACE(("_user_load_image: argc = %ld\n", argCount));
3148 
3149 	if (argCount < 1 || userArgs == NULL || userEnv == NULL)
3150 		return B_BAD_VALUE;
3151 
3152 	if (!IS_USER_ADDRESS(userArgs) || !IS_USER_ADDRESS(userEnv))
3153 		return B_BAD_ADDRESS;
3154 
3155 	return load_image_etc(argCount, (char * const *)userArgs,
3156 		envCount, (char * const *)userEnv, priority, flags, errorPort,
3157 		errorToken, false);
3158 }
3159 
3160 
3161 void
3162 _user_exit_team(status_t returnValue)
3163 {
3164 	struct thread *thread = thread_get_current_thread();
3165 
3166 	thread->exit.status = returnValue;
3167 	thread->exit.reason = THREAD_RETURN_EXIT;
3168 
3169 	send_signal(thread->id, SIGKILL);
3170 }
3171 
3172 
3173 status_t
3174 _user_kill_team(team_id team)
3175 {
3176 	return kill_team(team);
3177 }
3178 
3179 
3180 status_t
3181 _user_get_team_info(team_id id, team_info *userInfo)
3182 {
3183 	status_t status;
3184 	team_info info;
3185 
3186 	if (!IS_USER_ADDRESS(userInfo))
3187 		return B_BAD_ADDRESS;
3188 
3189 	status = _get_team_info(id, &info, sizeof(team_info));
3190 	if (status == B_OK) {
3191 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3192 			return B_BAD_ADDRESS;
3193 	}
3194 
3195 	return status;
3196 }
3197 
3198 
3199 status_t
3200 _user_get_next_team_info(int32 *userCookie, team_info *userInfo)
3201 {
3202 	status_t status;
3203 	team_info info;
3204 	int32 cookie;
3205 
3206 	if (!IS_USER_ADDRESS(userCookie)
3207 		|| !IS_USER_ADDRESS(userInfo)
3208 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3209 		return B_BAD_ADDRESS;
3210 
3211 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
3212 	if (status != B_OK)
3213 		return status;
3214 
3215 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3216 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3217 		return B_BAD_ADDRESS;
3218 
3219 	return status;
3220 }
3221 
3222 
3223 team_id
3224 _user_get_current_team(void)
3225 {
3226 	return team_get_current_team_id();
3227 }
3228 
3229 
3230 status_t
3231 _user_get_team_usage_info(team_id team, int32 who, team_usage_info *userInfo, size_t size)
3232 {
3233 	team_usage_info info;
3234 	status_t status;
3235 
3236 	if (!IS_USER_ADDRESS(userInfo))
3237 		return B_BAD_ADDRESS;
3238 
3239 	status = _get_team_usage_info(team, who, &info, size);
3240 	if (status != B_OK)
3241 		return status;
3242 
3243 	if (user_memcpy(userInfo, &info, size) < B_OK)
3244 		return B_BAD_ADDRESS;
3245 
3246 	return status;
3247 }
3248 
3249